bnx2: Prevent ethtool -s from crashing when device is down.
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Scratch buffer size used when decompressing/loading firmware (64 KiB). */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
/* Prefix for all printk messages emitted by this driver. */
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.7"
#define DRV_MODULE_RELDATE      "June 17, 2008"

/* Convert a relative timeout in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board types.  Used as the driver_data value in
 * bnx2_pci_tbl[] and as the index into board_info[] for the name.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* Human-readable board names, indexed by board_t, above. */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI IDs claimed by this driver.  Entries with an HP subsystem vendor
 * ID come first so that NC370-branded boards match their specific
 * names before the generic PCI_ANY_ID entries.  The last field is the
 * board_t used to index board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* NVRAM/flash parameter table.  Each entry carries device configuration
 * register values followed by page geometry (bits/size), a byte-address
 * mask, total size and a display name.
 * NOTE(review): entries appear to be selected by matching the first
 * (strapping) field against the hardware at init time — confirm against
 * struct flash_spec in bnx2.h before relying on field meanings.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* NVRAM parameters for the 5709, which uses a fixed buffered-flash
 * configuration rather than an entry from flash_table[].
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = txr->tx_prod - txr->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a device register indirectly through the PCI config window.
 * indirect_lock serializes the address/data register pair so concurrent
 * indirect accesses cannot interleave.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
258
/* Write a device register indirectly through the PCI config window.
 * indirect_lock serializes the address/data register pair so concurrent
 * indirect accesses cannot interleave.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
/* Write a 32-bit value at @offset within the firmware shared memory
 * region (based at bp->shmem_base) via an indirect register access.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read a PHY register over the MDIO interface.
 *
 * @reg: PHY register number
 * @val: out parameter; receives the 16-bit register data, or 0 on timeout
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete within the ~500us poll window.  If the EMAC is auto-polling
 * the PHY, auto-poll is suspended around the manual transaction and
 * restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Temporarily disable hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Build the MDIO command: PHY address, register, READ opcode. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 * 10us for the BUSY bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Transaction finished; extract the data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
361
/* Write a 16-bit value to a PHY register over the MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete within the ~500us poll window.  As in bnx2_read_phy(),
 * hardware auto-polling is suspended around the manual transaction.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Temporarily disable hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Build the MDIO command: PHY address, register, data, WRITE opcode. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 * 10us for the BUSY bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
/* Re-enable interrupts on every vector and kick the host coalescing
 * block (COAL_NOW) so a pending status-block update fires immediately.
 * Each vector is acked twice — first with MASK_INT still set, then
 * without it.  NOTE(review): the rationale for the two-step ack is not
 * visible in this file; preserve the exact write order.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
445
/* Mask device interrupts and wait for in-flight handlers on every
 * vector to finish.  intr_sem is incremented so bnx2_netif_start()
 * only re-enables after the matching decrement reaches zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
/* Quiesce the interface: mask and synchronize interrupts, then, if the
 * device is up, stop NAPI polling and the TX queue.  trans_start is
 * refreshed so the netdev watchdog does not declare a TX timeout while
 * the device is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
485
/* Undo bnx2_netif_stop(): when the last interrupt-disable reference is
 * dropped (intr_sem reaches zero) and the device is up, restart the TX
 * queue, NAPI polling and device interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                }
        }
}
497
/* Free, for every TX queue, the DMA-coherent descriptor ring and the
 * software buffer-shadow ring.  Pointers are reset to NULL so this is
 * safe to call on partially-allocated rings.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                if (txr->tx_desc_ring) {
                        pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
                                            txr->tx_desc_ring,
                                            txr->tx_desc_mapping);
                        txr->tx_desc_ring = NULL;
                }
                kfree(txr->tx_buf_ring);
                txr->tx_buf_ring = NULL;
        }
}
517
518 static void
519 bnx2_free_rx_mem(struct bnx2 *bp)
520 {
521         int i;
522
523         for (i = 0; i < bp->num_rx_rings; i++) {
524                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
525                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
526                 int j;
527
528                 for (j = 0; j < bp->rx_max_ring; j++) {
529                         if (rxr->rx_desc_ring[j])
530                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
531                                                     rxr->rx_desc_ring[j],
532                                                     rxr->rx_desc_mapping[j]);
533                         rxr->rx_desc_ring[j] = NULL;
534                 }
535                 if (rxr->rx_buf_ring)
536                         vfree(rxr->rx_buf_ring);
537                 rxr->rx_buf_ring = NULL;
538
539                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
540                         if (rxr->rx_pg_desc_ring[j])
541                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
542                                                     rxr->rx_pg_desc_ring[i],
543                                                     rxr->rx_pg_desc_mapping[i]);
544                         rxr->rx_pg_desc_ring[i] = NULL;
545                 }
546                 if (rxr->rx_pg_ring)
547                         vfree(rxr->rx_pg_ring);
548                 rxr->rx_pg_ring = NULL;
549         }
550 }
551
/* Allocate, for every TX queue, the zeroed software buffer ring and the
 * DMA-coherent descriptor ring.
 *
 * Returns 0 on success or -ENOMEM on failure.  On failure, partial
 * allocations are left in place; the caller (bnx2_alloc_mem) releases
 * them through bnx2_free_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

                txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
                if (txr->tx_buf_ring == NULL)
                        return -ENOMEM;

                txr->tx_desc_ring =
                        pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                             &txr->tx_desc_mapping);
                if (txr->tx_desc_ring == NULL)
                        return -ENOMEM;
        }
        return 0;
}
573
/* Allocate, for every RX queue, the software shadow rings (vmalloc,
 * then zeroed) and the DMA-coherent descriptor and page-descriptor
 * rings.  The page ring is only allocated when rx_pg_ring_size is
 * non-zero.
 *
 * Returns 0 on success or -ENOMEM on failure.  On failure, partial
 * allocations are left in place; the caller (bnx2_alloc_mem) releases
 * them through bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
622
/* Release all device memory: TX/RX rings, 5709 context pages, and the
 * combined status+statistics block.  The statistics block shares the
 * status block allocation, so only the pointer is cleared here.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
648
/* Allocate all device memory: the combined status+statistics block,
 * per-vector MSI-X status blocks, 5709 context pages, and the RX/TX
 * rings.
 *
 * Returns 0 on success or -ENOMEM on failure; on failure every partial
 * allocation is released via bnx2_free_mem() before returning.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* Room for one aligned status block per MSI-X HW vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block... */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* ...remaining vectors get per-vector MSI-X status blocks. */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        bnapi->int_num = i << 24;
                }
        }

        /* The statistics block follows the status block(s). */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs 8 KiB (0x2000) of host context memory,
                 * allocated in page-size chunks.
                 */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
725
/* Encode the current link state (speed/duplex, autoneg status) and
 * write it to the bootcode via the BNX2_LINK_STATUS shared-memory word.
 * No-op when the PHY is remotely managed.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice: latched bits clear on the
                         * first read, per standard MII behavior.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
784
785 static char *
786 bnx2_xceiver_str(struct bnx2 *bp)
787 {
788         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
789                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
790                  "Copper"));
791 }
792
/* Update the netdev carrier flag, log the new link state (transceiver,
 * speed, duplex, flow control) to the kernel log, and propagate the
 * state to the bootcode via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        /* Message continues on the same log line:
                         * ", receive & transmit flow control ON" etc.
                         */
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
829
830 static void
831 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
832 {
833         u32 local_adv, remote_adv;
834
835         bp->flow_ctrl = 0;
836         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
837                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
838
839                 if (bp->duplex == DUPLEX_FULL) {
840                         bp->flow_ctrl = bp->req_flow_ctrl;
841                 }
842                 return;
843         }
844
845         if (bp->duplex != DUPLEX_FULL) {
846                 return;
847         }
848
849         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
850             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
851                 u32 val;
852
853                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
854                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
855                         bp->flow_ctrl |= FLOW_CTRL_TX;
856                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
857                         bp->flow_ctrl |= FLOW_CTRL_RX;
858                 return;
859         }
860
861         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
862         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
863
864         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
865                 u32 new_local_adv = 0;
866                 u32 new_remote_adv = 0;
867
868                 if (local_adv & ADVERTISE_1000XPAUSE)
869                         new_local_adv |= ADVERTISE_PAUSE_CAP;
870                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
871                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
872                 if (remote_adv & ADVERTISE_1000XPAUSE)
873                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
874                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
875                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
876
877                 local_adv = new_local_adv;
878                 remote_adv = new_remote_adv;
879         }
880
881         /* See Table 28B-3 of 802.3ab-1999 spec. */
882         if (local_adv & ADVERTISE_PAUSE_CAP) {
883                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
884                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
885                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
886                         }
887                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
888                                 bp->flow_ctrl = FLOW_CTRL_RX;
889                         }
890                 }
891                 else {
892                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
893                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
894                         }
895                 }
896         }
897         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
898                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
899                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
900
901                         bp->flow_ctrl = FLOW_CTRL_TX;
902                 }
903         }
904 }
905
906 static int
907 bnx2_5709s_linkup(struct bnx2 *bp)
908 {
909         u32 val, speed;
910
911         bp->link_up = 1;
912
913         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
914         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
915         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
916
917         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
918                 bp->line_speed = bp->req_line_speed;
919                 bp->duplex = bp->req_duplex;
920                 return 0;
921         }
922         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
923         switch (speed) {
924                 case MII_BNX2_GP_TOP_AN_SPEED_10:
925                         bp->line_speed = SPEED_10;
926                         break;
927                 case MII_BNX2_GP_TOP_AN_SPEED_100:
928                         bp->line_speed = SPEED_100;
929                         break;
930                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
931                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
932                         bp->line_speed = SPEED_1000;
933                         break;
934                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
935                         bp->line_speed = SPEED_2500;
936                         break;
937         }
938         if (val & MII_BNX2_GP_TOP_AN_FD)
939                 bp->duplex = DUPLEX_FULL;
940         else
941                 bp->duplex = DUPLEX_HALF;
942         return 0;
943 }
944
945 static int
946 bnx2_5708s_linkup(struct bnx2 *bp)
947 {
948         u32 val;
949
950         bp->link_up = 1;
951         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
952         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
953                 case BCM5708S_1000X_STAT1_SPEED_10:
954                         bp->line_speed = SPEED_10;
955                         break;
956                 case BCM5708S_1000X_STAT1_SPEED_100:
957                         bp->line_speed = SPEED_100;
958                         break;
959                 case BCM5708S_1000X_STAT1_SPEED_1G:
960                         bp->line_speed = SPEED_1000;
961                         break;
962                 case BCM5708S_1000X_STAT1_SPEED_2G5:
963                         bp->line_speed = SPEED_2500;
964                         break;
965         }
966         if (val & BCM5708S_1000X_STAT1_FD)
967                 bp->duplex = DUPLEX_FULL;
968         else
969                 bp->duplex = DUPLEX_HALF;
970
971         return 0;
972 }
973
974 static int
975 bnx2_5706s_linkup(struct bnx2 *bp)
976 {
977         u32 bmcr, local_adv, remote_adv, common;
978
979         bp->link_up = 1;
980         bp->line_speed = SPEED_1000;
981
982         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
983         if (bmcr & BMCR_FULLDPLX) {
984                 bp->duplex = DUPLEX_FULL;
985         }
986         else {
987                 bp->duplex = DUPLEX_HALF;
988         }
989
990         if (!(bmcr & BMCR_ANENABLE)) {
991                 return 0;
992         }
993
994         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
995         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
996
997         common = local_adv & remote_adv;
998         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
999
1000                 if (common & ADVERTISE_1000XFULL) {
1001                         bp->duplex = DUPLEX_FULL;
1002                 }
1003                 else {
1004                         bp->duplex = DUPLEX_HALF;
1005                 }
1006         }
1007
1008         return 0;
1009 }
1010
1011 static int
1012 bnx2_copper_linkup(struct bnx2 *bp)
1013 {
1014         u32 bmcr;
1015
1016         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1017         if (bmcr & BMCR_ANENABLE) {
1018                 u32 local_adv, remote_adv, common;
1019
1020                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1021                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1022
1023                 common = local_adv & (remote_adv >> 2);
1024                 if (common & ADVERTISE_1000FULL) {
1025                         bp->line_speed = SPEED_1000;
1026                         bp->duplex = DUPLEX_FULL;
1027                 }
1028                 else if (common & ADVERTISE_1000HALF) {
1029                         bp->line_speed = SPEED_1000;
1030                         bp->duplex = DUPLEX_HALF;
1031                 }
1032                 else {
1033                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1034                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1035
1036                         common = local_adv & remote_adv;
1037                         if (common & ADVERTISE_100FULL) {
1038                                 bp->line_speed = SPEED_100;
1039                                 bp->duplex = DUPLEX_FULL;
1040                         }
1041                         else if (common & ADVERTISE_100HALF) {
1042                                 bp->line_speed = SPEED_100;
1043                                 bp->duplex = DUPLEX_HALF;
1044                         }
1045                         else if (common & ADVERTISE_10FULL) {
1046                                 bp->line_speed = SPEED_10;
1047                                 bp->duplex = DUPLEX_FULL;
1048                         }
1049                         else if (common & ADVERTISE_10HALF) {
1050                                 bp->line_speed = SPEED_10;
1051                                 bp->duplex = DUPLEX_HALF;
1052                         }
1053                         else {
1054                                 bp->line_speed = 0;
1055                                 bp->link_up = 0;
1056                         }
1057                 }
1058         }
1059         else {
1060                 if (bmcr & BMCR_SPEED100) {
1061                         bp->line_speed = SPEED_100;
1062                 }
1063                 else {
1064                         bp->line_speed = SPEED_10;
1065                 }
1066                 if (bmcr & BMCR_FULLDPLX) {
1067                         bp->duplex = DUPLEX_FULL;
1068                 }
1069                 else {
1070                         bp->duplex = DUPLEX_HALF;
1071                 }
1072         }
1073
1074         return 0;
1075 }
1076
/* Program the L2 context for one rx ring.  On the 5709, this also
 * computes the rx-occupancy watermarks used to trigger pause frames
 * when tx flow control is enabled.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* The low watermark only matters when we send pause
		 * frames (tx flow control); otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Keep the watermarks consistent: lo must stay below hi. */
		if (hi_water <= lo_water)
			lo_water = 0;

		/* Convert to the hardware's watermark units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; clamp it, and drop lo_water
		 * entirely if hi_water scaled down to zero.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1112
1113 static void
1114 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1115 {
1116         int i;
1117         u32 cid;
1118
1119         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1120                 if (i == 1)
1121                         cid = RX_RSS_CID;
1122                 bnx2_init_rx_context(bp, cid);
1123         }
1124 }
1125
/* Program the EMAC to match the link state resolved by the PHY code:
 * port mode, speed, duplex and rx/tx pause.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap timings; 1G half duplex needs the
	 * larger values.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* The 5709 rx watermarks depend on the flow control state,
	 * which may just have changed; reprogram all rx contexts.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1195
1196 static void
1197 bnx2_enable_bmsr1(struct bnx2 *bp)
1198 {
1199         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1200             (CHIP_NUM(bp) == CHIP_NUM_5709))
1201                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1202                                MII_BNX2_BLK_ADDR_GP_STATUS);
1203 }
1204
1205 static void
1206 bnx2_disable_bmsr1(struct bnx2 *bp)
1207 {
1208         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1209             (CHIP_NUM(bp) == CHIP_NUM_5709))
1210                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1211                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1212 }
1213
1214 static int
1215 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1216 {
1217         u32 up1;
1218         int ret = 1;
1219
1220         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1221                 return 0;
1222
1223         if (bp->autoneg & AUTONEG_SPEED)
1224                 bp->advertising |= ADVERTISED_2500baseX_Full;
1225
1226         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1227                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1228
1229         bnx2_read_phy(bp, bp->mii_up1, &up1);
1230         if (!(up1 & BCM5708S_UP1_2G5)) {
1231                 up1 |= BCM5708S_UP1_2G5;
1232                 bnx2_write_phy(bp, bp->mii_up1, up1);
1233                 ret = 0;
1234         }
1235
1236         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1237                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1238                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1239
1240         return ret;
1241 }
1242
1243 static int
1244 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1245 {
1246         u32 up1;
1247         int ret = 0;
1248
1249         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1250                 return 0;
1251
1252         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1253                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1254
1255         bnx2_read_phy(bp, bp->mii_up1, &up1);
1256         if (up1 & BCM5708S_UP1_2G5) {
1257                 up1 &= ~BCM5708S_UP1_2G5;
1258                 bnx2_write_phy(bp, bp->mii_up1, up1);
1259                 ret = 1;
1260         }
1261
1262         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1263                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1264                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1265
1266         return ret;
1267 }
1268
1269 static void
1270 bnx2_enable_forced_2g5(struct bnx2 *bp)
1271 {
1272         u32 bmcr;
1273
1274         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1275                 return;
1276
1277         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1278                 u32 val;
1279
1280                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1281                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1282                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1283                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1284                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1285                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1286
1287                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1288                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1289                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1290
1291         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1293                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1294         }
1295
1296         if (bp->autoneg & AUTONEG_SPEED) {
1297                 bmcr &= ~BMCR_ANENABLE;
1298                 if (bp->req_duplex == DUPLEX_FULL)
1299                         bmcr |= BMCR_FULLDPLX;
1300         }
1301         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1302 }
1303
1304 static void
1305 bnx2_disable_forced_2g5(struct bnx2 *bp)
1306 {
1307         u32 bmcr;
1308
1309         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1310                 return;
1311
1312         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1313                 u32 val;
1314
1315                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1316                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1317                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1318                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1319                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1323                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1324
1325         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1326                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1327                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1328         }
1329
1330         if (bp->autoneg & AUTONEG_SPEED)
1331                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1332         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1333 }
1334
1335 static void
1336 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1337 {
1338         u32 val;
1339
1340         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1341         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1342         if (start)
1343                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1344         else
1345                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1346 }
1347
/* Re-evaluate the PHY link state and reprogram the MAC accordingly.
 * Reports via bnx2_report_link() when the up/down state changes.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Loopback modes always report link up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a remote PHY, link management belongs to the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* The status register latches link-down events; read it twice
	 * to get the current state.  On 5709 SerDes it lives in the
	 * GP_STATUS block, hence the enable/disable wrappers.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced link-down first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is also a latching register; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override BMSR_LSTATUS: link is up only
		 * if the EMAC sees link and autoneg has sync.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link lost while in parallel detect: go back to
		 * full autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1431
1432 static int
1433 bnx2_reset_phy(struct bnx2 *bp)
1434 {
1435         int i;
1436         u32 reg;
1437
1438         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1439
1440 #define PHY_RESET_MAX_WAIT 100
1441         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1442                 udelay(10);
1443
1444                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1445                 if (!(reg & BMCR_RESET)) {
1446                         udelay(20);
1447                         break;
1448                 }
1449         }
1450         if (i == PHY_RESET_MAX_WAIT) {
1451                 return -EBUSY;
1452         }
1453         return 0;
1454 }
1455
1456 static u32
1457 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1458 {
1459         u32 adv = 0;
1460
1461         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1462                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1463
1464                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1465                         adv = ADVERTISE_1000XPAUSE;
1466                 }
1467                 else {
1468                         adv = ADVERTISE_PAUSE_CAP;
1469                 }
1470         }
1471         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1472                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1473                         adv = ADVERTISE_1000XPSE_ASYM;
1474                 }
1475                 else {
1476                         adv = ADVERTISE_PAUSE_ASYM;
1477                 }
1478         }
1479         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1480                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1481                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1482                 }
1483                 else {
1484                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1485                 }
1486         }
1487         return adv;
1488 }
1489
1490 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1491
/* Hand link setup off to the firmware that owns the remote PHY.
 * Translates the driver's autoneg/speed/duplex/flow-control settings
 * into BNX2_NETLINK_SET_LINK_* bits and issues a SET_LINK command.
 * Called with bp->phy_lock held; the lock is dropped around the
 * firmware sync.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Advertise every speed the user enabled. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: request exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep; drop the phy lock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1548
/* Configure a SerDes PHY according to the driver's autoneg/speed/
 * duplex settings.  Delegates to the firmware for remote PHYs.  When
 * the configuration changes, the link is deliberately bounced so the
 * partner sees a link transition.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the 2.5G advertisement changed, the link must be
		 * bounced for the new setting to take effect.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Withdraw all 1000X abilities and restart
				 * autoneg so the partner drops the link,
				 * then apply the new forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just refresh the MAC state. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiated speed. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the phy lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1663
1664 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1665         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1666                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1667                 (ADVERTISED_1000baseT_Full)
1668
1669 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1670         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1671         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1672         ADVERTISED_1000baseT_Full)
1673
1674 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1675         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1676
1677 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1678
1679 static void
1680 bnx2_set_default_remote_link(struct bnx2 *bp)
1681 {
1682         u32 link;
1683
1684         if (bp->phy_port == PORT_TP)
1685                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1686         else
1687                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1688
1689         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1690                 bp->req_line_speed = 0;
1691                 bp->autoneg |= AUTONEG_SPEED;
1692                 bp->advertising = ADVERTISED_Autoneg;
1693                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1694                         bp->advertising |= ADVERTISED_10baseT_Half;
1695                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1696                         bp->advertising |= ADVERTISED_10baseT_Full;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1698                         bp->advertising |= ADVERTISED_100baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1700                         bp->advertising |= ADVERTISED_100baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1702                         bp->advertising |= ADVERTISED_1000baseT_Full;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1704                         bp->advertising |= ADVERTISED_2500baseX_Full;
1705         } else {
1706                 bp->autoneg = 0;
1707                 bp->advertising = 0;
1708                 bp->req_duplex = DUPLEX_FULL;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1710                         bp->req_line_speed = SPEED_10;
1711                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1712                                 bp->req_duplex = DUPLEX_HALF;
1713                 }
1714                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1715                         bp->req_line_speed = SPEED_100;
1716                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1717                                 bp->req_duplex = DUPLEX_HALF;
1718                 }
1719                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1720                         bp->req_line_speed = SPEED_1000;
1721                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1722                         bp->req_line_speed = SPEED_2500;
1723         }
1724 }
1725
1726 static void
1727 bnx2_set_default_link(struct bnx2 *bp)
1728 {
1729         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1730                 bnx2_set_default_remote_link(bp);
1731                 return;
1732         }
1733
1734         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1735         bp->req_line_speed = 0;
1736         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1737                 u32 reg;
1738
1739                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1740
1741                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1742                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1743                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1744                         bp->autoneg = 0;
1745                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1746                         bp->req_duplex = DUPLEX_FULL;
1747                 }
1748         } else
1749                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1750 }
1751
1752 static void
1753 bnx2_send_heart_beat(struct bnx2 *bp)
1754 {
1755         u32 msg;
1756         u32 addr;
1757
1758         spin_lock(&bp->indirect_lock);
1759         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1760         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1761         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1762         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1763         spin_unlock(&bp->indirect_lock);
1764 }
1765
/* Handle a link event reported by the firmware-managed (remote) PHY.
 * Link state is read from the shared-memory BNX2_LINK_STATUS word and
 * copied into the driver's link fields; the MAC is then reprogrammed.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect changes */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it has missed driver pulses;
	 * answer it right away. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case deliberately falls through to its
		 * matching xxFULL case after downgrading the duplex. */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take the negotiated result from the firmware. */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the firmware switched media, reload the defaults
		 * for the new port type. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1842
1843 static int
1844 bnx2_set_remote_link(struct bnx2 *bp)
1845 {
1846         u32 evt_code;
1847
1848         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1849         switch (evt_code) {
1850                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1851                         bnx2_remote_phy_event(bp);
1852                         break;
1853                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1854                 default:
1855                         bnx2_send_heart_beat(bp);
1856                         break;
1857         }
1858         return 0;
1859 }
1860
/* Program the copper PHY from bp->autoneg / bp->advertising / bp->req_*.
 * Called with bp->phy_lock held; the lock is dropped briefly while
 * waiting for a forced link-down to take effect.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked to the speed and pause
		 * bits this driver manages. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from ethtool flags. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite and restart autoneg if something changed
		 * or autoneg is currently disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read it twice to get
		 * the current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1957
1958 static int
1959 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1960 {
1961         if (bp->loopback == MAC_LOOPBACK)
1962                 return 0;
1963
1964         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1965                 return (bnx2_setup_serdes_phy(bp, port));
1966         }
1967         else {
1968                 return (bnx2_setup_copper_phy(bp));
1969         }
1970 }
1971
/* Initialize the 5709 SerDes PHY.  The 5709S uses a paged register
 * layout: the standard IEEE MII registers live at offset +0x10 inside
 * the combo IEEE block, selected through MII_BNX2_BLK_ADDR.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Remap the generic MII register offsets for this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the device is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg mode (BAM) and next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM configuration. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected for normal MII access. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2021
/* Initialize the BCM5708 SerDes PHY: force fiber mode, optionally
 * enable 2.5G, and apply board/revision specific TX tuning.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the device supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply NVRAM-provided TX control tuning on backplane boards. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2079
/* Initialize the BCM5706 SerDes PHY.  The 0x18/0x1c accesses below are
 * vendor shadow/expansion registers; bit meanings come from the
 * Broadcom datasheet and are not symbolically named here.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2117
/* Initialize the copper PHY: apply vendor workarounds selected by
 * phy_flags, set the extended-packet-length bit for jumbo MTU, and
 * enable ethernet@wirespeed.  Register 0x18/0x17/0x15/0x10 accesses
 * are vendor shadow/DSP registers (values per Broadcom datasheet).
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* DSP write sequence for the CRC workaround on affected parts. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (clear bit 8 of DSP expand register 8). */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2169
2170
/* Top-level PHY initialization: set default MII register offsets,
 * enable link attentions, identify the PHY, run the chip-specific
 * init routine, then apply link settings via bnx2_setup_phy().
 * Returns 0 or a negative errno from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE MII register map; chip-specific init below may
	 * remap these (e.g. the 5709S uses offset +0x10). */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* A firmware-managed PHY is not touched directly. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Read the 32-bit PHY identifier from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2214
2215 static int
2216 bnx2_set_mac_loopback(struct bnx2 *bp)
2217 {
2218         u32 mac_mode;
2219
2220         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2221         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2222         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2223         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2224         bp->link_up = 1;
2225         return 0;
2226 }
2227
2228 static int bnx2_test_link(struct bnx2 *);
2229
/* Put the PHY into loopback at forced 1G full duplex for self-test,
 * then configure the MAC in GMII mode with loopback/force bits
 * cleared.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the PHY up to ~1s to report link in loopback mode;
	 * proceed regardless after that. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear loopback/force/25G bits in the MAC and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2259
/* Post a message to the firmware mailbox and wait for it to be
 * acknowledged.  Returns 0 on success (or unconditionally for WAIT0
 * messages), -EBUSY on ACK timeout, -EIO if the firmware reported a
 * bad status.  @silent suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number so the ACK can
	 * be matched to this request. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget; skip the result check. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2302
/* Initialize the 5709 context memory: start the context block, wait
 * for its memory init to finish, then zero each host context page and
 * write its DMA address into the hardware page table.  Returns 0,
 * -EBUSY on a hardware timeout, or -ENOMEM if a page is missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode host page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for hardware memory init completion (up to ~20us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages are allocated earlier; a missing one is fatal. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the page's 64-bit DMA address into the hardware
		 * page table entry for index i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2350
/* Zero out the on-chip context memory for all 96 contexts (used on
 * pre-5709 chips).  On 5706 A0 some context IDs are remapped to work
 * around a hardware erratum.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0: remap vcids with bit 3 set to the
			 * 0x60+ range (hardware workaround). */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans several physical pages; map and
		 * clear each one. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2393
2394 static int
2395 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2396 {
2397         u16 *good_mbuf;
2398         u32 good_mbuf_cnt;
2399         u32 val;
2400
2401         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2402         if (good_mbuf == NULL) {
2403                 printk(KERN_ERR PFX "Failed to allocate memory in "
2404                                     "bnx2_alloc_bad_rbuf\n");
2405                 return -ENOMEM;
2406         }
2407
2408         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2409                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2410
2411         good_mbuf_cnt = 0;
2412
2413         /* Allocate a bunch of mbufs and save the good ones in an array. */
2414         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2415         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2416                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2417                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2418
2419                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2420
2421                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2422
2423                 /* The addresses with Bit 9 set are bad memory blocks. */
2424                 if (!(val & (1 << 9))) {
2425                         good_mbuf[good_mbuf_cnt] = (u16) val;
2426                         good_mbuf_cnt++;
2427                 }
2428
2429                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2430         }
2431
2432         /* Free the good ones back to the mbuf pool thus discarding
2433          * all the bad ones. */
2434         while (good_mbuf_cnt) {
2435                 good_mbuf_cnt--;
2436
2437                 val = good_mbuf[good_mbuf_cnt];
2438                 val = (val << 9) | val | 1;
2439
2440                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2441         }
2442         kfree(good_mbuf);
2443         return 0;
2444 }
2445
2446 static void
2447 bnx2_set_mac_addr(struct bnx2 *bp)
2448 {
2449         u32 val;
2450         u8 *mac_addr = bp->dev->dev_addr;
2451
2452         val = (mac_addr[0] << 8) | mac_addr[1];
2453
2454         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2455
2456         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2457                 (mac_addr[4] << 8) | mac_addr[5];
2458
2459         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2460 }
2461
2462 static inline int
2463 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2464 {
2465         dma_addr_t mapping;
2466         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2467         struct rx_bd *rxbd =
2468                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2469         struct page *page = alloc_page(GFP_ATOMIC);
2470
2471         if (!page)
2472                 return -ENOMEM;
2473         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2474                                PCI_DMA_FROMDEVICE);
2475         rx_pg->page = page;
2476         pci_unmap_addr_set(rx_pg, mapping, mapping);
2477         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2478         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2479         return 0;
2480 }
2481
2482 static void
2483 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2484 {
2485         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2486         struct page *page = rx_pg->page;
2487
2488         if (!page)
2489                 return;
2490
2491         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2492                        PCI_DMA_FROMDEVICE);
2493
2494         __free_page(page);
2495         rx_pg->page = NULL;
2496 }
2497
/* Allocate an skb for rx ring slot 'index', align its data area for the
 * chip, DMA-map it for device writes, and publish the mapping in both
 * the software ring entry and the hardware buffer descriptor.
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Push skb->data up to the next BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address into the descriptor hi/lo words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Running byte count of posted rx buffers; the caller writes it
	 * to the chip through the bseq register.
	 */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2528
2529 static int
2530 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2531 {
2532         struct status_block *sblk = bnapi->status_blk.msi;
2533         u32 new_link_state, old_link_state;
2534         int is_set = 1;
2535
2536         new_link_state = sblk->status_attn_bits & event;
2537         old_link_state = sblk->status_attn_bits_ack & event;
2538         if (new_link_state != old_link_state) {
2539                 if (new_link_state)
2540                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2541                 else
2542                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2543         } else
2544                 is_set = 0;
2545
2546         return is_set;
2547 }
2548
/* Service PHY attention events under phy_lock: link state changes and
 * remote-PHY timer aborts.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2562
2563 static inline u16
2564 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2565 {
2566         u16 cons;
2567
2568         /* Tell compiler that status block fields can change. */
2569         barrier();
2570         cons = *bnapi->hw_tx_cons_ptr;
2571         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2572                 cons++;
2573         return cons;
2574 }
2575
/* Reclaim tx buffers completed by the hardware, up to 'budget' packets
 * (callers pass 0, making the equality check effectively "no limit").
 * Unmaps and frees each skb, then wakes the tx queue if enough
 * descriptors were freed.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the skipped end-of-page slot. */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* If the last BD of this packet has not completed
			 * yet, leave the whole packet for the next pass.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap the fragment BDs following the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to avoid racing with the
		 * transmit path stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2658
/* Recycle 'count' rx page-ring entries instead of allocating new pages.
 * If 'skb' is non-NULL, its last page fragment is detached, re-mapped
 * for DMA, put back into the ring at the current consumer slot, and the
 * skb is freed.  Each consumer entry (page pointer, unmap address, and
 * descriptor address words) is then moved to the producer slot so the
 * hardware can reuse it.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = rxr->rx_pg_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Strip the last fragment off the skb and put
			 * its page back into the ring at the consumer
			 * slot, then free the skb itself.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the consumer entry to the producer slot. */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2708
/* Recycle an rx buffer: move the skb and its DMA mapping from the
 * consumer slot to the producer slot so it can be posted back to the
 * hardware without a fresh allocation.  The header area that was
 * synced for the CPU is handed back to the device.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header bytes back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2738
/* Complete reception of a packet whose head is in 'skb'.  'len' is the
 * frame length with the 4 trailing bytes already subtracted by the
 * caller; 'ring_idx' packs the rx consumer index in the high 16 bits
 * and the producer index in the low 16 bits.  When hdr_len is zero the
 * whole frame is in the linear skb; otherwise only the first hdr_len
 * bytes are linear and the remainder is attached as page fragments
 * pulled from the page ring.  On allocation failure the buffers are
 * recycled back to the rings and an error is returned.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Post a replacement skb before consuming this one; on failure
	 * recycle the current buffer (and the page-ring entries it
	 * would have used) instead of delivering it.
	 */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	/* Skip the l2_fhdr the chip places ahead of the frame data. */
	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size includes the 4 trailing bytes; they are
		 * trimmed from the last fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain; recycle
				 * the unused pages and trim the trailer
				 * bytes already attached to the skb.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Drop the 4 trailing bytes from the last page. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Post a replacement page; on failure hand the
			 * partially-built skb's pages back to the ring.
			 */
			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2831
2832 static inline u16
2833 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2834 {
2835         u16 cons;
2836
2837         /* Tell compiler that status block fields can change. */
2838         barrier();
2839         cons = *bnapi->hw_rx_cons_ptr;
2840         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2841                 cons++;
2842         return cons;
2843 }
2844
/* NAPI rx handler: process up to 'budget' completed rx packets.  Each
 * frame is parsed from the l2_fhdr the chip writes ahead of the data;
 * error frames are recycled, frames up to rx_copy_thresh are copied
 * into a fresh skb, and larger/split frames go through bnx2_rx_skb().
 * The updated producer indices are written back to the chip at the
 * end.  Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for CPU inspection. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Recycle frames the chip flagged as bad. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		/* Work out how much of the frame is in the linear
		 * buffer: header-split frames carry the split point in
		 * l2_fhdr_ip_xsum; jumbo frames are capped at
		 * rx_jumbo_thresh with the rest in the page ring.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop the 4 trailing bytes included in the length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer goes back to the ring. */
			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames, except VLAN-tagged (0x8100)
		 * ones which may exceed mtu + ETH_HLEN by the tag.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when the chip
		 * identified TCP/UDP and reported no checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer positions. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2992
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);
	/* Mask the interrupt before scheduling NAPI. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3016
/* One-shot MSI ISR.  Unlike bnx2_msi(), no mask/ack register write is
 * done here (NOTE(review): presumably one-shot mode masks further
 * interrupts in hardware until NAPI re-arms them -- confirm against
 * the chip documentation).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
3034
/* INTx ISR.  The line may be shared, so first verify that this device
 * actually raised the interrupt before acking and scheduling NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing only if NAPI was
	 * not already scheduled.
	 */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3074
3075 static inline int
3076 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3077 {
3078         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3079         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3080
3081         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3082             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3083                 return 1;
3084         return 0;
3085 }
3086
3087 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3088                                  STATUS_ATTN_BITS_TIMER_ABORT)
3089
3090 static inline int
3091 bnx2_has_work(struct bnx2_napi *bnapi)
3092 {
3093         struct status_block *sblk = bnapi->status_blk.msi;
3094
3095         if (bnx2_has_fast_work(bnapi))
3096                 return 1;
3097
3098         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3099             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3100                 return 1;
3101
3102         return 0;
3103 }
3104
/* Service pending link-related attention events: if the attention bits
 * and their ack bits disagree for any tracked event, run the PHY
 * handler and force a status block update via the host coalescing
 * command register.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3124
3125 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3126                           int work_done, int budget)
3127 {
3128         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3129         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3130
3131         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3132                 bnx2_tx_int(bp, bnapi, 0);
3133
3134         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3135                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3136
3137         return work_done;
3138 }
3139
/* NAPI poll handler for MSI-X vectors: handles only rx/tx fast-path
 * work (link attention is not checked here; compare bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: leave polling mode and re-arm this
			 * vector with the index we processed up to.
			 */
			netif_rx_complete(bp->dev, napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3166
/* NAPI poll handler for INTx/MSI: services link attention and rx/tx
 * work until the budget is exhausted or no work remains, then
 * completes polling and re-enables the interrupt.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: the ack register is written twice --
			 * first with MASK_INT set (NOTE(review):
			 * presumably to deassert INTA first), then
			 * without it to re-enable the interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3210
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's receive filtering (promiscuous / all-multicast /
 * multicast hash) and VLAN-tag handling to match dev->flags and the
 * device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags on received frames only when no vlan group is
	 * registered and ASF is disabled.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* CRC-hash each address into a bit of the filter,
		 * spread over NUM_MC_HASH_REGISTERS 32-bit registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC mode register if something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the USER0 sort rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3285
3286 static void
3287 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3288         u32 rv2p_proc)
3289 {
3290         int i;
3291         u32 val;
3292
3293         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3294                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3295                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3296                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3297                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3298         }
3299
3300         for (i = 0; i < rv2p_code_len; i += 8) {
3301                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3302                 rv2p_code++;
3303                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3304                 rv2p_code++;
3305
3306                 if (rv2p_proc == RV2P_PROC1) {
3307                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3308                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3309                 }
3310                 else {
3311                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3312                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3313                 }
3314         }
3315
3316         /* Reset the processor, un-stall is done later. */
3317         if (rv2p_proc == RV2P_PROC1) {
3318                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3319         }
3320         else {
3321                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3322         }
3323 }
3324
/* Download one firmware image into an on-chip RISC processor.
 *
 * @bp:      driver context
 * @cpu_reg: register map of the target processor (RXP/TXP/TPAT/COM/CP)
 * @fw:      firmware image; the caller must have pointed fw->text at a
 *           scratch buffer of at least FW_BUF_SIZE bytes
 *
 * The CPU is halted, each section (text/data/sbss/bss/rodata) is copied
 * into its scratchpad via indirect register writes, and the CPU is then
 * restarted at fw->start_addr.
 *
 * Returns 0 on success, or the negative error from zlib_inflate_blob()
 * if the compressed text section fails to inflate.
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	/* Section addresses are processor-view (MIPS) addresses; convert
	 * to an offset inside the scratchpad before writing.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Text ships gzip-compressed; inflate it into the scratch
		 * buffer the caller attached as fw->text.
		 */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		/* NOTE(review): unlike text, data words are written without
		 * le32_to_cpu — presumably stored in host order in bnx2_fw.h;
		 * confirm before changing.
		 */
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		/* BSS-type sections carry no payload; just zero-fill. */
		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3406
3407 static int
3408 bnx2_init_cpus(struct bnx2 *bp)
3409 {
3410         struct fw_info *fw;
3411         int rc, rv2p_len;
3412         void *text, *rv2p;
3413
3414         /* Initialize the RV2P processor. */
3415         text = vmalloc(FW_BUF_SIZE);
3416         if (!text)
3417                 return -ENOMEM;
3418         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3419                 rv2p = bnx2_xi_rv2p_proc1;
3420                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3421         } else {
3422                 rv2p = bnx2_rv2p_proc1;
3423                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3424         }
3425         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3426         if (rc < 0)
3427                 goto init_cpu_err;
3428
3429         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3430
3431         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3432                 rv2p = bnx2_xi_rv2p_proc2;
3433                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3434         } else {
3435                 rv2p = bnx2_rv2p_proc2;
3436                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3437         }
3438         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3439         if (rc < 0)
3440                 goto init_cpu_err;
3441
3442         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3443
3444         /* Initialize the RX Processor. */
3445         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446                 fw = &bnx2_rxp_fw_09;
3447         else
3448                 fw = &bnx2_rxp_fw_06;
3449
3450         fw->text = text;
3451         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3452         if (rc)
3453                 goto init_cpu_err;
3454
3455         /* Initialize the TX Processor. */
3456         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3457                 fw = &bnx2_txp_fw_09;
3458         else
3459                 fw = &bnx2_txp_fw_06;
3460
3461         fw->text = text;
3462         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3463         if (rc)
3464                 goto init_cpu_err;
3465
3466         /* Initialize the TX Patch-up Processor. */
3467         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3468                 fw = &bnx2_tpat_fw_09;
3469         else
3470                 fw = &bnx2_tpat_fw_06;
3471
3472         fw->text = text;
3473         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3474         if (rc)
3475                 goto init_cpu_err;
3476
3477         /* Initialize the Completion Processor. */
3478         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3479                 fw = &bnx2_com_fw_09;
3480         else
3481                 fw = &bnx2_com_fw_06;
3482
3483         fw->text = text;
3484         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3485         if (rc)
3486                 goto init_cpu_err;
3487
3488         /* Initialize the Command Processor. */
3489         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3490                 fw = &bnx2_cp_fw_09;
3491         else
3492                 fw = &bnx2_cp_fw_06;
3493
3494         fw->text = text;
3495         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3496
3497 init_cpu_err:
3498         vfree(text);
3499         return rc;
3500 }
3501
/* Transition the chip between PCI power states.
 *
 * Only D0 (full power) and D3hot (suspend, optionally armed for
 * Wake-on-LAN) are handled; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state bits and any pending PME status
		 * (the status bit is write-one-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packet and leave
		 * magic-packet (WoL) receive mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force low-speed autoneg on copper
			 * ports while suspended to save power; the user's
			 * settings are restored right after setup_phy.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* NOTE(review): the low-order '1' appears to select
			 * sort-user rule entry 0 — confirm against RPM docs.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware whether WoL is armed. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 are only put in D3hot when WoL is enabled;
		 * all other chips always are (3 = D3hot state encoding).
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3638
3639 static int
3640 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3641 {
3642         u32 val;
3643         int j;
3644
3645         /* Request access to the flash interface. */
3646         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3647         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3648                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3649                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3650                         break;
3651
3652                 udelay(5);
3653         }
3654
3655         if (j >= NVRAM_TIMEOUT_COUNT)
3656                 return -EBUSY;
3657
3658         return 0;
3659 }
3660
3661 static int
3662 bnx2_release_nvram_lock(struct bnx2 *bp)
3663 {
3664         int j;
3665         u32 val;
3666
3667         /* Relinquish nvram interface. */
3668         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3669
3670         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3671                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3672                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3673                         break;
3674
3675                 udelay(5);
3676         }
3677
3678         if (j >= NVRAM_TIMEOUT_COUNT)
3679                 return -EBUSY;
3680
3681         return 0;
3682 }
3683
3684
3685 static int
3686 bnx2_enable_nvram_write(struct bnx2 *bp)
3687 {
3688         u32 val;
3689
3690         val = REG_RD(bp, BNX2_MISC_CFG);
3691         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3692
3693         if (bp->flash_info->flags & BNX2_NV_WREN) {
3694                 int j;
3695
3696                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3697                 REG_WR(bp, BNX2_NVM_COMMAND,
3698                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3699
3700                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3701                         udelay(5);
3702
3703                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3704                         if (val & BNX2_NVM_COMMAND_DONE)
3705                                 break;
3706                 }
3707
3708                 if (j >= NVRAM_TIMEOUT_COUNT)
3709                         return -EBUSY;
3710         }
3711         return 0;
3712 }
3713
3714 static void
3715 bnx2_disable_nvram_write(struct bnx2 *bp)
3716 {
3717         u32 val;
3718
3719         val = REG_RD(bp, BNX2_MISC_CFG);
3720         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3721 }
3722
3723
3724 static void
3725 bnx2_enable_nvram_access(struct bnx2 *bp)
3726 {
3727         u32 val;
3728
3729         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3730         /* Enable both bits, even on read. */
3731         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3732                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3733 }
3734
3735 static void
3736 bnx2_disable_nvram_access(struct bnx2 *bp)
3737 {
3738         u32 val;
3739
3740         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3741         /* Disable both bits, even after read. */
3742         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3743                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3744                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3745 }
3746
3747 static int
3748 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3749 {
3750         u32 cmd;
3751         int j;
3752
3753         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3754                 /* Buffered flash, no erase needed */
3755                 return 0;
3756
3757         /* Build an erase command */
3758         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3759               BNX2_NVM_COMMAND_DOIT;
3760
3761         /* Need to clear DONE bit separately. */
3762         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3763
3764         /* Address of the NVRAM to read from. */
3765         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3766
3767         /* Issue an erase command. */
3768         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3769
3770         /* Wait for completion. */
3771         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3772                 u32 val;
3773
3774                 udelay(5);
3775
3776                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3777                 if (val & BNX2_NVM_COMMAND_DONE)
3778                         break;
3779         }
3780
3781         if (j >= NVRAM_TIMEOUT_COUNT)
3782                 return -EBUSY;
3783
3784         return 0;
3785 }
3786
3787 static int
3788 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3789 {
3790         u32 cmd;
3791         int j;
3792
3793         /* Build the command word. */
3794         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3795
3796         /* Calculate an offset of a buffered flash, not needed for 5709. */
3797         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3798                 offset = ((offset / bp->flash_info->page_size) <<
3799                            bp->flash_info->page_bits) +
3800                           (offset % bp->flash_info->page_size);
3801         }
3802
3803         /* Need to clear DONE bit separately. */
3804         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3805
3806         /* Address of the NVRAM to read from. */
3807         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3808
3809         /* Issue a read command. */
3810         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3811
3812         /* Wait for completion. */
3813         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3814                 u32 val;
3815
3816                 udelay(5);
3817
3818                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3819                 if (val & BNX2_NVM_COMMAND_DONE) {
3820                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3821                         memcpy(ret_val, &v, 4);
3822                         break;
3823                 }
3824         }
3825         if (j >= NVRAM_TIMEOUT_COUNT)
3826                 return -EBUSY;
3827
3828         return 0;
3829 }
3830
3831
3832 static int
3833 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3834 {
3835         u32 cmd;
3836         __be32 val32;
3837         int j;
3838
3839         /* Build the command word. */
3840         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3841
3842         /* Calculate an offset of a buffered flash, not needed for 5709. */
3843         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3844                 offset = ((offset / bp->flash_info->page_size) <<
3845                           bp->flash_info->page_bits) +
3846                          (offset % bp->flash_info->page_size);
3847         }
3848
3849         /* Need to clear DONE bit separately. */
3850         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3851
3852         memcpy(&val32, val, 4);
3853
3854         /* Write the data. */
3855         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3856
3857         /* Address of the NVRAM to write to. */
3858         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3859
3860         /* Issue the write command. */
3861         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3862
3863         /* Wait for completion. */
3864         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3865                 udelay(5);
3866
3867                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3868                         break;
3869         }
3870         if (j >= NVRAM_TIMEOUT_COUNT)
3871                 return -EBUSY;
3872
3873         return 0;
3874 }
3875
/* Identify the attached flash/EEPROM part and record its geometry.
 *
 * The 5709 has a fixed flash configuration; older chips are matched
 * against flash_table using the strapping bits in NVM_CFG1.  On success
 * bp->flash_info and bp->flash_size are set.  Returns 0 on success,
 * -ENODEV when no table entry matches, or the error from acquiring the
 * NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field identifies the
		 * part.
		 */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j only reaches entry_count when neither loop matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the flash size the bootcode published in shared memory;
	 * fall back to the table entry's total_size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3958
3959 static int
3960 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3961                 int buf_size)
3962 {
3963         int rc = 0;
3964         u32 cmd_flags, offset32, len32, extra;
3965
3966         if (buf_size == 0)
3967                 return 0;
3968
3969         /* Request access to the flash interface. */
3970         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3971                 return rc;
3972
3973         /* Enable access to flash interface */
3974         bnx2_enable_nvram_access(bp);
3975
3976         len32 = buf_size;
3977         offset32 = offset;
3978         extra = 0;
3979
3980         cmd_flags = 0;
3981
3982         if (offset32 & 3) {
3983                 u8 buf[4];
3984                 u32 pre_len;
3985
3986                 offset32 &= ~3;
3987                 pre_len = 4 - (offset & 3);
3988
3989                 if (pre_len >= len32) {
3990                         pre_len = len32;
3991                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3992                                     BNX2_NVM_COMMAND_LAST;
3993                 }
3994                 else {
3995                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3996                 }
3997
3998                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3999
4000                 if (rc)
4001                         return rc;
4002
4003                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4004
4005                 offset32 += 4;
4006                 ret_buf += pre_len;
4007                 len32 -= pre_len;
4008         }
4009         if (len32 & 3) {
4010                 extra = 4 - (len32 & 3);
4011                 len32 = (len32 + 4) & ~3;
4012         }
4013
4014         if (len32 == 4) {
4015                 u8 buf[4];
4016
4017                 if (cmd_flags)
4018                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4019                 else
4020                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4021                                     BNX2_NVM_COMMAND_LAST;
4022
4023                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4024
4025                 memcpy(ret_buf, buf, 4 - extra);
4026         }
4027         else if (len32 > 0) {
4028                 u8 buf[4];
4029
4030                 /* Read the first word. */
4031                 if (cmd_flags)
4032                         cmd_flags = 0;
4033                 else
4034                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4035
4036                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4037
4038                 /* Advance to the next dword. */
4039                 offset32 += 4;
4040                 ret_buf += 4;
4041                 len32 -= 4;
4042
4043                 while (len32 > 4 && rc == 0) {
4044                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4045
4046                         /* Advance to the next dword. */
4047                         offset32 += 4;
4048                         ret_buf += 4;
4049                         len32 -= 4;
4050                 }
4051
4052                 if (rc)
4053                         return rc;
4054
4055                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4056                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4057
4058                 memcpy(ret_buf, buf, 4 - extra);
4059         }
4060
4061         /* Disable access to flash interface */
4062         bnx2_disable_nvram_access(bp);
4063
4064         bnx2_release_nvram_lock(bp);
4065
4066         return rc;
4067 }
4068
4069 static int
4070 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4071                 int buf_size)
4072 {
4073         u32 written, offset32, len32;
4074         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4075         int rc = 0;
4076         int align_start, align_end;
4077
4078         buf = data_buf;
4079         offset32 = offset;
4080         len32 = buf_size;
4081         align_start = align_end = 0;
4082
4083         if ((align_start = (offset32 & 3))) {
4084                 offset32 &= ~3;
4085                 len32 += align_start;
4086                 if (len32 < 4)
4087                         len32 = 4;
4088                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4089                         return rc;
4090         }
4091
4092         if (len32 & 3) {
4093                 align_end = 4 - (len32 & 3);
4094                 len32 += align_end;
4095                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4096                         return rc;
4097         }
4098
4099         if (align_start || align_end) {
4100                 align_buf = kmalloc(len32, GFP_KERNEL);
4101                 if (align_buf == NULL)
4102                         return -ENOMEM;
4103                 if (align_start) {
4104                         memcpy(align_buf, start, 4);
4105                 }
4106                 if (align_end) {
4107                         memcpy(align_buf + len32 - 4, end, 4);
4108                 }
4109                 memcpy(align_buf + align_start, data_buf, buf_size);
4110                 buf = align_buf;
4111         }
4112
4113         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4114                 flash_buffer = kmalloc(264, GFP_KERNEL);
4115                 if (flash_buffer == NULL) {
4116                         rc = -ENOMEM;
4117                         goto nvram_write_end;
4118                 }
4119         }
4120
4121         written = 0;
4122         while ((written < len32) && (rc == 0)) {
4123                 u32 page_start, page_end, data_start, data_end;
4124                 u32 addr, cmd_flags;
4125                 int i;
4126
4127                 /* Find the page_start addr */
4128                 page_start = offset32 + written;
4129                 page_start -= (page_start % bp->flash_info->page_size);
4130                 /* Find the page_end addr */
4131                 page_end = page_start + bp->flash_info->page_size;
4132                 /* Find the data_start addr */
4133                 data_start = (written == 0) ? offset32 : page_start;
4134                 /* Find the data_end addr */
4135                 data_end = (page_end > offset32 + len32) ?
4136                         (offset32 + len32) : page_end;
4137
4138                 /* Request access to the flash interface. */
4139                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4140                         goto nvram_write_end;
4141
4142                 /* Enable access to flash interface */
4143                 bnx2_enable_nvram_access(bp);
4144
4145                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4146                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4147                         int j;
4148
4149                         /* Read the whole page into the buffer
4150                          * (non-buffer flash only) */
4151                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4152                                 if (j == (bp->flash_info->page_size - 4)) {
4153                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4154                                 }
4155                                 rc = bnx2_nvram_read_dword(bp,
4156                                         page_start + j,
4157                                         &flash_buffer[j],
4158                                         cmd_flags);
4159
4160                                 if (rc)
4161                                         goto nvram_write_end;
4162
4163                                 cmd_flags = 0;
4164                         }
4165                 }
4166
4167                 /* Enable writes to flash interface (unlock write-protect) */
4168                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4169                         goto nvram_write_end;
4170
4171                 /* Loop to write back the buffer data from page_start to
4172                  * data_start */
4173                 i = 0;
4174                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4175                         /* Erase the page */
4176                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4177                                 goto nvram_write_end;
4178
4179                         /* Re-enable the write again for the actual write */
4180                         bnx2_enable_nvram_write(bp);
4181
4182                         for (addr = page_start; addr < data_start;
4183                                 addr += 4, i += 4) {
4184
4185                                 rc = bnx2_nvram_write_dword(bp, addr,
4186                                         &flash_buffer[i], cmd_flags);
4187
4188                                 if (rc != 0)
4189                                         goto nvram_write_end;
4190
4191                                 cmd_flags = 0;
4192                         }
4193                 }
4194
4195                 /* Loop to write the new data from data_start to data_end */
4196                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4197                         if ((addr == page_end - 4) ||
4198                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4199                                  (addr == data_end - 4))) {
4200
4201                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4202                         }
4203                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4204                                 cmd_flags);
4205
4206                         if (rc != 0)
4207                                 goto nvram_write_end;
4208
4209                         cmd_flags = 0;
4210                         buf += 4;
4211                 }
4212
4213                 /* Loop to write back the buffer data from data_end
4214                  * to page_end */
4215                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4216                         for (addr = data_end; addr < page_end;
4217                                 addr += 4, i += 4) {
4218
4219                                 if (addr == page_end-4) {
4220                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4221                                 }
4222                                 rc = bnx2_nvram_write_dword(bp, addr,
4223                                         &flash_buffer[i], cmd_flags);
4224
4225                                 if (rc != 0)
4226                                         goto nvram_write_end;
4227
4228                                 cmd_flags = 0;
4229                         }
4230                 }
4231
4232                 /* Disable writes to flash interface (lock write-protect) */
4233                 bnx2_disable_nvram_write(bp);
4234
4235                 /* Disable access to flash interface */
4236                 bnx2_disable_nvram_access(bp);
4237                 bnx2_release_nvram_lock(bp);
4238
4239                 /* Increment written */
4240                 written += data_end - data_start;
4241         }
4242
4243 nvram_write_end:
4244         kfree(flash_buffer);
4245         kfree(align_buf);
4246         return rc;
4247 }
4248
/* Detect whether the bootcode (firmware) manages the external PHY
 * ("remote PHY" mode) and record the result in bp->phy_flags and
 * bp->phy_port.  Only meaningful on SerDes devices.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	/* Start from a clean flag state; re-detect capability each time. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
		return;

	/* The shared-memory capability word is only valid when it carries
	 * the firmware signature. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Report the port type the firmware currently has selected. */
		val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Acknowledge the remote PHY capability back to the
		 * firmware, but only while the interface is up. */
		if (netif_running(bp->dev)) {
			u32 sig;

			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
		}
	}
}
4280
/* Program the GRC register windows so windows 2 and 3 map the MSI-X
 * vector table and PBA respectively.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Use separate windows rather than one contiguous mapping. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4289
/* Reset the chip using the bootcode handshake protocol.  @reset_code
 * tells the firmware why the reset is being issued (it is OR'ed into
 * the WAIT0/WAIT1 sync messages).  Returns 0 on success or a negative
 * errno (-EBUSY if the core never comes out of reset, -ENODEV if the
 * chip comes up in the wrong endian mode).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; the
		 * register window config is then restored via PCI config
		 * space since the memory-mapped path is in reset. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips request a core reset through the PCICFG
		 * misc-config register. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		/* Both REQ and BSY must have cleared for the reset to be
		 * considered complete. */
		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-detect remote PHY mode; if the firmware changed the port
	 * type (fibre vs. TP) across the reset, reprogram the default
	 * remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Reprogram the MSI-X table windows after the reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4395
/* Bring the freshly-reset chip to an operational state: configure DMA,
 * contexts, on-chip CPUs, MAC address, MTU, host coalescing and the RX
 * filter, then tell the firmware initialization is done.  Returns 0 on
 * success or a negative errno from the context/CPU/firmware steps.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): additional DMA config bits; exact meaning not
	 * documented here -- verify against the chip manual. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering on the bridge command word. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window covers all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address so that
	 * multiple ports don't retransmit in lockstep. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* Reset the cached status-block index for every vector. */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds and timers for the default vector. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	/* With multiple vectors, status blocks are spaced 128 bytes apart. */
	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing setup for the additional MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake: tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host-coalescing command word for fast interrupt acks. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4606
4607 static void
4608 bnx2_clear_ring_states(struct bnx2 *bp)
4609 {
4610         struct bnx2_napi *bnapi;
4611         struct bnx2_tx_ring_info *txr;
4612         struct bnx2_rx_ring_info *rxr;
4613         int i;
4614
4615         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4616                 bnapi = &bp->bnx2_napi[i];
4617                 txr = &bnapi->tx_ring;
4618                 rxr = &bnapi->rx_ring;
4619
4620                 txr->tx_cons = 0;
4621                 txr->hw_tx_cons = 0;
4622                 rxr->rx_prod_bseq = 0;
4623                 rxr->rx_prod = 0;
4624                 rxr->rx_cons = 0;
4625                 rxr->rx_pg_prod = 0;
4626                 rxr->rx_pg_cons = 0;
4627         }
4628 }
4629
4630 static void
4631 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4632 {
4633         u32 val, offset0, offset1, offset2, offset3;
4634         u32 cid_addr = GET_CID_ADDR(cid);
4635
4636         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4637                 offset0 = BNX2_L2CTX_TYPE_XI;
4638                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4639                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4640                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4641         } else {
4642                 offset0 = BNX2_L2CTX_TYPE;
4643                 offset1 = BNX2_L2CTX_CMD_TYPE;
4644                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4645                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4646         }
4647         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4648         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4649
4650         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4651         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4652
4653         val = (u64) txr->tx_desc_mapping >> 32;
4654         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4655
4656         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4657         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4658 }
4659
4660 static void
4661 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4662 {
4663         struct tx_bd *txbd;
4664         u32 cid = TX_CID;
4665         struct bnx2_napi *bnapi;
4666         struct bnx2_tx_ring_info *txr;
4667
4668         bnapi = &bp->bnx2_napi[ring_num];
4669         txr = &bnapi->tx_ring;
4670
4671         if (ring_num == 0)
4672                 cid = TX_CID;
4673         else
4674                 cid = TX_TSS_CID + ring_num - 1;
4675
4676         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4677
4678         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4679
4680         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4681         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4682
4683         txr->tx_prod = 0;
4684         txr->tx_prod_bseq = 0;
4685
4686         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4687         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4688
4689         bnx2_init_tx_context(bp, cid, txr);
4690 }
4691
4692 static void
4693 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4694                      int num_rings)
4695 {
4696         int i;
4697         struct rx_bd *rxbd;
4698
4699         for (i = 0; i < num_rings; i++) {
4700                 int j;
4701
4702                 rxbd = &rx_ring[i][0];
4703                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4704                         rxbd->rx_bd_len = buf_size;
4705                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4706                 }
4707                 if (i == (num_rings - 1))
4708                         j = 0;
4709                 else
4710                         j = i + 1;
4711                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4712                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4713         }
4714 }
4715
/* Set up RX ring @ring_num: program the L2 RX context, chain the BD
 * pages, optionally set up the jumbo page ring, pre-fill the rings
 * with buffers, and ring the initial doorbells.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings get their
	 * own CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	/* NOTE(review): 5709-specific MQ mapping tweak -- verify against
	 * the chip documentation. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring (overwritten below when jumbo is active). */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the page-based ring that holds the
		 * non-linear part of large packets. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* BD ring base address for the normal RX ring. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for the RX doorbells. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4795
/* Initialize every TX and RX ring, and program the TSS/RSS
 * configuration when multiple rings are in use.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the RSS indirection table, packing 4 one-byte ring
		 * indices per 32-bit scratchpad word.  Entries spread
		 * round-robin over the non-default rings. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4840
/* Return the number of ring pages needed to hold @ring_size
 * descriptors, rounded up to a power of 2, capped at @max_size.
 * NOTE(review): the rounding logic assumes @max_size is a power of 2
 * and that the page count never exceeds it -- true for the MAX_RX_RINGS
 * / MAX_RX_PG_RINGS callers.
 */
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	/* Each ring page holds MAX_RX_DESC_CNT usable descriptors. */
	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	/* Shift max down to the highest set bit of num_rings ... */
	while ((max & num_rings) == 0)
		max >>= 1;

	/* ... then double it unless num_rings was already a power of 2. */
	if (num_rings != max)
		max <<= 1;

	return max;
}
4859
/* Compute all RX buffer and ring sizing derived from the current MTU
 * and the requested ring size @size: buffer sizes, copy/jumbo
 * thresholds, and the number of BD ring pages (normal and page ring).
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: aligned data area plus shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Jumbo mode: split packets between a small linear buffer
		 * and page-ring pages.  NOTE(review): the "- 40" appears to
		 * reserve room for protocol headers in the linear part --
		 * confirm against the RX path. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4898
4899 static void
4900 bnx2_free_tx_skbs(struct bnx2 *bp)
4901 {
4902         int i;
4903
4904         for (i = 0; i < bp->num_tx_rings; i++) {
4905                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4906                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4907                 int j;
4908
4909                 if (txr->tx_buf_ring == NULL)
4910                         continue;
4911
4912                 for (j = 0; j < TX_DESC_CNT; ) {
4913                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4914                         struct sk_buff *skb = tx_buf->skb;
4915                         int k, last;
4916
4917                         if (skb == NULL) {
4918                                 j++;
4919                                 continue;
4920                         }
4921
4922                         pci_unmap_single(bp->pdev,
4923                                          pci_unmap_addr(tx_buf, mapping),
4924                         skb_headlen(skb), PCI_DMA_TODEVICE);
4925
4926                         tx_buf->skb = NULL;
4927
4928                         last = skb_shinfo(skb)->nr_frags;
4929                         for (k = 0; k < last; k++) {
4930                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4931                                 pci_unmap_page(bp->pdev,
4932                                         pci_unmap_addr(tx_buf, mapping),
4933                                         skb_shinfo(skb)->frags[j].size,
4934                                         PCI_DMA_TODEVICE);
4935                         }
4936                         dev_kfree_skb(skb);
4937                         j += k + 1;
4938                 }
4939         }
4940 }
4941
/* Unmap and free every RX skb and RX page on every RX ring.  Used
 * during ring teardown.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): a NULL buffer ring aborts the whole scan,
		 * presumably because rings are allocated in order so the
		 * remaining rings are empty too -- confirm against the
		 * allocation path. */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release any pages held by the jumbo page ring. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
4975
/* Release all tx and rx skbs held by the driver.  Order (tx first, then
 * rx) is preserved from the original code; callers are expected to have
 * quiesced the chip (e.g. via bnx2_reset_chip) before calling.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4982
4983 static int
4984 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4985 {
4986         int rc;
4987
4988         rc = bnx2_reset_chip(bp, reset_code);
4989         bnx2_free_skbs(bp);
4990         if (rc)
4991                 return rc;
4992
4993         if ((rc = bnx2_init_chip(bp)) != 0)
4994                 return rc;
4995
4996         bnx2_init_all_rings(bp);
4997         return 0;
4998 }
4999
/* Full NIC (re)initialization: chip reset + ring setup, then PHY/link
 * bring-up under phy_lock.  @reset_phy is passed through to
 * bnx2_init_phy().  Returns 0 or the error from bnx2_reset_nic().
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* phy_lock serializes PHY access against the timer/irq paths. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* With a remote PHY, let the firmware-reported state be applied. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5016
/* ethtool self-test: register test.  For each table entry, bits in
 * rw_mask must be read/write and bits in ro_mask must be read-only.
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table terminated by offset 0xffff.  Entries flagged
	 * BNX2_FL_NOT_5709 are skipped on 5709 chips.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the register, probe it with all-0s then all-1s,
		 * and restore the original value (also on failure).
		 */
		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must have latched the 0s. */
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be unaffected by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must have latched the 1s. */
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5187
5188 static int
5189 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5190 {
5191         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5192                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5193         int i;
5194
5195         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5196                 u32 offset;
5197
5198                 for (offset = 0; offset < size; offset += 4) {
5199
5200                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5201
5202                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5203                                 test_pattern[i]) {
5204                                 return -ENODEV;
5205                         }
5206                 }
5207         }
5208         return 0;
5209 }
5210
5211 static int
5212 bnx2_test_memory(struct bnx2 *bp)
5213 {
5214         int ret = 0;
5215         int i;
5216         static struct mem_entry {
5217                 u32   offset;
5218                 u32   len;
5219         } mem_tbl_5706[] = {
5220                 { 0x60000,  0x4000 },
5221                 { 0xa0000,  0x3000 },
5222                 { 0xe0000,  0x4000 },
5223                 { 0x120000, 0x4000 },
5224                 { 0x1a0000, 0x4000 },
5225                 { 0x160000, 0x4000 },
5226                 { 0xffffffff, 0    },
5227         },
5228         mem_tbl_5709[] = {
5229                 { 0x60000,  0x4000 },
5230                 { 0xa0000,  0x3000 },
5231                 { 0xe0000,  0x4000 },
5232                 { 0x120000, 0x4000 },
5233                 { 0x1a0000, 0x4000 },
5234                 { 0xffffffff, 0    },
5235         };
5236         struct mem_entry *mem_tbl;
5237
5238         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5239                 mem_tbl = mem_tbl_5709;
5240         else
5241                 mem_tbl = mem_tbl_5706;
5242
5243         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5244                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5245                         mem_tbl[i].len)) != 0) {
5246                         return ret;
5247                 }
5248         }
5249
5250         return ret;
5251 }
5252
5253 #define BNX2_MAC_LOOPBACK       0
5254 #define BNX2_PHY_LOOPBACK       1
5255
/* ethtool self-test helper: send one frame through the chip in MAC or
 * PHY loopback mode and verify it comes back intact on the rx ring.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -ENODEV on any data/path failure.
 * PHY loopback is skipped (returns 0) when a remote PHY is in control.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* NOTE(review): the test always uses vector 0's rings; txr/rxr
	 * are re-assigned below to the same values as the initializers.
	 */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: dest = own MAC, zeroed src/type, then an
	 * incrementing byte pattern that is checked on receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce event (without an interrupt) so the status
	 * block reflects the current rx consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand the frame to the chip: fill one tx descriptor and ring
	 * the tx doorbell (bidx + bseq).
	 */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second forced coalesce so the completed tx/rx indices land in
	 * the status block before we inspect them.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* NOTE(review): rx_hdr is read before the DMA sync below —
	 * presumably acceptable here because the rx buffer was mapped
	 * coherently enough for this diagnostic path; confirm ordering.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any rx error flag fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (minus 4-byte CRC appended by the MAC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5383
5384 #define BNX2_MAC_LOOPBACK_FAILED        1
5385 #define BNX2_PHY_LOOPBACK_FAILED        2
5386 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5387                                          BNX2_PHY_LOOPBACK_FAILED)
5388
5389 static int
5390 bnx2_test_loopback(struct bnx2 *bp)
5391 {
5392         int rc = 0;
5393
5394         if (!netif_running(bp->dev))
5395                 return BNX2_LOOPBACK_FAILED;
5396
5397         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5398         spin_lock_bh(&bp->phy_lock);
5399         bnx2_init_phy(bp, 1);
5400         spin_unlock_bh(&bp->phy_lock);
5401         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5402                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5403         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5404                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5405         return rc;
5406 }
5407
5408 #define NVRAM_SIZE 0x200
5409 #define CRC32_RESIDUAL 0xdebb20e3
5410
5411 static int
5412 bnx2_test_nvram(struct bnx2 *bp)
5413 {
5414         __be32 buf[NVRAM_SIZE / 4];
5415         u8 *data = (u8 *) buf;
5416         int rc = 0;
5417         u32 magic, csum;
5418
5419         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5420                 goto test_nvram_done;
5421
5422         magic = be32_to_cpu(buf[0]);
5423         if (magic != 0x669955aa) {
5424                 rc = -ENODEV;
5425                 goto test_nvram_done;
5426         }
5427
5428         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5429                 goto test_nvram_done;
5430
5431         csum = ether_crc_le(0x100, data);
5432         if (csum != CRC32_RESIDUAL) {
5433                 rc = -ENODEV;
5434                 goto test_nvram_done;
5435         }
5436
5437         csum = ether_crc_le(0x100, data + 0x100);
5438         if (csum != CRC32_RESIDUAL) {
5439                 rc = -ENODEV;
5440         }
5441
5442 test_nvram_done:
5443         return rc;
5444 }
5445
5446 static int
5447 bnx2_test_link(struct bnx2 *bp)
5448 {
5449         u32 bmsr;
5450
5451         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5452                 if (bp->link_up)
5453                         return 0;
5454                 return -ENODEV;
5455         }
5456         spin_lock_bh(&bp->phy_lock);
5457         bnx2_enable_bmsr1(bp);
5458         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5459         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5460         bnx2_disable_bmsr1(bp);
5461         spin_unlock_bh(&bp->phy_lock);
5462
5463         if (bmsr & BMSR_LSTATUS) {
5464                 return 0;
5465         }
5466         return -ENODEV;
5467 }
5468
5469 static int
5470 bnx2_test_intr(struct bnx2 *bp)
5471 {
5472         int i;
5473         u16 status_idx;
5474
5475         if (!netif_running(bp->dev))
5476                 return -ENODEV;
5477
5478         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5479
5480         /* This register is not touched during run-time. */
5481         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5482         REG_RD(bp, BNX2_HC_COMMAND);
5483
5484         for (i = 0; i < 10; i++) {
5485                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5486                         status_idx) {
5487
5488                         break;
5489                 }
5490
5491                 msleep_interruptible(10);
5492         }
5493         if (i < 10)
5494                 return 0;
5495
5496         return -ENODEV;
5497 }
5498
/* Determining link for parallel detection (5706 SerDes).  Returns 1 if
 * the link partner appears to be up without autoneg, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled on this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected on the wire -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Double read: presumably the first read returns latched (stale)
	 * status — TODO confirm against the PHY documentation.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Receiver not in sync, or invalid RUDI -> no usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5530
/* Periodic SerDes state machine for the 5706, run from bnx2_timer().
 * Handles parallel detection: if autoneg is not completing, force
 * 1G/full; if the (forced) partner later enables autoneg, re-enable
 * autoneg here too.  Also re-checks physical link sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still inside the autoneg settle window; just count down. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is on but no link: if the partner looks
			 * alive without autoneg, fall back to forced
			 * 1G/full (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection; if the partner now
		 * advertises autoneg (vendor reg 0x15 bit 5 after the
		 * 0x17/0x0f01 shadow select), switch back to autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* Re-read the AN debug shadow (twice, to refresh latched
		 * state) and reconcile our link state with the hardware.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* We think link is up but the PHY lost sync: force
			 * it down once, then let bnx2_set_link() recover.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5592
/* Periodic SerDes state machine for the 5708, run from bnx2_timer().
 * On 2.5G-capable parts with autoneg enabled but no link, alternates
 * between forced 2.5G and autoneg until link is established.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remote PHY: firmware owns link management. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out the autoneg settle window. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Toggle between forced 2.5G and autoneg each pass. */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5625
/* Periodic driver timer (runs in softirq context via mod_timer).  Sends
 * the firmware heartbeat, refreshes the firmware rx-drop counter, works
 * around a 5708 statistics issue, and drives the SerDes state machines.
 * Re-arms itself with bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	/* Device going down: do not re-arm. */
	if (!netif_running(bp->dev))
		return;

	/* Interrupts disabled (e.g. reset in progress): skip the work
	 * but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5657
5658 static int
5659 bnx2_request_irq(struct bnx2 *bp)
5660 {
5661         unsigned long flags;
5662         struct bnx2_irq *irq;
5663         int rc = 0, i;
5664
5665         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5666                 flags = 0;
5667         else
5668                 flags = IRQF_SHARED;
5669
5670         for (i = 0; i < bp->irq_nvecs; i++) {
5671                 irq = &bp->irq_tbl[i];
5672                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5673                                  &bp->bnx2_napi[i]);
5674                 if (rc)
5675                         break;
5676                 irq->requested = 1;
5677         }
5678         return rc;
5679 }
5680
5681 static void
5682 bnx2_free_irq(struct bnx2 *bp)
5683 {
5684         struct bnx2_irq *irq;
5685         int i;
5686
5687         for (i = 0; i < bp->irq_nvecs; i++) {
5688                 irq = &bp->irq_tbl[i];
5689                 if (irq->requested)
5690                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5691                 irq->requested = 0;
5692         }
5693         if (bp->flags & BNX2_FLAG_USING_MSI)
5694                 pci_disable_msi(bp->pdev);
5695         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5696                 pci_disable_msix(bp->pdev);
5697
5698         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5699 }
5700
5701 static void
5702 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5703 {
5704         int i, rc;
5705         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5706
5707         bnx2_setup_msix_tbl(bp);
5708         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5709         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5710         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5711
5712         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5713                 msix_ent[i].entry = i;
5714                 msix_ent[i].vector = 0;
5715
5716                 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5717                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5718         }
5719
5720         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5721         if (rc != 0)
5722                 return;
5723
5724         bp->irq_nvecs = msix_vecs;
5725         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5726         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5727                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5728 }
5729
5730 static void
5731 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5732 {
5733         int cpus = num_online_cpus();
5734         int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5735
5736         bp->irq_tbl[0].handler = bnx2_interrupt;
5737         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5738         bp->irq_nvecs = 1;
5739         bp->irq_tbl[0].vector = bp->pdev->irq;
5740
5741         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5742                 bnx2_enable_msix(bp, msix_vecs);
5743
5744         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5745             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5746                 if (pci_enable_msi(bp->pdev) == 0) {
5747                         bp->flags |= BNX2_FLAG_USING_MSI;
5748                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5749                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5750                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5751                         } else
5752                                 bp->irq_tbl[0].handler = bnx2_msi;
5753
5754                         bp->irq_tbl[0].vector = bp->pdev->irq;
5755                 }
5756         }
5757         bp->num_tx_rings = 1;
5758         bp->num_rx_rings = bp->irq_nvecs;
5759 }
5760
/* Called with rtnl_lock.  net_device_ops open: power up, choose the
 * interrupt mode, allocate rings, request vectors, and initialize the
 * NIC.  If MSI was selected, verify it actually delivers interrupts
 * and fall back to INTx if not.  Returns 0 or a negative errno with
 * everything unwound.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI and redo init in INTx mode
			 * (dis_msi = 1 forces legacy interrupts).
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* The timer was armed above; stop it
				 * before unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;

open_err:
	/* Unwind in reverse order of acquisition. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
5837
/*
 * Deferred reset handler (bp->reset_task), scheduled from
 * bnx2_tx_timeout().  Runs in process context: quiesces the interface,
 * re-initializes the chip, and restarts traffic.  Bails out if the
 * device was brought down before the work item got to run.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* NOTE(review): intr_sem is set to 1 here, presumably to keep the
	 * ISR quiet until bnx2_netif_start() re-arms interrupts — confirm
	 * against the interrupt handler.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5853
/*
 * ndo_tx_timeout handler: the stack detected a stalled TX queue.
 * Defer the actual chip reset to process context via bp->reset_task.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5862
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/*
 * VLAN acceleration hook: record the new vlan_group and reprogram the
 * RX mode, with traffic stopped so the chip configuration and bp->vlgrp
 * never disagree while packets are flowing.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5878
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/*
 * ndo_start_xmit: map the skb (head plus page fragments) and post one TX
 * buffer descriptor per piece, encoding checksum-offload, VLAN-tag, and
 * LSO/TSO parameters into the BD flags, then ring the chip's doorbell
 * registers with the new producer index and byte count.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

	/* Should never trigger: the queue is stopped near the end of this
	 * function before the ring can get this full.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Hardware VLAN tag insertion: tag goes in the upper 16 bits. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: encode the TCP header offset
			 * (beyond the basic IPv6 header) into spare BD
			 * flag/mss bits for the chip.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: headers are rewritten below, so a
			 * cloned header block must be made private first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed per-segment tot_len and the TCP pseudo-header
			 * checksum; the chip fills in the rest per segment.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* NOTE(review): pci_map_single()/pci_map_page() return values are
	 * not checked for mapping errors here — later kernels add
	 * dma_mapping_error() handling; confirm whether a backport is
	 * wanted.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD covers the linear head and carries the START flag. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when the ring can no longer hold a maximally
	 * fragmented skb; re-wake immediately if completions have already
	 * freed enough room.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
6020
/* Called with rtnl_lock */
/*
 * ndo_stop handler: cancel any pending deferred reset, quiesce
 * interrupts and NAPI, stop the timer, reset the chip with a firmware
 * message encoding the wake-on-LAN policy, then release IRQs, SKBs and
 * DMA memory and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Tell the bootcode how the unload should behave wrt WoL. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6048
/*
 * Fold a 64-bit hardware statistics counter, stored as two 32-bit words
 * (<name>_hi / <name>_lo in struct statistics_block), into an unsigned
 * long.  The 64-bit form is only selected when unsigned long is 64 bits
 * wide (BITS_PER_LONG == 64); the 32-bit form simply drops the high
 * word.  The replacement lists are fully parenthesized so the macros
 * expand safely inside larger expressions (e.g. under '*' or '<<').
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6061
/*
 * ndo_get_stats handler: translate the chip's DMA'd statistics block
 * (bp->stats_blk) into the cached struct net_device_stats and return
 * it.  If the stats block has not been allocated yet, the (zeroed)
 * cached structure is returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* RX/TX packet totals: unicast + multicast + broadcast. */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	/* Length errors: both runts and giants count here.  (The
	 * "Overrsize" spelling matches the field name in bnx2.h.)
	 */
	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the categories filled in above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter not used on 5706 and 5708 A0 — reported
	 * as 0 there (NOTE(review): presumably the counter is unreliable
	 * on those chips; confirm against chip errata).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Drops counted by both the MAC (no mbuf) and the firmware. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6137
/* All ethtool functions called with rtnl_lock */

/*
 * ethtool get_settings: report supported modes based on the PHY type
 * (remote-PHY devices can be either copper or fibre, so both sets are
 * advertised), plus the current autoneg/speed/duplex state.  Link-state
 * fields are read under phy_lock; speed/duplex are only meaningful with
 * carrier up and are reported as -1 otherwise.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* The fields below can change from the link-management paths, so
	 * snapshot them under phy_lock.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6198
/*
 * ethtool set_settings: validate and apply a new port/autoneg/speed/
 * duplex configuration.  The whole operation runs under phy_lock, and
 * the device may be down: in that case only settings for the currently
 * active port are accepted and merely stored (no PHY access), to be
 * applied on the next bnx2_open().  Returns 0 or -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable devices can switch ports. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 only make sense on copper. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* NOTE(review): 'advertising' is deliberately(?)
			 * left at its previous value on this path —
			 * confirm that is intended.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed: fibre allows only 1000/2500 full duplex
		 * (2500 only on 2.5G-capable PHYs); copper disallows
		 * forcing gigabit and above.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6293
6294 static void
6295 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6296 {
6297         struct bnx2 *bp = netdev_priv(dev);
6298
6299         strcpy(info->driver, DRV_MODULE_NAME);
6300         strcpy(info->version, DRV_MODULE_VERSION);
6301         strcpy(info->bus_info, pci_name(bp->pdev));
6302         strcpy(info->fw_version, bp->fw_version);
6303 }
6304
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool: buffer length the caller must provide for a register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6312
/*
 * ethtool register dump: read the chip's register space in windows.
 * reg_boundaries[] holds consecutive (start, end) offset pairs; each
 * window [start, end) is read one 32-bit register at a time into the
 * matching offset of the output buffer, the gaps between windows stay
 * zero (the buffer is pre-cleared), and the final 0x8000 entry
 * terminates the walk.  Nothing is read if the device is down.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Reading registers from a powered-down chip is not safe. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			/* End of this window: jump to the next one. */
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6362
6363 static void
6364 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6365 {
6366         struct bnx2 *bp = netdev_priv(dev);
6367
6368         if (bp->flags & BNX2_FLAG_NO_WOL) {
6369                 wol->supported = 0;
6370                 wol->wolopts = 0;
6371         }
6372         else {
6373                 wol->supported = WAKE_MAGIC;
6374                 if (bp->wol)
6375                         wol->wolopts = WAKE_MAGIC;
6376                 else
6377                         wol->wolopts = 0;
6378         }
6379         memset(&wol->sopass, 0, sizeof(wol->sopass));
6380 }
6381
6382 static int
6383 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6384 {
6385         struct bnx2 *bp = netdev_priv(dev);
6386
6387         if (wol->wolopts & ~WAKE_MAGIC)
6388                 return -EINVAL;
6389
6390         if (wol->wolopts & WAKE_MAGIC) {
6391                 if (bp->flags & BNX2_FLAG_NO_WOL)
6392                         return -EINVAL;
6393
6394                 bp->wol = 1;
6395         }
6396         else {
6397                 bp->wol = 0;
6398         }
6399         return 0;
6400 }
6401
/*
 * ethtool nway_reset: restart autonegotiation.  Only valid when speed
 * autoneg is enabled.  Remote-PHY devices forward the request to the
 * firmware-managed PHY.  For SerDes PHYs the link is first forced down
 * (loopback for 20ms, with phy_lock dropped around the sleep) so the
 * link partner notices the restart, and the SerDes autoneg timeout is
 * armed on bp->timer before autoneg is re-triggered via the BMCR.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		/* phy_lock must be dropped: msleep() may sleep. */
		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6444
6445 static int
6446 bnx2_get_eeprom_len(struct net_device *dev)
6447 {
6448         struct bnx2 *bp = netdev_priv(dev);
6449
6450         if (bp->flash_info == NULL)
6451                 return 0;
6452
6453         return (int) bp->flash_size;
6454 }
6455
6456 static int
6457 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6458                 u8 *eebuf)
6459 {
6460         struct bnx2 *bp = netdev_priv(dev);
6461         int rc;
6462
6463         /* parameters already validated in ethtool_get_eeprom */
6464
6465         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6466
6467         return rc;
6468 }
6469
6470 static int
6471 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6472                 u8 *eebuf)
6473 {
6474         struct bnx2 *bp = netdev_priv(dev);
6475         int rc;
6476
6477         /* parameters already validated in ethtool_set_eeprom */
6478
6479         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6480
6481         return rc;
6482 }
6483
6484 static int
6485 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6486 {
6487         struct bnx2 *bp = netdev_priv(dev);
6488
6489         memset(coal, 0, sizeof(struct ethtool_coalesce));
6490
6491         coal->rx_coalesce_usecs = bp->rx_ticks;
6492         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6493         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6494         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6495
6496         coal->tx_coalesce_usecs = bp->tx_ticks;
6497         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6498         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6499         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6500
6501         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6502
6503         return 0;
6504 }
6505
6506 static int
6507 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6508 {
6509         struct bnx2 *bp = netdev_priv(dev);
6510
6511         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6512         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6513
6514         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6515         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6516
6517         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6518         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6519
6520         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6521         if (bp->rx_quick_cons_trip_int > 0xff)
6522                 bp->rx_quick_cons_trip_int = 0xff;
6523
6524         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6525         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6526
6527         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6528         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6529
6530         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6531         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6532
6533         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6534         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6535                 0xff;
6536
6537         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6538         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6539                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6540                         bp->stats_ticks = USEC_PER_SEC;
6541         }
6542         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6543                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6544         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6545
6546         if (netif_running(bp->dev)) {
6547                 bnx2_netif_stop(bp);
6548                 bnx2_init_nic(bp, 0);
6549                 bnx2_netif_start(bp);
6550         }
6551
6552         return 0;
6553 }
6554
6555 static void
6556 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6557 {
6558         struct bnx2 *bp = netdev_priv(dev);
6559
6560         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6561         ering->rx_mini_max_pending = 0;
6562         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6563
6564         ering->rx_pending = bp->rx_ring_size;
6565         ering->rx_mini_pending = 0;
6566         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6567
6568         ering->tx_max_pending = MAX_TX_DESC_CNT;
6569         ering->tx_pending = bp->tx_ring_size;
6570 }
6571
/*
 * Apply new RX/TX ring sizes.  If the interface is up, the chip is
 * fully quiesced and its DMA memory released before the sizes are
 * changed, then everything is reallocated and restarted.  Caller holds
 * rtnl_lock.
 *
 * NOTE(review): if bnx2_alloc_mem() fails on the way back up, the
 * device is left stopped with no rings allocated and the error is
 * returned to the caller — confirm the intended recovery is a manual
 * down/up cycle.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6596
6597 static int
6598 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6599 {
6600         struct bnx2 *bp = netdev_priv(dev);
6601         int rc;
6602
6603         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6604                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6605                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6606
6607                 return -EINVAL;
6608         }
6609         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6610         return rc;
6611 }
6612
6613 static void
6614 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6615 {
6616         struct bnx2 *bp = netdev_priv(dev);
6617
6618         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6619         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6620         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6621 }
6622
6623 static int
6624 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6625 {
6626         struct bnx2 *bp = netdev_priv(dev);
6627
6628         bp->req_flow_ctrl = 0;
6629         if (epause->rx_pause)
6630                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6631         if (epause->tx_pause)
6632                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6633
6634         if (epause->autoneg) {
6635                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6636         }
6637         else {
6638                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6639         }
6640
6641         spin_lock_bh(&bp->phy_lock);
6642
6643         bnx2_setup_phy(bp, bp->phy_port);
6644
6645         spin_unlock_bh(&bp->phy_lock);
6646
6647         return 0;
6648 }
6649
/* ethtool: report whether RX checksum offload is currently enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6657
/* ethtool: store the new RX checksum offload setting in bp->rx_csum
 * (consumed elsewhere in the driver — not visible in this chunk).
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6666
6667 static int
6668 bnx2_set_tso(struct net_device *dev, u32 data)
6669 {
6670         struct bnx2 *bp = netdev_priv(dev);
6671
6672         if (data) {
6673                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6674                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6675                         dev->features |= NETIF_F_TSO6;
6676         } else
6677                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6678                                    NETIF_F_TSO_ECN);
6679         return 0;
6680 }
6681
#define BNX2_NUM_STATS 46

/* Statistic names reported by ethtool -S.  This table is indexed in
 * lockstep with bnx2_stats_offset_arr[] and the bnx2_*_stats_len_arr[]
 * tables below; any change here must be mirrored in all of them.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6734
/* Convert a byte offset in struct statistics_block to a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Hardware counter locations (as 32-bit word indices into the stats
 * block) for each entry of bnx2_stats_str_arr[]; the two tables must
 * stay in the same order.  For 64-bit counters the offset names the
 * _hi word and the _lo word immediately follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6785
/* Per-counter size in bytes (4 or 8) for 5706 A0-A2 and 5708 A0 chips,
 * indexed in lockstep with bnx2_stats_str_arr[].  A 0 entry means the
 * counter is not read and is reported as zero.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6796
/* Per-counter size table for the remaining chip revisions; only
 * stat_IfHCInBadOctets (entry 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6804
#define BNX2_NUM_TESTS 6

/* Self-test names reported for ethtool -t; the order must match the
 * buf[] slots filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6817
6818 static int
6819 bnx2_get_sset_count(struct net_device *dev, int sset)
6820 {
6821         switch (sset) {
6822         case ETH_SS_TEST:
6823                 return BNX2_NUM_TESTS;
6824         case ETH_SS_STATS:
6825                 return BNX2_NUM_STATS;
6826         default:
6827                 return -EOPNOTSUPP;
6828         }
6829 }
6830
/* ethtool -t handler.  Each buf[] slot corresponds to an entry of
 * bnx2_tests_str_arr[]; non-zero means that test failed.
 *
 * Offline tests (register/memory/loopback) require exclusive access to
 * the chip: the NIC is quiesced and reset into diagnostic mode first,
 * and is re-initialized (or left reset, if the interface was down)
 * afterwards.  Online tests (NVRAM/interrupt/link) always run.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce traffic and put the chip in diag mode so the
                 * destructive tests can reset it at will.
                 */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* If the interface is down, leave the chip reset;
                 * otherwise bring it back up and restart traffic.
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6886
6887 static void
6888 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6889 {
6890         switch (stringset) {
6891         case ETH_SS_STATS:
6892                 memcpy(buf, bnx2_stats_str_arr,
6893                         sizeof(bnx2_stats_str_arr));
6894                 break;
6895         case ETH_SS_TEST:
6896                 memcpy(buf, bnx2_tests_str_arr,
6897                         sizeof(bnx2_tests_str_arr));
6898                 break;
6899         }
6900 }
6901
6902 static void
6903 bnx2_get_ethtool_stats(struct net_device *dev,
6904                 struct ethtool_stats *stats, u64 *buf)
6905 {
6906         struct bnx2 *bp = netdev_priv(dev);
6907         int i;
6908         u32 *hw_stats = (u32 *) bp->stats_blk;
6909         u8 *stats_len_arr = NULL;
6910
6911         if (hw_stats == NULL) {
6912                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6913                 return;
6914         }
6915
6916         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6917             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6918             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6919             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6920                 stats_len_arr = bnx2_5706_stats_len_arr;
6921         else
6922                 stats_len_arr = bnx2_5708_stats_len_arr;
6923
6924         for (i = 0; i < BNX2_NUM_STATS; i++) {
6925                 if (stats_len_arr[i] == 0) {
6926                         /* skip this counter */
6927                         buf[i] = 0;
6928                         continue;
6929                 }
6930                 if (stats_len_arr[i] == 4) {
6931                         /* 4-byte counter */
6932                         buf[i] = (u64)
6933                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6934                         continue;
6935                 }
6936                 /* 8-byte counter */
6937                 buf[i] = (((u64) *(hw_stats +
6938                                         bnx2_stats_offset_arr[i])) << 32) +
6939                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6940         }
6941 }
6942
6943 static int
6944 bnx2_phys_id(struct net_device *dev, u32 data)
6945 {
6946         struct bnx2 *bp = netdev_priv(dev);
6947         int i;
6948         u32 save;
6949
6950         if (data == 0)
6951                 data = 2;
6952
6953         save = REG_RD(bp, BNX2_MISC_CFG);
6954         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6955
6956         for (i = 0; i < (data * 2); i++) {
6957                 if ((i % 2) == 0) {
6958                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6959                 }
6960                 else {
6961                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6962                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6963                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6964                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6965                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6966                                 BNX2_EMAC_LED_TRAFFIC);
6967                 }
6968                 msleep_interruptible(500);
6969                 if (signal_pending(current))
6970                         break;
6971         }
6972         REG_WR(bp, BNX2_EMAC_LED, 0);
6973         REG_WR(bp, BNX2_MISC_CFG, save);
6974         return 0;
6975 }
6976
6977 static int
6978 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6979 {
6980         struct bnx2 *bp = netdev_priv(dev);
6981
6982         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6983                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6984         else
6985                 return (ethtool_op_set_tx_csum(dev, data));
6986 }
6987
/* ethtool method table; hooked up to the netdev in bnx2_init_one().
 * Handlers that poke the hardware must cope with the device being down.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
7018
7019 /* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held.  PHY register access is refused when the
 * PHY is managed remotely by firmware, and requires the device to be
 * up (-EAGAIN otherwise) because MDIO access needs a live chip.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* phy_lock serializes MDIO access with the link poller. */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* Writing PHY registers is privileged. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
7072
7073 /* Called with rtnl_lock */
7074 static int
7075 bnx2_change_mac_addr(struct net_device *dev, void *p)
7076 {
7077         struct sockaddr *addr = p;
7078         struct bnx2 *bp = netdev_priv(dev);
7079
7080         if (!is_valid_ether_addr(addr->sa_data))
7081                 return -EINVAL;
7082
7083         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7084         if (netif_running(dev))
7085                 bnx2_set_mac_addr(bp);
7086
7087         return 0;
7088 }
7089
7090 /* Called with rtnl_lock */
7091 static int
7092 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7093 {
7094         struct bnx2 *bp = netdev_priv(dev);
7095
7096         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7097                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7098                 return -EINVAL;
7099
7100         dev->mtu = new_mtu;
7101         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7102 }
7103
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler by hand with the device IRQ
 * disabled, so netconsole and similar can make progress when normal
 * interrupt delivery cannot be relied upon.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
}
#endif
7115
7116 static void __devinit
7117 bnx2_get_5709_media(struct bnx2 *bp)
7118 {
7119         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7120         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7121         u32 strap;
7122
7123         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7124                 return;
7125         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7126                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7127                 return;
7128         }
7129
7130         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7131                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7132         else
7133                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7134
7135         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7136                 switch (strap) {
7137                 case 0x4:
7138                 case 0x5:
7139                 case 0x6:
7140                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7141                         return;
7142                 }
7143         } else {
7144                 switch (strap) {
7145                 case 0x1:
7146                 case 0x2:
7147                 case 0x4:
7148                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7149                         return;
7150                 }
7151         }
7152 }
7153
/* Probe-time helper: decode the PCI/PCI-X bus mode, clock speed and
 * width from the chip's misc status and clock control registers, and
 * record them in bp->flags / bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= BNX2_FLAG_PCIX;

                /* PCI-X: the detected clock is encoded in the clock
                 * control register.  Note some detected frequencies are
                 * reported rounded down (95 -> 100, 80 -> 66, 55 -> 50).
                 */
                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7205
7206 static int __devinit
7207 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7208 {
7209         struct bnx2 *bp;
7210         unsigned long mem_len;
7211         int rc, i, j;
7212         u32 reg;
7213         u64 dma_mask, persist_dma_mask;
7214
7215         SET_NETDEV_DEV(dev, &pdev->dev);
7216         bp = netdev_priv(dev);
7217
7218         bp->flags = 0;
7219         bp->phy_flags = 0;
7220
7221         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7222         rc = pci_enable_device(pdev);
7223         if (rc) {
7224                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7225                 goto err_out;
7226         }
7227
7228         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7229                 dev_err(&pdev->dev,
7230                         "Cannot find PCI device base address, aborting.\n");
7231                 rc = -ENODEV;
7232                 goto err_out_disable;
7233         }
7234
7235         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7236         if (rc) {
7237                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7238                 goto err_out_disable;
7239         }
7240
7241         pci_set_master(pdev);
7242         pci_save_state(pdev);
7243
7244         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7245         if (bp->pm_cap == 0) {
7246                 dev_err(&pdev->dev,
7247                         "Cannot find power management capability, aborting.\n");
7248                 rc = -EIO;
7249                 goto err_out_release;
7250         }
7251
7252         bp->dev = dev;
7253         bp->pdev = pdev;
7254
7255         spin_lock_init(&bp->phy_lock);
7256         spin_lock_init(&bp->indirect_lock);
7257         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7258
7259         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7260         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7261         dev->mem_end = dev->mem_start + mem_len;
7262         dev->irq = pdev->irq;
7263
7264         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7265
7266         if (!bp->regview) {
7267                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7268                 rc = -ENOMEM;
7269                 goto err_out_release;
7270         }
7271
7272         /* Configure byte swap and enable write to the reg_window registers.
7273          * Rely on CPU to do target byte swapping on big endian systems
7274          * The chip's target access swapping will not swap all accesses
7275          */
7276         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7277                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7278                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7279
7280         bnx2_set_power_state(bp, PCI_D0);
7281
7282         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7283
7284         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7285                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7286                         dev_err(&pdev->dev,
7287                                 "Cannot find PCIE capability, aborting.\n");
7288                         rc = -EIO;
7289                         goto err_out_unmap;
7290                 }
7291                 bp->flags |= BNX2_FLAG_PCIE;
7292                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7293                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7294         } else {
7295                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7296                 if (bp->pcix_cap == 0) {
7297                         dev_err(&pdev->dev,
7298                                 "Cannot find PCIX capability, aborting.\n");
7299                         rc = -EIO;
7300                         goto err_out_unmap;
7301                 }
7302         }
7303
7304         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7305                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7306                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7307         }
7308
7309         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7310                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7311                         bp->flags |= BNX2_FLAG_MSI_CAP;
7312         }
7313
7314         /* 5708 cannot support DMA addresses > 40-bit.  */
7315         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7316                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7317         else
7318                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7319
7320         /* Configure DMA attributes. */
7321         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7322                 dev->features |= NETIF_F_HIGHDMA;
7323                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7324                 if (rc) {
7325                         dev_err(&pdev->dev,
7326                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7327                         goto err_out_unmap;
7328                 }
7329         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7330                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7331                 goto err_out_unmap;
7332         }
7333
7334         if (!(bp->flags & BNX2_FLAG_PCIE))
7335                 bnx2_get_pci_speed(bp);
7336
7337         /* 5706A0 may falsely detect SERR and PERR. */
7338         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7339                 reg = REG_RD(bp, PCI_COMMAND);
7340                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7341                 REG_WR(bp, PCI_COMMAND, reg);
7342         }
7343         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7344                 !(bp->flags & BNX2_FLAG_PCIX)) {
7345
7346                 dev_err(&pdev->dev,
7347                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7348                 goto err_out_unmap;
7349         }
7350
7351         bnx2_init_nvram(bp);
7352
7353         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7354
7355         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7356             BNX2_SHM_HDR_SIGNATURE_SIG) {
7357                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7358
7359                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7360         } else
7361                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7362
7363         /* Get the permanent MAC address.  First we need to make sure the
7364          * firmware is actually running.
7365          */
7366         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7367
7368         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7369             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7370                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7371                 rc = -ENODEV;
7372                 goto err_out_unmap;
7373         }
7374
7375         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7376         for (i = 0, j = 0; i < 3; i++) {
7377                 u8 num, k, skip0;
7378
7379                 num = (u8) (reg >> (24 - (i * 8)));
7380                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7381                         if (num >= k || !skip0 || k == 1) {
7382                                 bp->fw_version[j++] = (num / k) + '0';
7383                                 skip0 = 0;
7384                         }
7385                 }
7386                 if (i != 2)
7387                         bp->fw_version[j++] = '.';
7388         }
7389         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7390         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7391                 bp->wol = 1;
7392
7393         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7394                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7395
7396                 for (i = 0; i < 30; i++) {
7397                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7398                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7399                                 break;
7400                         msleep(10);
7401                 }
7402         }
7403         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7404         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7405         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7406             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7407                 int i;
7408                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7409
7410                 bp->fw_version[j++] = ' ';
7411                 for (i = 0; i < 3; i++) {
7412                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7413                         reg = swab32(reg);
7414                         memcpy(&bp->fw_version[j], &reg, 4);
7415                         j += 4;
7416                 }
7417         }
7418
7419         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7420         bp->mac_addr[0] = (u8) (reg >> 8);
7421         bp->mac_addr[1] = (u8) reg;
7422
7423         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7424         bp->mac_addr[2] = (u8) (reg >> 24);
7425         bp->mac_addr[3] = (u8) (reg >> 16);
7426         bp->mac_addr[4] = (u8) (reg >> 8);
7427         bp->mac_addr[5] = (u8) reg;
7428
7429         bp->tx_ring_size = MAX_TX_DESC_CNT;
7430         bnx2_set_rx_ring_size(bp, 255);
7431
7432         bp->rx_csum = 1;
7433
7434         bp->tx_quick_cons_trip_int = 20;
7435         bp->tx_quick_cons_trip = 20;
7436         bp->tx_ticks_int = 80;
7437         bp->tx_ticks = 80;
7438
7439         bp->rx_quick_cons_trip_int = 6;
7440         bp->rx_quick_cons_trip = 6;
7441         bp->rx_ticks_int = 18;
7442         bp->rx_ticks = 18;
7443
7444         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7445
7446         bp->timer_interval =  HZ;
7447         bp->current_interval =  HZ;
7448
7449         bp->phy_addr = 1;
7450
7451         /* Disable WOL support if we are running on a SERDES chip. */
7452         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7453                 bnx2_get_5709_media(bp);
7454         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7455                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7456
7457         bp->phy_port = PORT_TP;
7458         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7459                 bp->phy_port = PORT_FIBRE;
7460                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7461                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7462                         bp->flags |= BNX2_FLAG_NO_WOL;
7463                         bp->wol = 0;
7464                 }
7465                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7466                         /* Don't do parallel detect on this board because of
7467                          * some board problems.  The link will not go down
7468                          * if we do parallel detect.
7469                          */
7470                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7471                             pdev->subsystem_device == 0x310c)
7472                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7473                 } else {
7474                         bp->phy_addr = 2;
7475                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7476                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7477                 }
7478                 bnx2_init_remote_phy(bp);
7479
7480         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7481                    CHIP_NUM(bp) == CHIP_NUM_5708)
7482                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7483         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7484                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7485                   CHIP_REV(bp) == CHIP_REV_Bx))
7486                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7487
7488         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7489             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7490             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7491                 bp->flags |= BNX2_FLAG_NO_WOL;
7492                 bp->wol = 0;
7493         }
7494
7495         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7496                 bp->tx_quick_cons_trip_int =
7497                         bp->tx_quick_cons_trip;
7498                 bp->tx_ticks_int = bp->tx_ticks;
7499                 bp->rx_quick_cons_trip_int =
7500                         bp->rx_quick_cons_trip;
7501                 bp->rx_ticks_int = bp->rx_ticks;
7502                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7503                 bp->com_ticks_int = bp->com_ticks;
7504                 bp->cmd_ticks_int = bp->cmd_ticks;
7505         }
7506
7507         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7508          *
7509          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7510          * with byte enables disabled on the unused 32-bit word.  This is legal
7511          * but causes problems on the AMD 8132 which will eventually stop
7512          * responding after a while.
7513          *
7514          * AMD believes this incompatibility is unique to the 5706, and
7515          * prefers to locally disable MSI rather than globally disabling it.
7516          */
7517         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7518                 struct pci_dev *amd_8132 = NULL;
7519
7520                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7521                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7522                                                   amd_8132))) {
7523
7524                         if (amd_8132->revision >= 0x10 &&
7525                             amd_8132->revision <= 0x13) {
7526                                 disable_msi = 1;
7527                                 pci_dev_put(amd_8132);
7528                                 break;
7529                         }
7530                 }
7531         }
7532
7533         bnx2_set_default_link(bp);
7534         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7535
7536         init_timer(&bp->timer);
7537         bp->timer.expires = RUN_AT(bp->timer_interval);
7538         bp->timer.data = (unsigned long) bp;
7539         bp->timer.function = bnx2_timer;
7540
7541         return 0;
7542
7543 err_out_unmap:
7544         if (bp->regview) {
7545                 iounmap(bp->regview);
7546                 bp->regview = NULL;
7547         }
7548
7549 err_out_release:
7550         pci_release_regions(pdev);
7551
7552 err_out_disable:
7553         pci_disable_device(pdev);
7554         pci_set_drvdata(pdev, NULL);
7555
7556 err_out:
7557         return rc;
7558 }
7559
7560 static char * __devinit
7561 bnx2_bus_string(struct bnx2 *bp, char *str)
7562 {
7563         char *s = str;
7564
7565         if (bp->flags & BNX2_FLAG_PCIE) {
7566                 s += sprintf(s, "PCI Express");
7567         } else {
7568                 s += sprintf(s, "PCI");
7569                 if (bp->flags & BNX2_FLAG_PCIX)
7570                         s += sprintf(s, "-X");
7571                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7572                         s += sprintf(s, " 32-bit");
7573                 else
7574                         s += sprintf(s, " 64-bit");
7575                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7576         }
7577         return str;
7578 }
7579
7580 static void __devinit
7581 bnx2_init_napi(struct bnx2 *bp)
7582 {
7583         int i;
7584
7585         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7586                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7587                 int (*poll)(struct napi_struct *, int);
7588
7589                 if (i == 0)
7590                         poll = bnx2_poll;
7591                 else
7592                         poll = bnx2_poll_msix;
7593
7594                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7595                 bnapi->bp = bp;
7596         }
7597 }
7598
/* PCI probe entry point: allocate the net_device, initialize the board,
 * wire up the netdev/ethtool callbacks and feature flags, and register
 * the interface with the networking core.  Returns 0 on success or a
 * negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;	/* print the driver banner only once */
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* bnx2_init_board() unwinds its own setup on failure (see its
	 * err_out_* labels), so only the netdev needs freeing here. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* Net device entry points. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* Copy the board MAC (filled in by bnx2_init_board(), presumably
	 * from NVRAM — confirm against that function) into the netdev. */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum offload and scatter/gather; 5709 also offloads IPv6
	 * checksums and TSO6. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Undo everything bnx2_init_board() set up. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7686
/* PCI remove entry point: unregister the interface and release every
 * resource acquired at probe time, in reverse order of acquisition.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred driver work (kernel workqueue) is still
	 * running before the device goes away. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	/* bp lives in dev's private area, so free_netdev() frees it too;
	 * bp must not be touched after this point. */
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7705
/* Legacy PM suspend hook: save PCI state, and if the interface is up,
 * quiesce it, tell the chip why it is going down (so WOL can be honored
 * when possible), and drop to the requested power state.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, traffic, and the driver timer before
	 * resetting the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick a reset code reflecting WOL capability/config for
	 * bnx2_reset_chip(). */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7736
/* Legacy PM resume hook: restore PCI state and, if the interface was up
 * at suspend, bring the hardware back to D0, re-initialize it, and
 * restart traffic.  Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;	/* nothing was torn down at suspend */

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): bnx2_init_nic()'s return value is ignored here; a
	 * failed re-init would leave the device attached but not working. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7753
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		/* Quiesce traffic and put the chip into a known state
		 * before the slot is reset. */
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
7783
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Only re-initialize the hardware if the interface was up when
	 * the error was detected. */
	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
7813
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart traffic only if the interface was running; in either
	 * case re-attach the device that was detached in
	 * bnx2_io_error_detected(). */
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
7833
/* PCI error-recovery callbacks (detect -> slot reset -> resume). */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected = bnx2_io_error_detected,
	.slot_reset     = bnx2_io_slot_reset,
	.resume         = bnx2_io_resume,
};
7839
/* PCI driver glue: probe/remove, power management, and error recovery. */
static struct pci_driver bnx2_pci_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = bnx2_pci_tbl,
	.probe          = bnx2_init_one,
	.remove         = __devexit_p(bnx2_remove_one),
	.suspend        = bnx2_suspend,
	.resume         = bnx2_resume,
	.err_handler    = &bnx2_err_handler,
};
7849
/* Module load: register the PCI driver; per-device setup happens in the
 * probe callback (bnx2_init_one). */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7854
/* Module unload: unregister the PCI driver, which removes all devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7859
/* Hook the load/unload entry points into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7862
7863
7864