1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
15 #define DRV_MODULE_NAME "bnx2"
16 #define PFX DRV_MODULE_NAME ": "
17 #define DRV_MODULE_VERSION "1.4.31"
18 #define DRV_MODULE_RELDATE "January 19, 2006"
20 #define RUN_AT(x) (jiffies + (x))
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT (5*HZ)
25 static char version[] __devinitdata =
26 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
33 static int disable_msi = 0;
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
48 /* indexed by board_t, above */
51 } board_info[] __devinitdata = {
52 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
53 { "HP NC370T Multifunction Gigabit Server Adapter" },
54 { "HP NC370i Multifunction Gigabit Server Adapter" },
55 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
56 { "HP NC370F Multifunction Gigabit Server Adapter" },
57 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
58 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
61 static struct pci_device_id bnx2_pci_tbl[] = {
62 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
63 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
64 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
65 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
66 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
67 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
68 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
69 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
70 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
71 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
72 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
73 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
74 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
75 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
79 static struct flash_spec flash_table[] =
82 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
83 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
84 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
86 /* Expansion entry 0001 */
87 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
88 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
89 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
91 /* Saifun SA25F010 (non-buffered flash) */
92 /* strap, cfg1, & write1 need updates */
93 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
94 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
95 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
96 "Non-buffered flash (128kB)"},
97 /* Saifun SA25F020 (non-buffered flash) */
98 /* strap, cfg1, & write1 need updates */
99 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
100 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
101 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
102 "Non-buffered flash (256kB)"},
103 /* Expansion entry 0100 */
104 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
105 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
106 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
108 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
109 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
110 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
111 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
112 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
113 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
114 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
115 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
116 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
117 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
118 /* Saifun SA25F005 (non-buffered flash) */
119 /* strap, cfg1, & write1 need updates */
120 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
121 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
122 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
123 "Non-buffered flash (64kB)"},
125 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
126 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 /* Expansion entry 1001 */
130 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
131 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 /* Expansion entry 1010 */
135 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* ATMEL AT45DB011B (buffered flash) */
140 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
141 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
142 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
143 "Buffered flash (128kB)"},
144 /* Expansion entry 1100 */
145 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 /* Expansion entry 1101 */
150 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
151 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
154 /* Atmel Expansion entry 1110 */
155 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
156 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
157 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
158 "Entry 1110 (Atmel)"},
159 /* ATMEL AT45DB021B (buffered flash) */
160 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
161 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
162 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
163 "Buffered flash (256kB)"},
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
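/* Number of TX descriptors still available to the driver.  The producer
 * and consumer indices are free-running counters, so their masked
 * difference may wrap; when it does, it is folded back into the ring and
 * one slot is subtracted for the last BD of the page, which chains to
 * the next page rather than carrying data.  For example (with an
 * illustrative 256-entry ring), a producer at ring index 5 and a
 * consumer at ring index 250 span 11 slots, one of them the chain BD,
 * so 10 BDs count as in use.
 */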
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
170 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
172 if (diff > MAX_TX_DESC_CNT)
173 diff = (diff & MAX_TX_DESC_CNT) - 1;
174 return (bp->tx_ring_size - diff);
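/* Registers outside the directly mapped window are accessed indirectly:
 * the target offset is written to BNX2_PCICFG_REG_WINDOW_ADDRESS and
 * the data is then read or written through BNX2_PCICFG_REG_WINDOW.
 */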
178 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
180 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
185 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
187 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
188 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
192 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
195 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196 REG_WR(bp, BNX2_CTX_DATA, val);
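/* PHY registers are reached over MDIO through BNX2_EMAC_MDIO_COMM.  If
 * hardware auto-polling of the PHY is active, it is paused first so the
 * manual transaction does not collide with it; the command word (PHY
 * address, register, opcode and START_BUSY) is then written and the
 * code polls up to 50 times for START_BUSY to clear before giving up.
 * Auto-polling is restored afterwards.
 */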
200 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
205 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
206 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
207 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
209 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
210 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
215 val1 = (bp->phy_addr << 21) | (reg << 16) |
216 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
217 BNX2_EMAC_MDIO_COMM_START_BUSY;
218 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
220 for (i = 0; i < 50; i++) {
223 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
224 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
227 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
228 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
234 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
243 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
244 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
245 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
247 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
248 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
257 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
262 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
263 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
264 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
266 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
267 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
272 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
273 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
274 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
275 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
277 for (i = 0; i < 50; i++) {
280 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
281 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
287 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
292 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
293 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
294 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
296 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
297 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
306 bnx2_disable_int(struct bnx2 *bp)
308 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
309 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
310 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
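/* Interrupts are re-armed by acknowledging the last seen status block
 * index twice, first with MASK_INT still set and then without it.  A
 * COAL_NOW command is then issued to the host coalescing block so that
 * a fresh status block update is generated for any events that arrived
 * while interrupts were masked.
 */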
314 bnx2_enable_int(struct bnx2 *bp)
318 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
319 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
320 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
322 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
323 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
325 val = REG_RD(bp, BNX2_HC_COMMAND);
326 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
330 bnx2_disable_int_sync(struct bnx2 *bp)
332 atomic_inc(&bp->intr_sem);
333 bnx2_disable_int(bp);
334 synchronize_irq(bp->pdev->irq);
338 bnx2_netif_stop(struct bnx2 *bp)
340 bnx2_disable_int_sync(bp);
341 if (netif_running(bp->dev)) {
342 netif_poll_disable(bp->dev);
343 netif_tx_disable(bp->dev);
344 bp->dev->trans_start = jiffies; /* prevent tx timeout */
349 bnx2_netif_start(struct bnx2 *bp)
351 if (atomic_dec_and_test(&bp->intr_sem)) {
352 if (netif_running(bp->dev)) {
353 netif_wake_queue(bp->dev);
354 netif_poll_enable(bp->dev);
361 bnx2_free_mem(struct bnx2 *bp)
364 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
365 bp->stats_blk, bp->stats_blk_mapping);
366 bp->stats_blk = NULL;
368 if (bp->status_blk) {
369 pci_free_consistent(bp->pdev, sizeof(struct status_block),
370 bp->status_blk, bp->status_blk_mapping);
371 bp->status_blk = NULL;
373 if (bp->tx_desc_ring) {
374 pci_free_consistent(bp->pdev,
375 sizeof(struct tx_bd) * TX_DESC_CNT,
376 bp->tx_desc_ring, bp->tx_desc_mapping);
377 bp->tx_desc_ring = NULL;
379 kfree(bp->tx_buf_ring);
380 bp->tx_buf_ring = NULL;
381 if (bp->rx_desc_ring) {
382 pci_free_consistent(bp->pdev,
383 sizeof(struct rx_bd) * RX_DESC_CNT,
384 bp->rx_desc_ring, bp->rx_desc_mapping);
385 bp->rx_desc_ring = NULL;
387 kfree(bp->rx_buf_ring);
388 bp->rx_buf_ring = NULL;
392 bnx2_alloc_mem(struct bnx2 *bp)
394 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
396 if (bp->tx_buf_ring == NULL)
399 memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
400 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
401 sizeof(struct tx_bd) *
403 &bp->tx_desc_mapping);
404 if (bp->tx_desc_ring == NULL)
407 bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
409 if (bp->rx_buf_ring == NULL)
412 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
413 bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
414 sizeof(struct rx_bd) *
416 &bp->rx_desc_mapping);
417 if (bp->rx_desc_ring == NULL)
420 bp->status_blk = pci_alloc_consistent(bp->pdev,
421 sizeof(struct status_block),
422 &bp->status_blk_mapping);
423 if (bp->status_blk == NULL)
426 memset(bp->status_blk, 0, sizeof(struct status_block));
428 bp->stats_blk = pci_alloc_consistent(bp->pdev,
429 sizeof(struct statistics_block),
430 &bp->stats_blk_mapping);
431 if (bp->stats_blk == NULL)
434 memset(bp->stats_blk, 0, sizeof(struct statistics_block));
444 bnx2_report_fw_link(struct bnx2 *bp)
446 u32 fw_link_status = 0;
451 switch (bp->line_speed) {
453 if (bp->duplex == DUPLEX_HALF)
454 fw_link_status = BNX2_LINK_STATUS_10HALF;
456 fw_link_status = BNX2_LINK_STATUS_10FULL;
459 if (bp->duplex == DUPLEX_HALF)
460 fw_link_status = BNX2_LINK_STATUS_100HALF;
462 fw_link_status = BNX2_LINK_STATUS_100FULL;
465 if (bp->duplex == DUPLEX_HALF)
466 fw_link_status = BNX2_LINK_STATUS_1000HALF;
468 fw_link_status = BNX2_LINK_STATUS_1000FULL;
471 if (bp->duplex == DUPLEX_HALF)
472 fw_link_status = BNX2_LINK_STATUS_2500HALF;
474 fw_link_status = BNX2_LINK_STATUS_2500FULL;
478 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
481 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
483 bnx2_read_phy(bp, MII_BMSR, &bmsr);
484 bnx2_read_phy(bp, MII_BMSR, &bmsr);
486 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
487 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
488 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
490 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
494 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
496 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
500 bnx2_report_link(struct bnx2 *bp)
503 netif_carrier_on(bp->dev);
504 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
506 printk("%d Mbps ", bp->line_speed);
508 if (bp->duplex == DUPLEX_FULL)
509 printk("full duplex");
511 printk("half duplex");
514 if (bp->flow_ctrl & FLOW_CTRL_RX) {
515 printk(", receive ");
516 if (bp->flow_ctrl & FLOW_CTRL_TX)
517 printk("& transmit ");
520 printk(", transmit ");
522 printk("flow control ON");
527 netif_carrier_off(bp->dev);
528 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
531 bnx2_report_fw_link(bp);
535 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
537 u32 local_adv, remote_adv;
540 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
541 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
543 if (bp->duplex == DUPLEX_FULL) {
544 bp->flow_ctrl = bp->req_flow_ctrl;
549 if (bp->duplex != DUPLEX_FULL) {
553 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
554 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
557 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
558 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
559 bp->flow_ctrl |= FLOW_CTRL_TX;
560 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
561 bp->flow_ctrl |= FLOW_CTRL_RX;
565 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
566 bnx2_read_phy(bp, MII_LPA, &remote_adv);
568 if (bp->phy_flags & PHY_SERDES_FLAG) {
569 u32 new_local_adv = 0;
570 u32 new_remote_adv = 0;
572 if (local_adv & ADVERTISE_1000XPAUSE)
573 new_local_adv |= ADVERTISE_PAUSE_CAP;
574 if (local_adv & ADVERTISE_1000XPSE_ASYM)
575 new_local_adv |= ADVERTISE_PAUSE_ASYM;
576 if (remote_adv & ADVERTISE_1000XPAUSE)
577 new_remote_adv |= ADVERTISE_PAUSE_CAP;
578 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
579 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
581 local_adv = new_local_adv;
582 remote_adv = new_remote_adv;
585 /* See Table 28B-3 of 802.3ab-1999 spec. */
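/* Pause resolution, as implemented below:
 *   local CAP+ASYM, remote CAP        -> TX and RX pause
 *   local CAP+ASYM, remote ASYM only  -> RX pause only
 *   local CAP only, remote CAP        -> TX and RX pause
 *   local ASYM only, remote CAP+ASYM  -> TX pause only
 */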
586 if (local_adv & ADVERTISE_PAUSE_CAP) {
587 if(local_adv & ADVERTISE_PAUSE_ASYM) {
588 if (remote_adv & ADVERTISE_PAUSE_CAP) {
589 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
591 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
592 bp->flow_ctrl = FLOW_CTRL_RX;
596 if (remote_adv & ADVERTISE_PAUSE_CAP) {
597 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
601 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
602 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
603 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
605 bp->flow_ctrl = FLOW_CTRL_TX;
611 bnx2_5708s_linkup(struct bnx2 *bp)
616 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
617 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
618 case BCM5708S_1000X_STAT1_SPEED_10:
619 bp->line_speed = SPEED_10;
621 case BCM5708S_1000X_STAT1_SPEED_100:
622 bp->line_speed = SPEED_100;
624 case BCM5708S_1000X_STAT1_SPEED_1G:
625 bp->line_speed = SPEED_1000;
627 case BCM5708S_1000X_STAT1_SPEED_2G5:
628 bp->line_speed = SPEED_2500;
631 if (val & BCM5708S_1000X_STAT1_FD)
632 bp->duplex = DUPLEX_FULL;
634 bp->duplex = DUPLEX_HALF;
640 bnx2_5706s_linkup(struct bnx2 *bp)
642 u32 bmcr, local_adv, remote_adv, common;
645 bp->line_speed = SPEED_1000;
647 bnx2_read_phy(bp, MII_BMCR, &bmcr);
648 if (bmcr & BMCR_FULLDPLX) {
649 bp->duplex = DUPLEX_FULL;
652 bp->duplex = DUPLEX_HALF;
655 if (!(bmcr & BMCR_ANENABLE)) {
659 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
660 bnx2_read_phy(bp, MII_LPA, &remote_adv);
662 common = local_adv & remote_adv;
663 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
665 if (common & ADVERTISE_1000XFULL) {
666 bp->duplex = DUPLEX_FULL;
669 bp->duplex = DUPLEX_HALF;
677 bnx2_copper_linkup(struct bnx2 *bp)
681 bnx2_read_phy(bp, MII_BMCR, &bmcr);
682 if (bmcr & BMCR_ANENABLE) {
683 u32 local_adv, remote_adv, common;
685 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
686 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
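/* The link partner's 1000BASE-T ability bits in MII_STAT1000 sit two
 * bit positions above the corresponding advertisement bits in
 * MII_CTRL1000, hence the shift before masking.
 */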
688 common = local_adv & (remote_adv >> 2);
689 if (common & ADVERTISE_1000FULL) {
690 bp->line_speed = SPEED_1000;
691 bp->duplex = DUPLEX_FULL;
693 else if (common & ADVERTISE_1000HALF) {
694 bp->line_speed = SPEED_1000;
695 bp->duplex = DUPLEX_HALF;
698 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
699 bnx2_read_phy(bp, MII_LPA, &remote_adv);
701 common = local_adv & remote_adv;
702 if (common & ADVERTISE_100FULL) {
703 bp->line_speed = SPEED_100;
704 bp->duplex = DUPLEX_FULL;
706 else if (common & ADVERTISE_100HALF) {
707 bp->line_speed = SPEED_100;
708 bp->duplex = DUPLEX_HALF;
710 else if (common & ADVERTISE_10FULL) {
711 bp->line_speed = SPEED_10;
712 bp->duplex = DUPLEX_FULL;
714 else if (common & ADVERTISE_10HALF) {
715 bp->line_speed = SPEED_10;
716 bp->duplex = DUPLEX_HALF;
725 if (bmcr & BMCR_SPEED100) {
726 bp->line_speed = SPEED_100;
729 bp->line_speed = SPEED_10;
731 if (bmcr & BMCR_FULLDPLX) {
732 bp->duplex = DUPLEX_FULL;
735 bp->duplex = DUPLEX_HALF;
743 bnx2_set_mac_link(struct bnx2 *bp)
747 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
748 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
749 (bp->duplex == DUPLEX_HALF)) {
750 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
753 /* Configure the EMAC mode register. */
754 val = REG_RD(bp, BNX2_EMAC_MODE);
756 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
757 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
761 switch (bp->line_speed) {
763 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
764 val |= BNX2_EMAC_MODE_PORT_MII_10;
769 val |= BNX2_EMAC_MODE_PORT_MII;
772 val |= BNX2_EMAC_MODE_25G;
775 val |= BNX2_EMAC_MODE_PORT_GMII;
780 val |= BNX2_EMAC_MODE_PORT_GMII;
783 /* Set the MAC to operate in the appropriate duplex mode. */
784 if (bp->duplex == DUPLEX_HALF)
785 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
786 REG_WR(bp, BNX2_EMAC_MODE, val);
788 /* Enable/disable rx PAUSE. */
789 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
791 if (bp->flow_ctrl & FLOW_CTRL_RX)
792 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
793 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
795 /* Enable/disable tx PAUSE. */
796 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
797 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
799 if (bp->flow_ctrl & FLOW_CTRL_TX)
800 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
801 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
803 /* Acknowledge the interrupt. */
804 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
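/* Re-evaluate the link state: BMSR is read twice because the link bit
 * latches low and the second read reflects the current status; on the
 * 5706 SerDes the link indication is taken from EMAC_STATUS instead.
 * When the link is up, the PHY-specific helper fills in speed and
 * duplex, flow control is resolved, the change is reported, and the
 * MAC is reprogrammed via bnx2_set_mac_link().
 */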
810 bnx2_set_link(struct bnx2 *bp)
815 if (bp->loopback == MAC_LOOPBACK) {
820 link_up = bp->link_up;
822 bnx2_read_phy(bp, MII_BMSR, &bmsr);
823 bnx2_read_phy(bp, MII_BMSR, &bmsr);
825 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
826 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
829 val = REG_RD(bp, BNX2_EMAC_STATUS);
830 if (val & BNX2_EMAC_STATUS_LINK)
831 bmsr |= BMSR_LSTATUS;
833 bmsr &= ~BMSR_LSTATUS;
836 if (bmsr & BMSR_LSTATUS) {
839 if (bp->phy_flags & PHY_SERDES_FLAG) {
840 if (CHIP_NUM(bp) == CHIP_NUM_5706)
841 bnx2_5706s_linkup(bp);
842 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
843 bnx2_5708s_linkup(bp);
846 bnx2_copper_linkup(bp);
848 bnx2_resolve_flow_ctrl(bp);
851 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
852 (bp->autoneg & AUTONEG_SPEED)) {
856 bnx2_read_phy(bp, MII_BMCR, &bmcr);
857 if (!(bmcr & BMCR_ANENABLE)) {
858 bnx2_write_phy(bp, MII_BMCR, bmcr |
862 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
866 if (bp->link_up != link_up) {
867 bnx2_report_link(bp);
870 bnx2_set_mac_link(bp);
876 bnx2_reset_phy(struct bnx2 *bp)
881 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
883 #define PHY_RESET_MAX_WAIT 100
884 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
887 bnx2_read_phy(bp, MII_BMCR, &reg);
888 if (!(reg & BMCR_RESET)) {
893 if (i == PHY_RESET_MAX_WAIT) {
900 bnx2_phy_get_pause_adv(struct bnx2 *bp)
904 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
905 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
907 if (bp->phy_flags & PHY_SERDES_FLAG) {
908 adv = ADVERTISE_1000XPAUSE;
911 adv = ADVERTISE_PAUSE_CAP;
914 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
915 if (bp->phy_flags & PHY_SERDES_FLAG) {
916 adv = ADVERTISE_1000XPSE_ASYM;
919 adv = ADVERTISE_PAUSE_ASYM;
922 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
923 if (bp->phy_flags & PHY_SERDES_FLAG) {
924 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
927 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
934 bnx2_setup_serdes_phy(struct bnx2 *bp)
939 if (!(bp->autoneg & AUTONEG_SPEED)) {
941 int force_link_down = 0;
943 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
944 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
945 if (up1 & BCM5708S_UP1_2G5) {
946 up1 &= ~BCM5708S_UP1_2G5;
947 bnx2_write_phy(bp, BCM5708S_UP1, up1);
952 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
953 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
955 bnx2_read_phy(bp, MII_BMCR, &bmcr);
956 new_bmcr = bmcr & ~BMCR_ANENABLE;
957 new_bmcr |= BMCR_SPEED1000;
958 if (bp->req_duplex == DUPLEX_FULL) {
959 adv |= ADVERTISE_1000XFULL;
960 new_bmcr |= BMCR_FULLDPLX;
963 adv |= ADVERTISE_1000XHALF;
964 new_bmcr &= ~BMCR_FULLDPLX;
966 if ((new_bmcr != bmcr) || (force_link_down)) {
967 /* Force a link down visible on the other side */
969 bnx2_write_phy(bp, MII_ADVERTISE, adv &
970 ~(ADVERTISE_1000XFULL |
971 ADVERTISE_1000XHALF));
972 bnx2_write_phy(bp, MII_BMCR, bmcr |
973 BMCR_ANRESTART | BMCR_ANENABLE);
976 netif_carrier_off(bp->dev);
977 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
979 bnx2_write_phy(bp, MII_ADVERTISE, adv);
980 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
985 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
986 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
987 up1 |= BCM5708S_UP1_2G5;
988 bnx2_write_phy(bp, BCM5708S_UP1, up1);
991 if (bp->advertising & ADVERTISED_1000baseT_Full)
992 new_adv |= ADVERTISE_1000XFULL;
994 new_adv |= bnx2_phy_get_pause_adv(bp);
996 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
997 bnx2_read_phy(bp, MII_BMCR, &bmcr);
999 bp->serdes_an_pending = 0;
1000 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1001 /* Force a link down visible on the other side */
1005 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1006 for (i = 0; i < 110; i++) {
1011 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1012 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1014 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1015 /* Speed up link-up time when the link partner
1016 * does not autonegotiate, which is very common
1017 * in blade servers. Some blade servers use
1018 * IPMI for keyboard input and it's important
1019 * to minimize link disruptions. Autoneg. involves
1020 * exchanging base pages plus 3 next pages and
1021 * normally completes in about 120 msec.
1023 bp->current_interval = SERDES_AN_TIMEOUT;
1024 bp->serdes_an_pending = 1;
1025 mod_timer(&bp->timer, jiffies + bp->current_interval);
1032 #define ETHTOOL_ALL_FIBRE_SPEED \
1033 (ADVERTISED_1000baseT_Full)
1035 #define ETHTOOL_ALL_COPPER_SPEED \
1036 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1037 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1038 ADVERTISED_1000baseT_Full)
1040 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1041 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1043 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1046 bnx2_setup_copper_phy(struct bnx2 *bp)
1051 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 u32 adv_reg, adv1000_reg;
1055 u32 new_adv_reg = 0;
1056 u32 new_adv1000_reg = 0;
1058 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1059 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1060 ADVERTISE_PAUSE_ASYM);
1062 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1063 adv1000_reg &= PHY_ALL_1000_SPEED;
1065 if (bp->advertising & ADVERTISED_10baseT_Half)
1066 new_adv_reg |= ADVERTISE_10HALF;
1067 if (bp->advertising & ADVERTISED_10baseT_Full)
1068 new_adv_reg |= ADVERTISE_10FULL;
1069 if (bp->advertising & ADVERTISED_100baseT_Half)
1070 new_adv_reg |= ADVERTISE_100HALF;
1071 if (bp->advertising & ADVERTISED_100baseT_Full)
1072 new_adv_reg |= ADVERTISE_100FULL;
1073 if (bp->advertising & ADVERTISED_1000baseT_Full)
1074 new_adv1000_reg |= ADVERTISE_1000FULL;
1076 new_adv_reg |= ADVERTISE_CSMA;
1078 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1080 if ((adv1000_reg != new_adv1000_reg) ||
1081 (adv_reg != new_adv_reg) ||
1082 ((bmcr & BMCR_ANENABLE) == 0)) {
1084 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1085 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1086 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1089 else if (bp->link_up) {
1090 /* Flow ctrl may have changed from auto to forced */
1091 /* or vice-versa. */
1093 bnx2_resolve_flow_ctrl(bp);
1094 bnx2_set_mac_link(bp);
1100 if (bp->req_line_speed == SPEED_100) {
1101 new_bmcr |= BMCR_SPEED100;
1103 if (bp->req_duplex == DUPLEX_FULL) {
1104 new_bmcr |= BMCR_FULLDPLX;
1106 if (new_bmcr != bmcr) {
1110 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1111 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1113 if (bmsr & BMSR_LSTATUS) {
1114 /* Force link down */
1115 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1118 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1119 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1121 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1124 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1126 /* Normally, the new speed is setup after the link has
1127 * gone down and up again. In some cases, link will not go
1128 * down so we need to set up the new speed here.
1130 if (bmsr & BMSR_LSTATUS) {
1131 bp->line_speed = bp->req_line_speed;
1132 bp->duplex = bp->req_duplex;
1133 bnx2_resolve_flow_ctrl(bp);
1134 bnx2_set_mac_link(bp);
1141 bnx2_setup_phy(struct bnx2 *bp)
1143 if (bp->loopback == MAC_LOOPBACK)
1146 if (bp->phy_flags & PHY_SERDES_FLAG) {
1147 return (bnx2_setup_serdes_phy(bp));
1150 return (bnx2_setup_copper_phy(bp));
1155 bnx2_init_5708s_phy(struct bnx2 *bp)
1159 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1160 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1161 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1163 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1164 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1165 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1167 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1168 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1169 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1171 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1172 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1173 val |= BCM5708S_UP1_2G5;
1174 bnx2_write_phy(bp, BCM5708S_UP1, val);
1177 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1178 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1179 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1180 /* increase tx signal amplitude */
1181 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1182 BCM5708S_BLK_ADDR_TX_MISC);
1183 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1184 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1185 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1186 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1189 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1190 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1195 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1196 BNX2_SHARED_HW_CFG_CONFIG);
1197 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1198 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1199 BCM5708S_BLK_ADDR_TX_MISC);
1200 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1201 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1202 BCM5708S_BLK_ADDR_DIG);
1209 bnx2_init_5706s_phy(struct bnx2 *bp)
1211 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1213 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1214 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1217 if (bp->dev->mtu > 1500) {
1220 /* Set extended packet length bit */
1221 bnx2_write_phy(bp, 0x18, 0x7);
1222 bnx2_read_phy(bp, 0x18, &val);
1223 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1225 bnx2_write_phy(bp, 0x1c, 0x6c00);
1226 bnx2_read_phy(bp, 0x1c, &val);
1227 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1232 bnx2_write_phy(bp, 0x18, 0x7);
1233 bnx2_read_phy(bp, 0x18, &val);
1234 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1236 bnx2_write_phy(bp, 0x1c, 0x6c00);
1237 bnx2_read_phy(bp, 0x1c, &val);
1238 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1245 bnx2_init_copper_phy(struct bnx2 *bp)
1249 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1251 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1252 bnx2_write_phy(bp, 0x18, 0x0c00);
1253 bnx2_write_phy(bp, 0x17, 0x000a);
1254 bnx2_write_phy(bp, 0x15, 0x310b);
1255 bnx2_write_phy(bp, 0x17, 0x201f);
1256 bnx2_write_phy(bp, 0x15, 0x9506);
1257 bnx2_write_phy(bp, 0x17, 0x401f);
1258 bnx2_write_phy(bp, 0x15, 0x14e2);
1259 bnx2_write_phy(bp, 0x18, 0x0400);
1262 if (bp->dev->mtu > 1500) {
1263 /* Set extended packet length bit */
1264 bnx2_write_phy(bp, 0x18, 0x7);
1265 bnx2_read_phy(bp, 0x18, &val);
1266 bnx2_write_phy(bp, 0x18, val | 0x4000);
1268 bnx2_read_phy(bp, 0x10, &val);
1269 bnx2_write_phy(bp, 0x10, val | 0x1);
1272 bnx2_write_phy(bp, 0x18, 0x7);
1273 bnx2_read_phy(bp, 0x18, &val);
1274 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1276 bnx2_read_phy(bp, 0x10, &val);
1277 bnx2_write_phy(bp, 0x10, val & ~0x1);
1280 /* ethernet@wirespeed */
1281 bnx2_write_phy(bp, 0x18, 0x7007);
1282 bnx2_read_phy(bp, 0x18, &val);
1283 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1289 bnx2_init_phy(struct bnx2 *bp)
1294 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1295 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1297 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1301 bnx2_read_phy(bp, MII_PHYSID1, &val);
1302 bp->phy_id = val << 16;
1303 bnx2_read_phy(bp, MII_PHYSID2, &val);
1304 bp->phy_id |= val & 0xffff;
1306 if (bp->phy_flags & PHY_SERDES_FLAG) {
1307 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1308 rc = bnx2_init_5706s_phy(bp);
1309 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1310 rc = bnx2_init_5708s_phy(bp);
1313 rc = bnx2_init_copper_phy(bp);
1322 bnx2_set_mac_loopback(struct bnx2 *bp)
1326 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1327 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1328 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1329 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1334 static int bnx2_test_link(struct bnx2 *);
1337 bnx2_set_phy_loopback(struct bnx2 *bp)
1342 spin_lock_bh(&bp->phy_lock);
1343 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1345 spin_unlock_bh(&bp->phy_lock);
1349 for (i = 0; i < 10; i++) {
1350 if (bnx2_test_link(bp) == 0)
1355 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1356 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1357 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1358 BNX2_EMAC_MODE_25G);
1360 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1361 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
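/* Driver/firmware handshake through shared memory: the message, tagged
 * with a sequence number, is posted to the BNX2_DRV_MB mailbox and the
 * firmware is expected to echo the sequence back in BNX2_FW_MB.  If no
 * acknowledgement arrives in time, a FW_TIMEOUT code is posted so the
 * firmware knows the driver gave up waiting.
 */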
1367 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1373 msg_data |= bp->fw_wr_seq;
1375 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1377 /* wait for an acknowledgement. */
1378 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1381 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1383 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1386 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1389 /* If we timed out, inform the firmware that this is the case. */
1390 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1392 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1395 msg_data &= ~BNX2_DRV_MSG_CODE;
1396 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1398 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1403 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1410 bnx2_init_context(struct bnx2 *bp)
1416 u32 vcid_addr, pcid_addr, offset;
1420 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1423 vcid_addr = GET_PCID_ADDR(vcid);
1425 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1430 pcid_addr = GET_PCID_ADDR(new_vcid);
1433 vcid_addr = GET_CID_ADDR(vcid);
1434 pcid_addr = vcid_addr;
1437 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1438 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1440 /* Zero out the context. */
1441 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1442 CTX_WR(bp, 0x00, offset, 0);
1445 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1446 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
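/* Work around bad on-chip RX buffer memory: request every free mbuf
 * cluster from the RX buffer allocator, remember the ones whose
 * returned value tests good (bit 9 clear), and then free only those
 * back to the pool.  Clusters flagged bad are never returned, so the
 * hardware can no longer hand them out.
 */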
1451 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1457 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1458 if (good_mbuf == NULL) {
1459 printk(KERN_ERR PFX "Failed to allocate memory in "
1460 "bnx2_alloc_bad_rbuf\n");
1464 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1465 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1469 /* Allocate a bunch of mbufs and save the good ones in an array. */
1470 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1471 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1472 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1474 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1476 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1478 /* The addresses with Bit 9 set are bad memory blocks. */
1479 if (!(val & (1 << 9))) {
1480 good_mbuf[good_mbuf_cnt] = (u16) val;
1484 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1487 /* Free the good ones back to the mbuf pool thus discarding
1488 * all the bad ones. */
1489 while (good_mbuf_cnt) {
1492 val = good_mbuf[good_mbuf_cnt];
1493 val = (val << 9) | val | 1;
1495 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1502 bnx2_set_mac_addr(struct bnx2 *bp)
1505 u8 *mac_addr = bp->dev->dev_addr;
1507 val = (mac_addr[0] << 8) | mac_addr[1];
1509 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1511 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1512 (mac_addr[4] << 8) | mac_addr[5];
1514 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
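/* Allocate and DMA-map a fresh receive skb for ring slot 'index'.  The
 * data pointer is aligned to an 8-byte boundary before mapping, the
 * 64-bit bus address is split across the high and low halves of the
 * rx_bd, and rx_prod_bseq tracks the running byte count posted to the
 * hardware.
 */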
1518 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1520 struct sk_buff *skb;
1521 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1523 struct rx_bd *rxbd = &bp->rx_desc_ring[index];
1524 unsigned long align;
1526 skb = dev_alloc_skb(bp->rx_buf_size);
1531 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1532 skb_reserve(skb, 8 - align);
1536 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1537 PCI_DMA_FROMDEVICE);
1540 pci_unmap_addr_set(rx_buf, mapping, mapping);
1542 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1543 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1545 bp->rx_prod_bseq += bp->rx_buf_use_size;
1551 bnx2_phy_int(struct bnx2 *bp)
1553 u32 new_link_state, old_link_state;
1555 new_link_state = bp->status_blk->status_attn_bits &
1556 STATUS_ATTN_BITS_LINK_STATE;
1557 old_link_state = bp->status_blk->status_attn_bits_ack &
1558 STATUS_ATTN_BITS_LINK_STATE;
1559 if (new_link_state != old_link_state) {
1560 if (new_link_state) {
1561 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1562 STATUS_ATTN_BITS_LINK_STATE);
1565 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1566 STATUS_ATTN_BITS_LINK_STATE);
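/* Reclaim completed transmit buffers.  The hardware consumer index
 * comes from the status block; for TSO packets the chip may report
 * partial BD completions, so a packet's fragments are only unmapped
 * once its last BD is past the hardware consumer.  If the queue was
 * stopped for lack of descriptors, it is woken once more than
 * MAX_SKB_FRAGS BDs are free again.
 */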
1573 bnx2_tx_int(struct bnx2 *bp)
1575 struct status_block *sblk = bp->status_blk;
1576 u16 hw_cons, sw_cons, sw_ring_cons;
1579 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1580 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1583 sw_cons = bp->tx_cons;
1585 while (sw_cons != hw_cons) {
1586 struct sw_bd *tx_buf;
1587 struct sk_buff *skb;
1590 sw_ring_cons = TX_RING_IDX(sw_cons);
1592 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1595 /* partial BD completions possible with TSO packets */
1596 if (skb_shinfo(skb)->tso_size) {
1597 u16 last_idx, last_ring_idx;
1599 last_idx = sw_cons +
1600 skb_shinfo(skb)->nr_frags + 1;
1601 last_ring_idx = sw_ring_cons +
1602 skb_shinfo(skb)->nr_frags + 1;
1603 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1606 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1611 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1612 skb_headlen(skb), PCI_DMA_TODEVICE);
1615 last = skb_shinfo(skb)->nr_frags;
1617 for (i = 0; i < last; i++) {
1618 sw_cons = NEXT_TX_BD(sw_cons);
1620 pci_unmap_page(bp->pdev,
1622 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1624 skb_shinfo(skb)->frags[i].size,
1628 sw_cons = NEXT_TX_BD(sw_cons);
1630 tx_free_bd += last + 1;
1632 dev_kfree_skb_irq(skb);
1634 hw_cons = bp->hw_tx_cons =
1635 sblk->status_tx_quick_consumer_index0;
1637 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1642 bp->tx_cons = sw_cons;
1644 if (unlikely(netif_queue_stopped(bp->dev))) {
1645 spin_lock(&bp->tx_lock);
1646 if ((netif_queue_stopped(bp->dev)) &&
1647 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1649 netif_wake_queue(bp->dev);
1651 spin_unlock(&bp->tx_lock);
1656 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1659 struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
1660 struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
1661 struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
1662 struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];
1664 pci_dma_sync_single_for_device(bp->pdev,
1665 pci_unmap_addr(cons_rx_buf, mapping),
1666 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1668 prod_rx_buf->skb = cons_rx_buf->skb;
1669 pci_unmap_addr_set(prod_rx_buf, mapping,
1670 pci_unmap_addr(cons_rx_buf, mapping));
1672 memcpy(prod_bd, cons_bd, 8);
1674 bp->rx_prod_bseq += bp->rx_buf_use_size;
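/* Receive path.  Each completed buffer starts with an l2_fhdr status
 * header; frames with CRC, alignment, length or PHY decode errors are
 * dropped and their buffer recycled.  On a jumbo-MTU device, small
 * packets are copied into a new skb so the large buffer can be reused
 * in place; otherwise the buffer is passed up the stack and a
 * replacement skb is allocated for the ring slot.
 */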
1679 bnx2_rx_int(struct bnx2 *bp, int budget)
1681 struct status_block *sblk = bp->status_blk;
1682 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1683 struct l2_fhdr *rx_hdr;
1686 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1687 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1690 sw_cons = bp->rx_cons;
1691 sw_prod = bp->rx_prod;
1693 /* Memory barrier necessary as speculative reads of the rx
1694 * buffer can be ahead of the index in the status block
1697 while (sw_cons != hw_cons) {
1700 struct sw_bd *rx_buf;
1701 struct sk_buff *skb;
1703 sw_ring_cons = RX_RING_IDX(sw_cons);
1704 sw_ring_prod = RX_RING_IDX(sw_prod);
1706 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1708 pci_dma_sync_single_for_cpu(bp->pdev,
1709 pci_unmap_addr(rx_buf, mapping),
1710 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1712 rx_hdr = (struct l2_fhdr *) skb->data;
1713 len = rx_hdr->l2_fhdr_pkt_len - 4;
1715 if ((status = rx_hdr->l2_fhdr_status) &
1716 (L2_FHDR_ERRORS_BAD_CRC |
1717 L2_FHDR_ERRORS_PHY_DECODE |
1718 L2_FHDR_ERRORS_ALIGNMENT |
1719 L2_FHDR_ERRORS_TOO_SHORT |
1720 L2_FHDR_ERRORS_GIANT_FRAME)) {
1725 /* Since we don't have a jumbo ring, copy small packets
1728 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1729 struct sk_buff *new_skb;
1731 new_skb = dev_alloc_skb(len + 2);
1732 if (new_skb == NULL)
1736 memcpy(new_skb->data,
1737 skb->data + bp->rx_offset - 2,
1740 skb_reserve(new_skb, 2);
1741 skb_put(new_skb, len);
1742 new_skb->dev = bp->dev;
1744 bnx2_reuse_rx_skb(bp, skb,
1745 sw_ring_cons, sw_ring_prod);
1749 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1750 pci_unmap_single(bp->pdev,
1751 pci_unmap_addr(rx_buf, mapping),
1752 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1754 skb_reserve(skb, bp->rx_offset);
1759 bnx2_reuse_rx_skb(bp, skb,
1760 sw_ring_cons, sw_ring_prod);
1764 skb->protocol = eth_type_trans(skb, bp->dev);
1766 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1767 (htons(skb->protocol) != 0x8100)) {
1769 dev_kfree_skb_irq(skb);
1774 skb->ip_summed = CHECKSUM_NONE;
1776 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1777 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1779 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1780 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1781 skb->ip_summed = CHECKSUM_UNNECESSARY;
1785 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1786 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1787 rx_hdr->l2_fhdr_vlan_tag);
1791 netif_receive_skb(skb);
1793 bp->dev->last_rx = jiffies;
1799 sw_cons = NEXT_RX_BD(sw_cons);
1800 sw_prod = NEXT_RX_BD(sw_prod);
1802 if (rx_pkt == budget)
1805 /* Refresh hw_cons to see if there is new work */
1806 if (sw_cons == hw_cons) {
1807 hw_cons = bp->hw_rx_cons =
1808 sblk->status_rx_quick_consumer_index0;
1809 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1814 bp->rx_cons = sw_cons;
1815 bp->rx_prod = sw_prod;
1817 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1819 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1827 /* MSI ISR - The only difference between this and the INTx ISR
1828 * is that the MSI interrupt is always serviced.
1831 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1833 struct net_device *dev = dev_instance;
1834 struct bnx2 *bp = netdev_priv(dev);
1836 prefetch(bp->status_blk);
1837 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1838 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1839 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1841 /* Return here if interrupt is disabled. */
1842 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1845 netif_rx_schedule(dev);
1851 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1853 struct net_device *dev = dev_instance;
1854 struct bnx2 *bp = netdev_priv(dev);
1856 /* When using INTx, it is possible for the interrupt to arrive
1857 * at the CPU before the status block write that was posted
1858 * prior to the interrupt. Reading a register will flush the status block.
1859 * When using MSI, the MSI message will always complete after
1860 * the status block write.
1862 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1863 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1864 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1867 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1868 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1869 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1871 /* Return here if interrupt is shared and is disabled. */
1872 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1875 netif_rx_schedule(dev);
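/* bnx2_has_work() and bnx2_poll() below implement the NAPI cycle: the
 * ISRs only acknowledge and mask the interrupt and schedule polling;
 * bnx2_poll() then services link attention, TX and RX work, and
 * re-arms interrupts by writing the last seen status block index back
 * to the INT_ACK_CMD register once no work remains.
 */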
1881 bnx2_has_work(struct bnx2 *bp)
1883 struct status_block *sblk = bp->status_blk;
1885 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1886 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1889 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1897 bnx2_poll(struct net_device *dev, int *budget)
1899 struct bnx2 *bp = netdev_priv(dev);
1901 if ((bp->status_blk->status_attn_bits &
1902 STATUS_ATTN_BITS_LINK_STATE) !=
1903 (bp->status_blk->status_attn_bits_ack &
1904 STATUS_ATTN_BITS_LINK_STATE)) {
1906 spin_lock(&bp->phy_lock);
1908 spin_unlock(&bp->phy_lock);
1911 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1914 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1915 int orig_budget = *budget;
1918 if (orig_budget > dev->quota)
1919 orig_budget = dev->quota;
1921 work_done = bnx2_rx_int(bp, orig_budget);
1922 *budget -= work_done;
1923 dev->quota -= work_done;
1926 bp->last_status_idx = bp->status_blk->status_idx;
1929 if (!bnx2_has_work(bp)) {
1930 netif_rx_complete(dev);
1931 if (likely(bp->flags & USING_MSI_FLAG)) {
1932 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1933 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1934 bp->last_status_idx);
1937 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1938 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1939 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
1940 bp->last_status_idx);
1942 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1943 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1944 bp->last_status_idx);
1951 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1952 * from set_multicast.
1955 bnx2_set_rx_mode(struct net_device *dev)
1957 struct bnx2 *bp = netdev_priv(dev);
1958 u32 rx_mode, sort_mode;
1961 spin_lock_bh(&bp->phy_lock);
1963 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1964 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
1965 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
1967 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
1968 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1970 if (!(bp->flags & ASF_ENABLE_FLAG))
1971 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1973 if (dev->flags & IFF_PROMISC) {
1974 /* Promiscuous mode. */
1975 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
1976 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
1978 else if (dev->flags & IFF_ALLMULTI) {
1979 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
1980 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
1983 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
1986 /* Accept one or more multicast(s). */
1987 struct dev_mc_list *mclist;
1988 u32 mc_filter[NUM_MC_HASH_REGISTERS];
1993 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
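/* Each multicast address is hashed with ether_crc_le(); the top three
 * bits of the resulting hash byte select one of the eight 32-bit hash
 * registers and the low five bits select the bit within it.
 */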
1995 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1996 i++, mclist = mclist->next) {
1998 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2000 regidx = (bit & 0xe0) >> 5;
2002 mc_filter[regidx] |= (1 << bit);
2005 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2006 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2010 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2013 if (rx_mode != bp->rx_mode) {
2014 bp->rx_mode = rx_mode;
2015 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2018 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2019 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2020 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2022 spin_unlock_bh(&bp->phy_lock);
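/* The RV2P processor firmware is loaded two 32-bit words at a time
 * through the INSTR_HIGH/INSTR_LOW registers, each pair being committed
 * to the selected processor's instruction memory by an ADDR_CMD write.
 * The processor is left in reset here; it is un-stalled later during
 * chip initialization.
 */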
2026 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2033 for (i = 0; i < rv2p_code_len; i += 8) {
2034 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2036 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2039 if (rv2p_proc == RV2P_PROC1) {
2040 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2041 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2044 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2045 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2049 /* Reset the processor; the un-stall is done later. */
2050 if (rv2p_proc == RV2P_PROC1) {
2051 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2054 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
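/* Generic loader for the on-chip RISC processors (RXP, TXP, TPAT, COM):
 * the CPU is halted, each firmware section (text, data, sbss, bss,
 * rodata) is copied word by word into the processor's scratchpad at the
 * offset implied by its MIPS view address, the program counter is set
 * to the entry point, and the halt bit is cleared to start execution.
 */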
2059 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2065 val = REG_RD_IND(bp, cpu_reg->mode);
2066 val |= cpu_reg->mode_value_halt;
2067 REG_WR_IND(bp, cpu_reg->mode, val);
2068 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2070 /* Load the Text area. */
2071 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2075 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2076 REG_WR_IND(bp, offset, fw->text[j]);
2080 /* Load the Data area. */
2081 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2085 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2086 REG_WR_IND(bp, offset, fw->data[j]);
2090 /* Load the SBSS area. */
2091 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2095 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2096 REG_WR_IND(bp, offset, fw->sbss[j]);
2100 /* Load the BSS area. */
2101 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2105 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2106 REG_WR_IND(bp, offset, fw->bss[j]);
2110 /* Load the Read-Only area. */
2111 offset = cpu_reg->spad_base +
2112 (fw->rodata_addr - cpu_reg->mips_view_base);
2116 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2117 REG_WR_IND(bp, offset, fw->rodata[j]);
2121 /* Clear the pre-fetch instruction. */
2122 REG_WR_IND(bp, cpu_reg->inst, 0);
2123 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2125 /* Start the CPU. */
2126 val = REG_RD_IND(bp, cpu_reg->mode);
2127 val &= ~cpu_reg->mode_value_halt;
2128 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2129 REG_WR_IND(bp, cpu_reg->mode, val);
2133 bnx2_init_cpus(struct bnx2 *bp)
2135 struct cpu_reg cpu_reg;
2138 /* Initialize the RV2P processor. */
2139 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2140 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2142 /* Initialize the RX Processor. */
2143 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2144 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2145 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2146 cpu_reg.state = BNX2_RXP_CPU_STATE;
2147 cpu_reg.state_value_clear = 0xffffff;
2148 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2149 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2150 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2151 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2152 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2153 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2154 cpu_reg.mips_view_base = 0x8000000;
2156 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2157 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2158 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2159 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2161 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2162 fw.text_len = bnx2_RXP_b06FwTextLen;
2164 fw.text = bnx2_RXP_b06FwText;
2166 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2167 fw.data_len = bnx2_RXP_b06FwDataLen;
2169 fw.data = bnx2_RXP_b06FwData;
2171 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2172 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2174 fw.sbss = bnx2_RXP_b06FwSbss;
2176 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2177 fw.bss_len = bnx2_RXP_b06FwBssLen;
2179 fw.bss = bnx2_RXP_b06FwBss;
2181 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2182 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2183 fw.rodata_index = 0;
2184 fw.rodata = bnx2_RXP_b06FwRodata;
2186 load_cpu_fw(bp, &cpu_reg, &fw);
2188 /* Initialize the TX Processor. */
2189 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2190 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2191 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2192 cpu_reg.state = BNX2_TXP_CPU_STATE;
2193 cpu_reg.state_value_clear = 0xffffff;
2194 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2195 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2196 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2197 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2198 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2199 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2200 cpu_reg.mips_view_base = 0x8000000;
2202 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2203 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2204 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2205 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2207 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2208 fw.text_len = bnx2_TXP_b06FwTextLen;
2210 fw.text = bnx2_TXP_b06FwText;
2212 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2213 fw.data_len = bnx2_TXP_b06FwDataLen;
2215 fw.data = bnx2_TXP_b06FwData;
2217 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2218 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2220 fw.sbss = bnx2_TXP_b06FwSbss;
2222 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2223 fw.bss_len = bnx2_TXP_b06FwBssLen;
2225 fw.bss = bnx2_TXP_b06FwBss;
2227 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2228 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2229 fw.rodata_index = 0;
2230 fw.rodata = bnx2_TXP_b06FwRodata;
2232 load_cpu_fw(bp, &cpu_reg, &fw);
2234 /* Initialize the TX Patch-up Processor. */
2235 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2236 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2237 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2238 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2239 cpu_reg.state_value_clear = 0xffffff;
2240 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2241 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2242 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2243 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2244 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2245 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2246 cpu_reg.mips_view_base = 0x8000000;
2248 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2249 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2250 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2251 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2253 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2254 fw.text_len = bnx2_TPAT_b06FwTextLen;
2256 fw.text = bnx2_TPAT_b06FwText;
2258 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2259 fw.data_len = bnx2_TPAT_b06FwDataLen;
2261 fw.data = bnx2_TPAT_b06FwData;
2263 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2264 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2266 fw.sbss = bnx2_TPAT_b06FwSbss;
2268 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2269 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2271 fw.bss = bnx2_TPAT_b06FwBss;
2273 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2274 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2275 fw.rodata_index = 0;
2276 fw.rodata = bnx2_TPAT_b06FwRodata;
2278 load_cpu_fw(bp, &cpu_reg, &fw);
2280 /* Initialize the Completion Processor. */
2281 cpu_reg.mode = BNX2_COM_CPU_MODE;
2282 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2283 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2284 cpu_reg.state = BNX2_COM_CPU_STATE;
2285 cpu_reg.state_value_clear = 0xffffff;
2286 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2287 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2288 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2289 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2290 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2291 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2292 cpu_reg.mips_view_base = 0x8000000;
2294 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2295 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2296 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2297 fw.start_addr = bnx2_COM_b06FwStartAddr;
2299 fw.text_addr = bnx2_COM_b06FwTextAddr;
2300 fw.text_len = bnx2_COM_b06FwTextLen;
2302 fw.text = bnx2_COM_b06FwText;
2304 fw.data_addr = bnx2_COM_b06FwDataAddr;
2305 fw.data_len = bnx2_COM_b06FwDataLen;
2307 fw.data = bnx2_COM_b06FwData;
2309 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2310 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2312 fw.sbss = bnx2_COM_b06FwSbss;
2314 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2315 fw.bss_len = bnx2_COM_b06FwBssLen;
2317 fw.bss = bnx2_COM_b06FwBss;
2319 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2320 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2321 fw.rodata_index = 0;
2322 fw.rodata = bnx2_COM_b06FwRodata;
2324 load_cpu_fw(bp, &cpu_reg, &fw);
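/* Power management.  When entering D3hot with Wake-on-LAN enabled, the
 * copper PHY is renegotiated with a reduced 10/100 advertisement, the
 * unicast MAC match and an accept-all-multicast filter are programmed,
 * the EMAC and RPM blocks are left enabled so magic/ACPI packets can be
 * recognized, and the firmware is told via a SUSPEND_WOL (or
 * SUSPEND_NO_WOL) message before the PMCSR write makes further
 * memory-mapped access impossible.
 */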
2329 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2333 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2339 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2340 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2341 PCI_PM_CTRL_PME_STATUS);
2343 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2344 /* delay required during transition out of D3hot */
2347 val = REG_RD(bp, BNX2_EMAC_MODE);
2348 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2349 val &= ~BNX2_EMAC_MODE_MPKT;
2350 REG_WR(bp, BNX2_EMAC_MODE, val);
2352 val = REG_RD(bp, BNX2_RPM_CONFIG);
2353 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2354 REG_WR(bp, BNX2_RPM_CONFIG, val);
2365 autoneg = bp->autoneg;
2366 advertising = bp->advertising;
2368 bp->autoneg = AUTONEG_SPEED;
2369 bp->advertising = ADVERTISED_10baseT_Half |
2370 ADVERTISED_10baseT_Full |
2371 ADVERTISED_100baseT_Half |
2372 ADVERTISED_100baseT_Full |
2375 bnx2_setup_copper_phy(bp);
2377 bp->autoneg = autoneg;
2378 bp->advertising = advertising;
2380 bnx2_set_mac_addr(bp);
2382 val = REG_RD(bp, BNX2_EMAC_MODE);
2384 /* Enable port mode. */
2385 val &= ~BNX2_EMAC_MODE_PORT;
2386 val |= BNX2_EMAC_MODE_PORT_MII |
2387 BNX2_EMAC_MODE_MPKT_RCVD |
2388 BNX2_EMAC_MODE_ACPI_RCVD |
2389 BNX2_EMAC_MODE_MPKT;
2391 REG_WR(bp, BNX2_EMAC_MODE, val);
2393 /* receive all multicast */
2394 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2395 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2398 REG_WR(bp, BNX2_EMAC_RX_MODE,
2399 BNX2_EMAC_RX_MODE_SORT_MODE);
2401 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2402 BNX2_RPM_SORT_USER0_MC_EN;
2403 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2404 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2405 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2406 BNX2_RPM_SORT_USER0_ENA);
2408 /* Need to enable EMAC and RPM for WOL. */
2409 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2410 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2411 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2412 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2414 val = REG_RD(bp, BNX2_RPM_CONFIG);
2415 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2416 REG_WR(bp, BNX2_RPM_CONFIG, val);
2418 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2421 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2424 if (!(bp->flags & NO_WOL_FLAG))
2425 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2427 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2428 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2429 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2438 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2440 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2443 /* No more memory access after this point until
2444 * the device is brought back to D0.
2456 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2461 /* Request access to the flash interface. */
2462 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2463 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2464 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2465 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2471 if (j >= NVRAM_TIMEOUT_COUNT)
2478 bnx2_release_nvram_lock(struct bnx2 *bp)
2483 /* Relinquish nvram interface. */
2484 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2486 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2487 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2488 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2494 if (j >= NVRAM_TIMEOUT_COUNT)
2502 bnx2_enable_nvram_write(struct bnx2 *bp)
2506 val = REG_RD(bp, BNX2_MISC_CFG);
2507 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2509 if (!bp->flash_info->buffered) {
2512 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2513 REG_WR(bp, BNX2_NVM_COMMAND,
2514 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2516 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2519 val = REG_RD(bp, BNX2_NVM_COMMAND);
2520 if (val & BNX2_NVM_COMMAND_DONE)
2524 if (j >= NVRAM_TIMEOUT_COUNT)
2531 bnx2_disable_nvram_write(struct bnx2 *bp)
2535 val = REG_RD(bp, BNX2_MISC_CFG);
2536 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2541 bnx2_enable_nvram_access(struct bnx2 *bp)
2545 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2546 /* Enable both bits, even on read. */
2547 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2548 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2552 bnx2_disable_nvram_access(struct bnx2 *bp)
2556 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2557 /* Disable both bits, even after read. */
2558 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2559 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2560 BNX2_NVM_ACCESS_ENABLE_WR_EN));
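/* Erase the flash page containing @offset.  Buffered flash needs no
 * explicit erase; for non-buffered parts an ERASE command is issued and
 * polled until the DONE bit is set.
 */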
2564 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2569 if (bp->flash_info->buffered)
2570 /* Buffered flash, no erase needed */
2573 /* Build an erase command */
2574 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2575 BNX2_NVM_COMMAND_DOIT;
2577 /* Need to clear DONE bit separately. */
2578 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2580 /* Address of the NVRAM page to erase. */
2581 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2583 /* Issue an erase command. */
2584 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2586 /* Wait for completion. */
2587 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2592 val = REG_RD(bp, BNX2_NVM_COMMAND);
2593 if (val & BNX2_NVM_COMMAND_DONE)
2597 if (j >= NVRAM_TIMEOUT_COUNT)
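/* Read one 32-bit word of NVRAM at @offset into @ret_val.  For buffered
 * flash the linear offset is first converted into the controller's
 * page/byte address format, then a READ command is issued and polled
 * until DONE, and the data is copied into the caller's buffer.
 */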
2604 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2609 /* Build the command word. */
2610 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2612 /* Convert the offset to the buffered-flash page/byte format. */
2613 if (bp->flash_info->buffered) {
2614 offset = ((offset / bp->flash_info->page_size) <<
2615 bp->flash_info->page_bits) +
2616 (offset % bp->flash_info->page_size);
2619 /* Need to clear DONE bit separately. */
2620 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2622 /* Address of the NVRAM to read from. */
2623 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2625 /* Issue a read command. */
2626 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2628 /* Wait for completion. */
2629 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2634 val = REG_RD(bp, BNX2_NVM_COMMAND);
2635 if (val & BNX2_NVM_COMMAND_DONE) {
2636 val = REG_RD(bp, BNX2_NVM_READ);
2638 val = be32_to_cpu(val);
2639 memcpy(ret_val, &val, 4);
2643 if (j >= NVRAM_TIMEOUT_COUNT)
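/* Write one 32-bit word from @val to NVRAM at @offset: convert the offset
 * for buffered flash, load the data and address registers, issue the
 * write command and poll until it completes.
 */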
2651 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2656 /* Build the command word. */
2657 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2659 /* Convert the offset to the buffered-flash page/byte format. */
2660 if (bp->flash_info->buffered) {
2661 offset = ((offset / bp->flash_info->page_size) <<
2662 bp->flash_info->page_bits) +
2663 (offset % bp->flash_info->page_size);
2666 /* Need to clear DONE bit separately. */
2667 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2669 memcpy(&val32, val, 4);
2670 val32 = cpu_to_be32(val32);
2672 /* Write the data. */
2673 REG_WR(bp, BNX2_NVM_WRITE, val32);
2675 /* Address of the NVRAM to write to. */
2676 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2678 /* Issue the write command. */
2679 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2681 /* Wait for completion. */
2682 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2685 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2688 if (j >= NVRAM_TIMEOUT_COUNT)
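/* Detect the attached flash/EEPROM.  The strapping read from NVM_CFG1 is
 * matched against flash_table[]; if the interface has not yet been
 * reconfigured, the matching entry's config values are programmed into
 * the NVM_CFG/WRITE1 registers.  The usable flash size is then taken from
 * shared memory or from the table entry.
 */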
2695 bnx2_init_nvram(struct bnx2 *bp)
2698 int j, entry_count, rc;
2699 struct flash_spec *flash;
2701 /* Determine the selected interface. */
2702 val = REG_RD(bp, BNX2_NVM_CFG1);
2704 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2707 if (val & 0x40000000) {
2709 /* Flash interface has been reconfigured */
2710 for (j = 0, flash = &flash_table[0]; j < entry_count;
2712 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2713 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2714 bp->flash_info = flash;
2721 /* Not yet been reconfigured */
2723 if (val & (1 << 23))
2724 mask = FLASH_BACKUP_STRAP_MASK;
2726 mask = FLASH_STRAP_MASK;
2728 for (j = 0, flash = &flash_table[0]; j < entry_count;
2731 if ((val & mask) == (flash->strapping & mask)) {
2732 bp->flash_info = flash;
2734 /* Request access to the flash interface. */
2735 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2738 /* Enable access to flash interface */
2739 bnx2_enable_nvram_access(bp);
2741 /* Reconfigure the flash interface */
2742 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2743 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2744 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2745 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2747 /* Disable access to flash interface */
2748 bnx2_disable_nvram_access(bp);
2749 bnx2_release_nvram_lock(bp);
2754 } /* if (val & 0x40000000) */
2756 if (j == entry_count) {
2757 bp->flash_info = NULL;
2758 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2762 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2763 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2765 bp->flash_size = val;
2767 bp->flash_size = bp->flash_info->total_size;
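/* Read an arbitrary byte range from NVRAM into @ret_buf.  The range is
 * split into aligned dword reads; unaligned leading and trailing bytes go
 * through a small bounce buffer, and the FIRST/LAST command flags mark
 * the boundaries of the access.
 */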
2773 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2777 u32 cmd_flags, offset32, len32, extra;
2782 /* Request access to the flash interface. */
2783 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2786 /* Enable access to flash interface */
2787 bnx2_enable_nvram_access(bp);
2800 pre_len = 4 - (offset & 3);
2802 if (pre_len >= len32) {
2804 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2805 BNX2_NVM_COMMAND_LAST;
2808 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2811 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2816 memcpy(ret_buf, buf + (offset & 3), pre_len);
2823 extra = 4 - (len32 & 3);
2824 len32 = (len32 + 4) & ~3;
2831 cmd_flags = BNX2_NVM_COMMAND_LAST;
2833 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2834 BNX2_NVM_COMMAND_LAST;
2836 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2838 memcpy(ret_buf, buf, 4 - extra);
2840 else if (len32 > 0) {
2843 /* Read the first word. */
2847 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2849 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2851 /* Advance to the next dword. */
2856 while (len32 > 4 && rc == 0) {
2857 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2859 /* Advance to the next dword. */
2868 cmd_flags = BNX2_NVM_COMMAND_LAST;
2869 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2871 memcpy(ret_buf, buf, 4 - extra);
2874 /* Disable access to flash interface */
2875 bnx2_disable_nvram_access(bp);
2877 bnx2_release_nvram_lock(bp);
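/* Write an arbitrary byte range from @data_buf to NVRAM.  Unaligned start
 * and end bytes are merged with the existing contents via read-modify-write,
 * then the data is committed one flash page at a time: non-buffered parts
 * have the page read back, erased and rewritten around the new data.
 */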
2883 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2886 u32 written, offset32, len32;
2887 u8 *buf, start[4], end[4];
2889 int align_start, align_end;
2894 align_start = align_end = 0;
2896 if ((align_start = (offset32 & 3))) {
2898 len32 += align_start;
2899 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2904 if ((len32 > 4) || !align_start) {
2905 align_end = 4 - (len32 & 3);
2907 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2914 if (align_start || align_end) {
2915 buf = kmalloc(len32, GFP_KERNEL);
2919 memcpy(buf, start, 4);
2922 memcpy(buf + len32 - 4, end, 4);
2924 memcpy(buf + align_start, data_buf, buf_size);
2928 while ((written < len32) && (rc == 0)) {
2929 u32 page_start, page_end, data_start, data_end;
2930 u32 addr, cmd_flags;
2932 u8 flash_buffer[264];
2934 /* Find the page_start addr */
2935 page_start = offset32 + written;
2936 page_start -= (page_start % bp->flash_info->page_size);
2937 /* Find the page_end addr */
2938 page_end = page_start + bp->flash_info->page_size;
2939 /* Find the data_start addr */
2940 data_start = (written == 0) ? offset32 : page_start;
2941 /* Find the data_end addr */
2942 data_end = (page_end > offset32 + len32) ?
2943 (offset32 + len32) : page_end;
2945 /* Request access to the flash interface. */
2946 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2947 goto nvram_write_end;
2949 /* Enable access to flash interface */
2950 bnx2_enable_nvram_access(bp);
2952 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2953 if (bp->flash_info->buffered == 0) {
2956 /* Read the whole page into the buffer
2957 * (non-buffered flash only) */
2958 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2959 if (j == (bp->flash_info->page_size - 4)) {
2960 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2962 rc = bnx2_nvram_read_dword(bp,
2968 goto nvram_write_end;
2974 /* Enable writes to flash interface (unlock write-protect) */
2975 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2976 goto nvram_write_end;
2978 /* Erase the page */
2979 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
2980 goto nvram_write_end;
2982 /* Re-enable write access for the actual write */
2983 bnx2_enable_nvram_write(bp);
2985 /* Loop to write back the buffer data from page_start to data_start. */
2988 if (bp->flash_info->buffered == 0) {
2989 for (addr = page_start; addr < data_start;
2990 addr += 4, i += 4) {
2992 rc = bnx2_nvram_write_dword(bp, addr,
2993 &flash_buffer[i], cmd_flags);
2996 goto nvram_write_end;
3002 /* Loop to write the new data from data_start to data_end */
3003 for (addr = data_start; addr < data_end; addr += 4, i++) {
3004 if ((addr == page_end - 4) ||
3005 ((bp->flash_info->buffered) &&
3006 (addr == data_end - 4))) {
3008 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3010 rc = bnx2_nvram_write_dword(bp, addr, buf,
3014 goto nvram_write_end;
3020 /* Loop to write back the buffer data from data_end to page_end. */
3022 if (bp->flash_info->buffered == 0) {
3023 for (addr = data_end; addr < page_end;
3024 addr += 4, i += 4) {
3026 if (addr == page_end-4) {
3027 cmd_flags = BNX2_NVM_COMMAND_LAST;
3029 rc = bnx2_nvram_write_dword(bp, addr,
3030 &flash_buffer[i], cmd_flags);
3033 goto nvram_write_end;
3039 /* Disable writes to flash interface (lock write-protect) */
3040 bnx2_disable_nvram_write(bp);
3042 /* Disable access to flash interface */
3043 bnx2_disable_nvram_access(bp);
3044 bnx2_release_nvram_lock(bp);
3046 /* Increment written */
3047 written += data_end - data_start;
3051 if (align_start || align_end)
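/* Perform a core reset.  DMA engines are quiesced and the firmware is told
 * a reset is coming, a driver reset signature is deposited in shared
 * memory, and the reset is triggered through PCICFG_MISC_CONFIG.  Once the
 * chip comes back, the byte-swap configuration is sanity-checked and the
 * 5706 A0 workarounds (voltage regulator, bad rbuf removal) are applied.
 */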
3057 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3062 /* Wait for the current PCI transaction to complete before
3063 * issuing a reset. */
3064 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3065 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3066 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3067 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3068 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3069 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3072 /* Wait for the firmware to tell us it is ok to issue a reset. */
3073 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3075 /* Deposit a driver reset signature so the firmware knows that
3076 * this is a soft reset. */
3077 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3078 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3080 /* Do a dummy read to force the chip to complete all current transactions
3081 * before we issue a reset. */
3082 val = REG_RD(bp, BNX2_MISC_ID);
3084 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3085 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3086 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3089 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3091 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3092 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3095 /* Reset takes approximately 30 usec */
3096 for (i = 0; i < 10; i++) {
3097 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3098 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3099 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3105 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3106 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3107 printk(KERN_ERR PFX "Chip reset did not complete\n");
3111 /* Make sure byte swapping is properly configured. */
3112 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3113 if (val != 0x01020304) {
3114 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3118 /* Wait for the firmware to finish its initialization. */
3119 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3123 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3124 /* Adjust the voltage regulator two steps lower. The default
3125 * of this register is 0x0000000e. */
3126 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3128 /* Remove bad rbuf memory from the free pool. */
3129 rc = bnx2_alloc_bad_rbuf(bp);
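/* Program the chip after a reset: DMA byte/word swapping and channel
 * counts, context memory, MAC address, backoff seed and MTU, the DMA
 * addresses of the status and statistics blocks, and all host coalescing
 * parameters.  Finishes by syncing with the firmware and enabling the
 * remaining internal blocks.
 */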
3136 bnx2_init_chip(struct bnx2 *bp)
3141 /* Make sure the interrupt is not active. */
3142 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3144 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3145 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3147 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3149 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3150 DMA_READ_CHANS << 12 |
3151 DMA_WRITE_CHANS << 16;
3153 val |= (0x2 << 20) | (1 << 11);
3155 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3158 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3159 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3160 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3162 REG_WR(bp, BNX2_DMA_CONFIG, val);
3164 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3165 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3166 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3167 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3170 if (bp->flags & PCIX_FLAG) {
3173 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3175 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3176 val16 & ~PCI_X_CMD_ERO);
3179 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3180 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3181 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3182 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3184 /* Initialize context mapping and zero out the quick contexts. The
3185 * context block must have already been enabled. */
3186 bnx2_init_context(bp);
3189 bnx2_init_nvram(bp);
3191 bnx2_set_mac_addr(bp);
3193 val = REG_RD(bp, BNX2_MQ_CONFIG);
3194 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3195 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3196 REG_WR(bp, BNX2_MQ_CONFIG, val);
3198 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3199 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3200 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3202 val = (BCM_PAGE_BITS - 8) << 24;
3203 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3205 /* Configure page size. */
3206 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3207 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3208 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3209 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3211 val = bp->mac_addr[0] +
3212 (bp->mac_addr[1] << 8) +
3213 (bp->mac_addr[2] << 16) +
3215 (bp->mac_addr[4] << 8) +
3216 (bp->mac_addr[5] << 16);
3217 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3219 /* Program the MTU. Also include 4 bytes for CRC32. */
3220 val = bp->dev->mtu + ETH_HLEN + 4;
3221 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3222 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3223 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3225 bp->last_status_idx = 0;
3226 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3228 /* Set up how to generate a link change interrupt. */
3229 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3231 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3232 (u64) bp->status_blk_mapping & 0xffffffff);
3233 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3235 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3236 (u64) bp->stats_blk_mapping & 0xffffffff);
3237 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3238 (u64) bp->stats_blk_mapping >> 32);
3240 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3241 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3243 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3244 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3246 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3247 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3249 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3251 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3253 REG_WR(bp, BNX2_HC_COM_TICKS,
3254 (bp->com_ticks_int << 16) | bp->com_ticks);
3256 REG_WR(bp, BNX2_HC_CMD_TICKS,
3257 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3259 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3260 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3262 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3263 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3265 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3266 BNX2_HC_CONFIG_TX_TMR_MODE |
3267 BNX2_HC_CONFIG_COLLECT_STATS);
3270 /* Clear internal stats counters. */
3271 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3273 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3275 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3276 BNX2_PORT_FEATURE_ASF_ENABLED)
3277 bp->flags |= ASF_ENABLE_FLAG;
3279 /* Initialize the receive filter. */
3280 bnx2_set_rx_mode(bp->dev);
3282 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3285 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3286 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
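/* Set up the TX BD ring.  The last descriptor is used as a chain pointer
 * back to the ring base, and the ring's DMA address and L2 context type
 * are written into the TX connection context.
 */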
3295 bnx2_init_tx_ring(struct bnx2 *bp)
3300 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3302 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3303 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3308 bp->tx_prod_bseq = 0;
3310 val = BNX2_L2CTX_TYPE_TYPE_L2;
3311 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3312 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3314 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3316 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3318 val = (u64) bp->tx_desc_mapping >> 32;
3319 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3321 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3322 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
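/* Set up the RX BD ring: size the receive buffers from the current MTU,
 * chain the last BD back to the ring base, program the RX connection
 * context, then pre-fill the ring with skbs and publish the producer
 * index and byte sequence to the chip.
 */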
3326 bnx2_init_rx_ring(struct bnx2 *bp)
3330 u16 prod, ring_prod;
3333 /* 8 for CRC and VLAN */
3334 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3335 /* 8 for alignment */
3336 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3338 ring_prod = prod = bp->rx_prod = 0;
3341 bp->rx_prod_bseq = 0;
3343 rxbd = &bp->rx_desc_ring[0];
3344 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
3345 rxbd->rx_bd_len = bp->rx_buf_use_size;
3346 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3349 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
3350 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;
3352 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3353 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3355 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3357 val = (u64) bp->rx_desc_mapping >> 32;
3358 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3360 val = (u64) bp->rx_desc_mapping & 0xffffffff;
3361 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3363 for ( ;ring_prod < bp->rx_ring_size; ) {
3364 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3367 prod = NEXT_RX_BD(prod);
3368 ring_prod = RX_RING_IDX(prod);
3372 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3374 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3378 bnx2_free_tx_skbs(struct bnx2 *bp)
3382 if (bp->tx_buf_ring == NULL)
3385 for (i = 0; i < TX_DESC_CNT; ) {
3386 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3387 struct sk_buff *skb = tx_buf->skb;
3395 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3396 skb_headlen(skb), PCI_DMA_TODEVICE);
3400 last = skb_shinfo(skb)->nr_frags;
3401 for (j = 0; j < last; j++) {
3402 tx_buf = &bp->tx_buf_ring[i + j + 1];
3403 pci_unmap_page(bp->pdev,
3404 pci_unmap_addr(tx_buf, mapping),
3405 skb_shinfo(skb)->frags[j].size,
3408 dev_kfree_skb_any(skb);
3415 bnx2_free_rx_skbs(struct bnx2 *bp)
3419 if (bp->rx_buf_ring == NULL)
3422 for (i = 0; i < RX_DESC_CNT; i++) {
3423 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3424 struct sk_buff *skb = rx_buf->skb;
3429 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3430 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3434 dev_kfree_skb_any(skb);
3439 bnx2_free_skbs(struct bnx2 *bp)
3441 bnx2_free_tx_skbs(bp);
3442 bnx2_free_rx_skbs(bp);
3446 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3450 rc = bnx2_reset_chip(bp, reset_code);
3456 bnx2_init_tx_ring(bp);
3457 bnx2_init_rx_ring(bp);
3462 bnx2_init_nic(struct bnx2 *bp)
3466 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
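/* Offline register test.  Each table entry lists a register offset with a
 * read-write mask and a read-only mask; the test writes 0 and 0xffffffff
 * to each register and verifies that writable bits change while read-only
 * bits keep their saved value.
 */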
3475 bnx2_test_registers(struct bnx2 *bp)
3479 static const struct {
3485 { 0x006c, 0, 0x00000000, 0x0000003f },
3486 { 0x0090, 0, 0xffffffff, 0x00000000 },
3487 { 0x0094, 0, 0x00000000, 0x00000000 },
3489 { 0x0404, 0, 0x00003f00, 0x00000000 },
3490 { 0x0418, 0, 0x00000000, 0xffffffff },
3491 { 0x041c, 0, 0x00000000, 0xffffffff },
3492 { 0x0420, 0, 0x00000000, 0x80ffffff },
3493 { 0x0424, 0, 0x00000000, 0x00000000 },
3494 { 0x0428, 0, 0x00000000, 0x00000001 },
3495 { 0x0450, 0, 0x00000000, 0x0000ffff },
3496 { 0x0454, 0, 0x00000000, 0xffffffff },
3497 { 0x0458, 0, 0x00000000, 0xffffffff },
3499 { 0x0808, 0, 0x00000000, 0xffffffff },
3500 { 0x0854, 0, 0x00000000, 0xffffffff },
3501 { 0x0868, 0, 0x00000000, 0x77777777 },
3502 { 0x086c, 0, 0x00000000, 0x77777777 },
3503 { 0x0870, 0, 0x00000000, 0x77777777 },
3504 { 0x0874, 0, 0x00000000, 0x77777777 },
3506 { 0x0c00, 0, 0x00000000, 0x00000001 },
3507 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3508 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3510 { 0x1000, 0, 0x00000000, 0x00000001 },
3511 { 0x1004, 0, 0x00000000, 0x000f0001 },
3513 { 0x1408, 0, 0x01c00800, 0x00000000 },
3514 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3515 { 0x14a8, 0, 0x00000000, 0x000001ff },
3516 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3517 { 0x14b0, 0, 0x00000002, 0x00000001 },
3518 { 0x14b8, 0, 0x00000000, 0x00000000 },
3519 { 0x14c0, 0, 0x00000000, 0x00000009 },
3520 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3521 { 0x14cc, 0, 0x00000000, 0x00000001 },
3522 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3524 { 0x1800, 0, 0x00000000, 0x00000001 },
3525 { 0x1804, 0, 0x00000000, 0x00000003 },
3527 { 0x2800, 0, 0x00000000, 0x00000001 },
3528 { 0x2804, 0, 0x00000000, 0x00003f01 },
3529 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3530 { 0x2810, 0, 0xffff0000, 0x00000000 },
3531 { 0x2814, 0, 0xffff0000, 0x00000000 },
3532 { 0x2818, 0, 0xffff0000, 0x00000000 },
3533 { 0x281c, 0, 0xffff0000, 0x00000000 },
3534 { 0x2834, 0, 0xffffffff, 0x00000000 },
3535 { 0x2840, 0, 0x00000000, 0xffffffff },
3536 { 0x2844, 0, 0x00000000, 0xffffffff },
3537 { 0x2848, 0, 0xffffffff, 0x00000000 },
3538 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3540 { 0x2c00, 0, 0x00000000, 0x00000011 },
3541 { 0x2c04, 0, 0x00000000, 0x00030007 },
3543 { 0x3c00, 0, 0x00000000, 0x00000001 },
3544 { 0x3c04, 0, 0x00000000, 0x00070000 },
3545 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3546 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3547 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3548 { 0x3c14, 0, 0x00000000, 0xffffffff },
3549 { 0x3c18, 0, 0x00000000, 0xffffffff },
3550 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3551 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3553 { 0x5004, 0, 0x00000000, 0x0000007f },
3554 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3555 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3557 { 0x5c00, 0, 0x00000000, 0x00000001 },
3558 { 0x5c04, 0, 0x00000000, 0x0003000f },
3559 { 0x5c08, 0, 0x00000003, 0x00000000 },
3560 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3561 { 0x5c10, 0, 0x00000000, 0xffffffff },
3562 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3563 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3564 { 0x5c88, 0, 0x00000000, 0x00077373 },
3565 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3567 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3568 { 0x680c, 0, 0xffffffff, 0x00000000 },
3569 { 0x6810, 0, 0xffffffff, 0x00000000 },
3570 { 0x6814, 0, 0xffffffff, 0x00000000 },
3571 { 0x6818, 0, 0xffffffff, 0x00000000 },
3572 { 0x681c, 0, 0xffffffff, 0x00000000 },
3573 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3574 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3575 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3576 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3577 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3578 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3579 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3580 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3581 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3582 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3583 { 0x684c, 0, 0xffffffff, 0x00000000 },
3584 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3585 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3586 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3587 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3588 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3589 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3591 { 0xffff, 0, 0x00000000, 0x00000000 },
3595 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3596 u32 offset, rw_mask, ro_mask, save_val, val;
3598 offset = (u32) reg_tbl[i].offset;
3599 rw_mask = reg_tbl[i].rw_mask;
3600 ro_mask = reg_tbl[i].ro_mask;
3602 save_val = readl(bp->regview + offset);
3604 writel(0, bp->regview + offset);
3606 val = readl(bp->regview + offset);
3607 if ((val & rw_mask) != 0) {
3611 if ((val & ro_mask) != (save_val & ro_mask)) {
3615 writel(0xffffffff, bp->regview + offset);
3617 val = readl(bp->regview + offset);
3618 if ((val & rw_mask) != rw_mask) {
3622 if ((val & ro_mask) != (save_val & ro_mask)) {
3626 writel(save_val, bp->regview + offset);
3630 writel(save_val, bp->regview + offset);
3638 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3640 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3641 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3644 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3647 for (offset = 0; offset < size; offset += 4) {
3649 REG_WR_IND(bp, start + offset, test_pattern[i]);
3651 if (REG_RD_IND(bp, start + offset) !=
3661 bnx2_test_memory(struct bnx2 *bp)
3665 static const struct {
3669 { 0x60000, 0x4000 },
3670 { 0xa0000, 0x3000 },
3671 { 0xe0000, 0x4000 },
3672 { 0x120000, 0x4000 },
3673 { 0x1a0000, 0x4000 },
3674 { 0x160000, 0x4000 },
3678 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3679 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3680 mem_tbl[i].len)) != 0) {
3688 #define BNX2_MAC_LOOPBACK 0
3689 #define BNX2_PHY_LOOPBACK 1
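/* Loopback self-test helper: put the MAC or PHY in loopback, transmit one
 * self-addressed test packet, and check that it arrives back on the RX
 * ring with the expected length and payload and no l2_fhdr errors.
 * Returns non-zero on failure.
 */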
3692 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3694 unsigned int pkt_size, num_pkts, i;
3695 struct sk_buff *skb, *rx_skb;
3696 unsigned char *packet;
3697 u16 rx_start_idx, rx_idx;
3701 struct sw_bd *rx_buf;
3702 struct l2_fhdr *rx_hdr;
3705 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3706 bp->loopback = MAC_LOOPBACK;
3707 bnx2_set_mac_loopback(bp);
3709 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3711 bnx2_set_phy_loopback(bp);
3717 skb = dev_alloc_skb(pkt_size);
3720 packet = skb_put(skb, pkt_size);
3721 memcpy(packet, bp->mac_addr, 6);
3722 memset(packet + 6, 0x0, 8);
3723 for (i = 14; i < pkt_size; i++)
3724 packet[i] = (unsigned char) (i & 0xff);
3726 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3729 val = REG_RD(bp, BNX2_HC_COMMAND);
3730 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3731 REG_RD(bp, BNX2_HC_COMMAND);
3734 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3738 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3740 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3741 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3742 txbd->tx_bd_mss_nbytes = pkt_size;
3743 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3746 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3747 bp->tx_prod_bseq += pkt_size;
3749 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3750 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3754 val = REG_RD(bp, BNX2_HC_COMMAND);
3755 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3756 REG_RD(bp, BNX2_HC_COMMAND);
3760 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3761 dev_kfree_skb_irq(skb);
3763 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3764 goto loopback_test_done;
3767 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3768 if (rx_idx != rx_start_idx + num_pkts) {
3769 goto loopback_test_done;
3772 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3773 rx_skb = rx_buf->skb;
3775 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3776 skb_reserve(rx_skb, bp->rx_offset);
3778 pci_dma_sync_single_for_cpu(bp->pdev,
3779 pci_unmap_addr(rx_buf, mapping),
3780 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3782 if (rx_hdr->l2_fhdr_status &
3783 (L2_FHDR_ERRORS_BAD_CRC |
3784 L2_FHDR_ERRORS_PHY_DECODE |
3785 L2_FHDR_ERRORS_ALIGNMENT |
3786 L2_FHDR_ERRORS_TOO_SHORT |
3787 L2_FHDR_ERRORS_GIANT_FRAME)) {
3789 goto loopback_test_done;
3792 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3793 goto loopback_test_done;
3796 for (i = 14; i < pkt_size; i++) {
3797 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3798 goto loopback_test_done;
3809 #define BNX2_MAC_LOOPBACK_FAILED 1
3810 #define BNX2_PHY_LOOPBACK_FAILED 2
3811 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3812 BNX2_PHY_LOOPBACK_FAILED)
3815 bnx2_test_loopback(struct bnx2 *bp)
3819 if (!netif_running(bp->dev))
3820 return BNX2_LOOPBACK_FAILED;
3822 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3823 spin_lock_bh(&bp->phy_lock);
3825 spin_unlock_bh(&bp->phy_lock);
3826 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3827 rc |= BNX2_MAC_LOOPBACK_FAILED;
3828 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3829 rc |= BNX2_PHY_LOOPBACK_FAILED;
3833 #define NVRAM_SIZE 0x200
3834 #define CRC32_RESIDUAL 0xdebb20e3
3837 bnx2_test_nvram(struct bnx2 *bp)
3839 u32 buf[NVRAM_SIZE / 4];
3840 u8 *data = (u8 *) buf;
3844 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3845 goto test_nvram_done;
3847 magic = be32_to_cpu(buf[0]);
3848 if (magic != 0x669955aa) {
3850 goto test_nvram_done;
3853 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3854 goto test_nvram_done;
3856 csum = ether_crc_le(0x100, data);
3857 if (csum != CRC32_RESIDUAL) {
3859 goto test_nvram_done;
3862 csum = ether_crc_le(0x100, data + 0x100);
3863 if (csum != CRC32_RESIDUAL) {
3872 bnx2_test_link(struct bnx2 *bp)
3876 spin_lock_bh(&bp->phy_lock);
3877 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3878 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3879 spin_unlock_bh(&bp->phy_lock);
3881 if (bmsr & BMSR_LSTATUS) {
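/* Verify that the chip can deliver an interrupt (used for the MSI test):
 * force an immediate host-coalescing event and poll the status index in
 * PCICFG_INT_ACK_CMD for up to ~100 ms, waiting for it to advance.
 */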
3888 bnx2_test_intr(struct bnx2 *bp)
3894 if (!netif_running(bp->dev))
3897 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3899 /* This register is not touched during run-time. */
3900 val = REG_RD(bp, BNX2_HC_COMMAND);
3901 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
3902 REG_RD(bp, BNX2_HC_COMMAND);
3904 for (i = 0; i < 10; i++) {
3905 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3911 msleep_interruptible(10);
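/* Periodic driver timer: send the keep-alive pulse to the firmware and, on
 * 5706 SerDes ports, run the parallel-detection workaround: force a
 * 1000 Mb/s link when a signal is present but no autoneg partner answers,
 * and re-enable autoneg once a partner shows up.
 */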
3920 bnx2_timer(unsigned long data)
3922 struct bnx2 *bp = (struct bnx2 *) data;
3925 if (!netif_running(bp->dev))
3928 if (atomic_read(&bp->intr_sem) != 0)
3929 goto bnx2_restart_timer;
3931 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
3932 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
3934 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3935 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3937 spin_lock(&bp->phy_lock);
3938 if (bp->serdes_an_pending) {
3939 bp->serdes_an_pending--;
3941 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3944 bp->current_interval = bp->timer_interval;
3946 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3948 if (bmcr & BMCR_ANENABLE) {
3951 bnx2_write_phy(bp, 0x1c, 0x7c00);
3952 bnx2_read_phy(bp, 0x1c, &phy1);
3954 bnx2_write_phy(bp, 0x17, 0x0f01);
3955 bnx2_read_phy(bp, 0x15, &phy2);
3956 bnx2_write_phy(bp, 0x17, 0x0f01);
3957 bnx2_read_phy(bp, 0x15, &phy2);
3959 if ((phy1 & 0x10) && /* SIGNAL DETECT */
3960 !(phy2 & 0x20)) { /* no CONFIG */
3962 bmcr &= ~BMCR_ANENABLE;
3963 bmcr |= BMCR_SPEED1000 |
3965 bnx2_write_phy(bp, MII_BMCR, bmcr);
3967 PHY_PARALLEL_DETECT_FLAG;
3971 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
3972 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
3975 bnx2_write_phy(bp, 0x17, 0x0f01);
3976 bnx2_read_phy(bp, 0x15, &phy2);
3980 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3981 bmcr |= BMCR_ANENABLE;
3982 bnx2_write_phy(bp, MII_BMCR, bmcr);
3984 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
3989 bp->current_interval = bp->timer_interval;
3991 spin_unlock(&bp->phy_lock);
3995 mod_timer(&bp->timer, jiffies + bp->current_interval);
3998 /* Called with rtnl_lock */
4000 bnx2_open(struct net_device *dev)
4002 struct bnx2 *bp = netdev_priv(dev);
4005 bnx2_set_power_state(bp, PCI_D0);
4006 bnx2_disable_int(bp);
4008 rc = bnx2_alloc_mem(bp);
4012 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4013 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4016 if (pci_enable_msi(bp->pdev) == 0) {
4017 bp->flags |= USING_MSI_FLAG;
4018 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4022 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4023 SA_SHIRQ, dev->name, dev);
4027 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4035 rc = bnx2_init_nic(bp);
4038 free_irq(bp->pdev->irq, dev);
4039 if (bp->flags & USING_MSI_FLAG) {
4040 pci_disable_msi(bp->pdev);
4041 bp->flags &= ~USING_MSI_FLAG;
4048 mod_timer(&bp->timer, jiffies + bp->current_interval);
4050 atomic_set(&bp->intr_sem, 0);
4052 bnx2_enable_int(bp);
4054 if (bp->flags & USING_MSI_FLAG) {
4055 /* Test MSI to make sure it is working
4056 * If MSI test fails, go back to INTx mode
4058 if (bnx2_test_intr(bp) != 0) {
4059 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4060 " using MSI, switching to INTx mode. Please"
4061 " report this failure to the PCI maintainer"
4062 " and include system chipset information.\n",
4065 bnx2_disable_int(bp);
4066 free_irq(bp->pdev->irq, dev);
4067 pci_disable_msi(bp->pdev);
4068 bp->flags &= ~USING_MSI_FLAG;
4070 rc = bnx2_init_nic(bp);
4073 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4074 SA_SHIRQ, dev->name, dev);
4079 del_timer_sync(&bp->timer);
4082 bnx2_enable_int(bp);
4085 if (bp->flags & USING_MSI_FLAG) {
4086 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4089 netif_start_queue(dev);
4095 bnx2_reset_task(void *data)
4097 struct bnx2 *bp = data;
4099 if (!netif_running(bp->dev))
4102 bp->in_reset_task = 1;
4103 bnx2_netif_stop(bp);
4107 atomic_set(&bp->intr_sem, 1);
4108 bnx2_netif_start(bp);
4109 bp->in_reset_task = 0;
4113 bnx2_tx_timeout(struct net_device *dev)
4115 struct bnx2 *bp = netdev_priv(dev);
4117 /* This allows the netif to be shut down gracefully before resetting */
4118 schedule_work(&bp->reset_task);
4122 /* Called with rtnl_lock */
4124 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4126 struct bnx2 *bp = netdev_priv(dev);
4128 bnx2_netif_stop(bp);
4131 bnx2_set_rx_mode(dev);
4133 bnx2_netif_start(bp);
4136 /* Called with rtnl_lock */
4138 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4140 struct bnx2 *bp = netdev_priv(dev);
4142 bnx2_netif_stop(bp);
4145 bp->vlgrp->vlan_devices[vid] = NULL;
4146 bnx2_set_rx_mode(dev);
4148 bnx2_netif_start(bp);
4152 /* Called with dev->xmit_lock.
4153 * hard_start_xmit is pseudo-lockless - a lock is only required when
4154 * the tx queue is full. This way, we get the benefit of lockless
4155 * operations most of the time without the complexity of handling
4156 * netif_stop_queue/wake_queue race conditions.
4159 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4161 struct bnx2 *bp = netdev_priv(dev);
4164 struct sw_bd *tx_buf;
4165 u32 len, vlan_tag_flags, last_frag, mss;
4166 u16 prod, ring_prod;
4169 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4170 netif_stop_queue(dev);
4171 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4174 return NETDEV_TX_BUSY;
4176 len = skb_headlen(skb);
4178 ring_prod = TX_RING_IDX(prod);
4181 if (skb->ip_summed == CHECKSUM_HW) {
4182 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4185 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4187 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4190 if ((mss = skb_shinfo(skb)->tso_size) &&
4191 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4192 u32 tcp_opt_len, ip_tcp_len;
4194 if (skb_header_cloned(skb) &&
4195 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4197 return NETDEV_TX_OK;
4200 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4201 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4204 if (skb->h.th->doff > 5) {
4205 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4207 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4209 skb->nh.iph->check = 0;
4210 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4212 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4216 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4217 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4218 (tcp_opt_len >> 2)) << 8;
4227 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4229 tx_buf = &bp->tx_buf_ring[ring_prod];
4231 pci_unmap_addr_set(tx_buf, mapping, mapping);
4233 txbd = &bp->tx_desc_ring[ring_prod];
4235 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4236 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4237 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4238 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4240 last_frag = skb_shinfo(skb)->nr_frags;
4242 for (i = 0; i < last_frag; i++) {
4243 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4245 prod = NEXT_TX_BD(prod);
4246 ring_prod = TX_RING_IDX(prod);
4247 txbd = &bp->tx_desc_ring[ring_prod];
4250 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4251 len, PCI_DMA_TODEVICE);
4252 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4255 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4256 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4257 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4258 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4261 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4263 prod = NEXT_TX_BD(prod);
4264 bp->tx_prod_bseq += skb->len;
4266 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4267 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4272 dev->trans_start = jiffies;
4274 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4275 spin_lock(&bp->tx_lock);
4276 netif_stop_queue(dev);
4278 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4279 netif_wake_queue(dev);
4280 spin_unlock(&bp->tx_lock);
4283 return NETDEV_TX_OK;
4286 /* Called with rtnl_lock */
4288 bnx2_close(struct net_device *dev)
4290 struct bnx2 *bp = netdev_priv(dev);
4293 /* Calling flush_scheduled_work() may deadlock because
4294 * linkwatch_event() may be on the workqueue and it will try to get
4295 * the rtnl_lock which we are holding.
4297 while (bp->in_reset_task)
4300 bnx2_netif_stop(bp);
4301 del_timer_sync(&bp->timer);
4302 if (bp->flags & NO_WOL_FLAG)
4303 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4305 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4307 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4308 bnx2_reset_chip(bp, reset_code);
4309 free_irq(bp->pdev->irq, dev);
4310 if (bp->flags & USING_MSI_FLAG) {
4311 pci_disable_msi(bp->pdev);
4312 bp->flags &= ~USING_MSI_FLAG;
4317 netif_carrier_off(bp->dev);
4318 bnx2_set_power_state(bp, PCI_D3hot);
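/* The statistics block keeps 64-bit counters as _hi/_lo register pairs.
 * On 64-bit hosts the two halves are combined into one unsigned long;
 * on 32-bit hosts only the low 32 bits are reported.
 */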
4322 #define GET_NET_STATS64(ctr) \
4323 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4324 (unsigned long) (ctr##_lo)
4326 #define GET_NET_STATS32(ctr) \
4329 #if (BITS_PER_LONG == 64)
4330 #define GET_NET_STATS GET_NET_STATS64
4332 #define GET_NET_STATS GET_NET_STATS32
4335 static struct net_device_stats *
4336 bnx2_get_stats(struct net_device *dev)
4338 struct bnx2 *bp = netdev_priv(dev);
4339 struct statistics_block *stats_blk = bp->stats_blk;
4340 struct net_device_stats *net_stats = &bp->net_stats;
4342 if (bp->stats_blk == NULL) {
4345 net_stats->rx_packets =
4346 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4347 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4348 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4350 net_stats->tx_packets =
4351 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4352 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4353 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4355 net_stats->rx_bytes =
4356 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4358 net_stats->tx_bytes =
4359 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4361 net_stats->multicast =
4362 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4364 net_stats->collisions =
4365 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4367 net_stats->rx_length_errors =
4368 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4369 stats_blk->stat_EtherStatsOverrsizePkts);
4371 net_stats->rx_over_errors =
4372 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4374 net_stats->rx_frame_errors =
4375 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4377 net_stats->rx_crc_errors =
4378 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4380 net_stats->rx_errors = net_stats->rx_length_errors +
4381 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4382 net_stats->rx_crc_errors;
4384 net_stats->tx_aborted_errors =
4385 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4386 stats_blk->stat_Dot3StatsLateCollisions);
4388 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4389 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4390 net_stats->tx_carrier_errors = 0;
4392 net_stats->tx_carrier_errors =
4394 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4397 net_stats->tx_errors =
4399 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4401 net_stats->tx_aborted_errors +
4402 net_stats->tx_carrier_errors;
4407 /* All ethtool functions called with rtnl_lock */
4410 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4412 struct bnx2 *bp = netdev_priv(dev);
4414 cmd->supported = SUPPORTED_Autoneg;
4415 if (bp->phy_flags & PHY_SERDES_FLAG) {
4416 cmd->supported |= SUPPORTED_1000baseT_Full |
4419 cmd->port = PORT_FIBRE;
4422 cmd->supported |= SUPPORTED_10baseT_Half |
4423 SUPPORTED_10baseT_Full |
4424 SUPPORTED_100baseT_Half |
4425 SUPPORTED_100baseT_Full |
4426 SUPPORTED_1000baseT_Full |
4429 cmd->port = PORT_TP;
4432 cmd->advertising = bp->advertising;
4434 if (bp->autoneg & AUTONEG_SPEED) {
4435 cmd->autoneg = AUTONEG_ENABLE;
4438 cmd->autoneg = AUTONEG_DISABLE;
4441 if (netif_carrier_ok(dev)) {
4442 cmd->speed = bp->line_speed;
4443 cmd->duplex = bp->duplex;
4450 cmd->transceiver = XCVR_INTERNAL;
4451 cmd->phy_address = bp->phy_addr;
4457 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4459 struct bnx2 *bp = netdev_priv(dev);
4460 u8 autoneg = bp->autoneg;
4461 u8 req_duplex = bp->req_duplex;
4462 u16 req_line_speed = bp->req_line_speed;
4463 u32 advertising = bp->advertising;
4465 if (cmd->autoneg == AUTONEG_ENABLE) {
4466 autoneg |= AUTONEG_SPEED;
4468 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4470 /* allow advertising 1 speed */
4471 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4472 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4473 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4474 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4476 if (bp->phy_flags & PHY_SERDES_FLAG)
4479 advertising = cmd->advertising;
4482 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4483 advertising = cmd->advertising;
4485 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4489 if (bp->phy_flags & PHY_SERDES_FLAG) {
4490 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4493 advertising = ETHTOOL_ALL_COPPER_SPEED;
4496 advertising |= ADVERTISED_Autoneg;
4499 if (bp->phy_flags & PHY_SERDES_FLAG) {
4500 if ((cmd->speed != SPEED_1000) ||
4501 (cmd->duplex != DUPLEX_FULL)) {
4505 else if (cmd->speed == SPEED_1000) {
4508 autoneg &= ~AUTONEG_SPEED;
4509 req_line_speed = cmd->speed;
4510 req_duplex = cmd->duplex;
4514 bp->autoneg = autoneg;
4515 bp->advertising = advertising;
4516 bp->req_line_speed = req_line_speed;
4517 bp->req_duplex = req_duplex;
4519 spin_lock_bh(&bp->phy_lock);
4523 spin_unlock_bh(&bp->phy_lock);
4529 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4531 struct bnx2 *bp = netdev_priv(dev);
4533 strcpy(info->driver, DRV_MODULE_NAME);
4534 strcpy(info->version, DRV_MODULE_VERSION);
4535 strcpy(info->bus_info, pci_name(bp->pdev));
4536 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4537 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4538 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4539 info->fw_version[1] = info->fw_version[3] = '.';
4540 info->fw_version[5] = 0;
4543 #define BNX2_REGDUMP_LEN (32 * 1024)
4546 bnx2_get_regs_len(struct net_device *dev)
4548 return BNX2_REGDUMP_LEN;
4552 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4554 u32 *p = _p, i, offset;
4556 struct bnx2 *bp = netdev_priv(dev);
4557 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4558 0x0800, 0x0880, 0x0c00, 0x0c10,
4559 0x0c30, 0x0d08, 0x1000, 0x101c,
4560 0x1040, 0x1048, 0x1080, 0x10a4,
4561 0x1400, 0x1490, 0x1498, 0x14f0,
4562 0x1500, 0x155c, 0x1580, 0x15dc,
4563 0x1600, 0x1658, 0x1680, 0x16d8,
4564 0x1800, 0x1820, 0x1840, 0x1854,
4565 0x1880, 0x1894, 0x1900, 0x1984,
4566 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4567 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4568 0x2000, 0x2030, 0x23c0, 0x2400,
4569 0x2800, 0x2820, 0x2830, 0x2850,
4570 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4571 0x3c00, 0x3c94, 0x4000, 0x4010,
4572 0x4080, 0x4090, 0x43c0, 0x4458,
4573 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4574 0x4fc0, 0x5010, 0x53c0, 0x5444,
4575 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4576 0x5fc0, 0x6000, 0x6400, 0x6428,
4577 0x6800, 0x6848, 0x684c, 0x6860,
4578 0x6888, 0x6910, 0x8000 };
4582 memset(p, 0, BNX2_REGDUMP_LEN);
4584 if (!netif_running(bp->dev))
4588 offset = reg_boundaries[0];
4590 while (offset < BNX2_REGDUMP_LEN) {
4591 *p++ = REG_RD(bp, offset);
4593 if (offset == reg_boundaries[i + 1]) {
4594 offset = reg_boundaries[i + 2];
4595 p = (u32 *) (orig_p + offset);
4602 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4604 struct bnx2 *bp = netdev_priv(dev);
4606 if (bp->flags & NO_WOL_FLAG) {
4611 wol->supported = WAKE_MAGIC;
4613 wol->wolopts = WAKE_MAGIC;
4617 memset(&wol->sopass, 0, sizeof(wol->sopass));
4621 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4623 struct bnx2 *bp = netdev_priv(dev);
4625 if (wol->wolopts & ~WAKE_MAGIC)
4628 if (wol->wolopts & WAKE_MAGIC) {
4629 if (bp->flags & NO_WOL_FLAG)
4641 bnx2_nway_reset(struct net_device *dev)
4643 struct bnx2 *bp = netdev_priv(dev);
4646 if (!(bp->autoneg & AUTONEG_SPEED)) {
4650 spin_lock_bh(&bp->phy_lock);
4652 /* Force a link down visible on the other side */
4653 if (bp->phy_flags & PHY_SERDES_FLAG) {
4654 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4655 spin_unlock_bh(&bp->phy_lock);
4659 spin_lock_bh(&bp->phy_lock);
4660 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4661 bp->current_interval = SERDES_AN_TIMEOUT;
4662 bp->serdes_an_pending = 1;
4663 mod_timer(&bp->timer, jiffies + bp->current_interval);
4667 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4668 bmcr &= ~BMCR_LOOPBACK;
4669 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4671 spin_unlock_bh(&bp->phy_lock);
4677 bnx2_get_eeprom_len(struct net_device *dev)
4679 struct bnx2 *bp = netdev_priv(dev);
4681 if (bp->flash_info == NULL)
4684 return (int) bp->flash_size;
4688 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4691 struct bnx2 *bp = netdev_priv(dev);
4694 /* parameters already validated in ethtool_get_eeprom */
4696 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4702 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4705 struct bnx2 *bp = netdev_priv(dev);
4708 /* parameters already validated in ethtool_set_eeprom */
4710 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4716 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4718 struct bnx2 *bp = netdev_priv(dev);
4720 memset(coal, 0, sizeof(struct ethtool_coalesce));
4722 coal->rx_coalesce_usecs = bp->rx_ticks;
4723 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4724 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4725 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4727 coal->tx_coalesce_usecs = bp->tx_ticks;
4728 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4729 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4730 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4732 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4738 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4740 struct bnx2 *bp = netdev_priv(dev);
4742 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4743 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4745 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4746 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4748 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4749 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4751 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4752 if (bp->rx_quick_cons_trip_int > 0xff)
4753 bp->rx_quick_cons_trip_int = 0xff;
4755 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4756 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4758 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4759 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4761 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4762 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4764 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4765 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4768 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4769 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4770 bp->stats_ticks &= 0xffff00;
4772 if (netif_running(bp->dev)) {
4773 bnx2_netif_stop(bp);
4775 bnx2_netif_start(bp);
4782 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4784 struct bnx2 *bp = netdev_priv(dev);
4786 ering->rx_max_pending = MAX_RX_DESC_CNT;
4787 ering->rx_mini_max_pending = 0;
4788 ering->rx_jumbo_max_pending = 0;
4790 ering->rx_pending = bp->rx_ring_size;
4791 ering->rx_mini_pending = 0;
4792 ering->rx_jumbo_pending = 0;
4794 ering->tx_max_pending = MAX_TX_DESC_CNT;
4795 ering->tx_pending = bp->tx_ring_size;
4799 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4801 struct bnx2 *bp = netdev_priv(dev);
4803 if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
4804 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4805 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4809 bp->rx_ring_size = ering->rx_pending;
4810 bp->tx_ring_size = ering->tx_pending;
4812 if (netif_running(bp->dev)) {
4813 bnx2_netif_stop(bp);
4815 bnx2_netif_start(bp);
4822 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4824 struct bnx2 *bp = netdev_priv(dev);
4826 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4827 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4828 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4832 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4834 struct bnx2 *bp = netdev_priv(dev);
4836 bp->req_flow_ctrl = 0;
4837 if (epause->rx_pause)
4838 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4839 if (epause->tx_pause)
4840 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4842 if (epause->autoneg) {
4843 bp->autoneg |= AUTONEG_FLOW_CTRL;
4846 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4849 spin_lock_bh(&bp->phy_lock);
4853 spin_unlock_bh(&bp->phy_lock);
4859 bnx2_get_rx_csum(struct net_device *dev)
4861 struct bnx2 *bp = netdev_priv(dev);
4867 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4869 struct bnx2 *bp = netdev_priv(dev);
4875 #define BNX2_NUM_STATS 45
4878 char string[ETH_GSTRING_LEN];
4879 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4881 { "rx_error_bytes" },
4883 { "tx_error_bytes" },
4884 { "rx_ucast_packets" },
4885 { "rx_mcast_packets" },
4886 { "rx_bcast_packets" },
4887 { "tx_ucast_packets" },
4888 { "tx_mcast_packets" },
4889 { "tx_bcast_packets" },
4890 { "tx_mac_errors" },
4891 { "tx_carrier_errors" },
4892 { "rx_crc_errors" },
4893 { "rx_align_errors" },
4894 { "tx_single_collisions" },
4895 { "tx_multi_collisions" },
4897 { "tx_excess_collisions" },
4898 { "tx_late_collisions" },
4899 { "tx_total_collisions" },
4902 { "rx_undersize_packets" },
4903 { "rx_oversize_packets" },
4904 { "rx_64_byte_packets" },
4905 { "rx_65_to_127_byte_packets" },
4906 { "rx_128_to_255_byte_packets" },
4907 { "rx_256_to_511_byte_packets" },
4908 { "rx_512_to_1023_byte_packets" },
4909 { "rx_1024_to_1522_byte_packets" },
4910 { "rx_1523_to_9022_byte_packets" },
4911 { "tx_64_byte_packets" },
4912 { "tx_65_to_127_byte_packets" },
4913 { "tx_128_to_255_byte_packets" },
4914 { "tx_256_to_511_byte_packets" },
4915 { "tx_512_to_1023_byte_packets" },
4916 { "tx_1024_to_1522_byte_packets" },
4917 { "tx_1523_to_9022_byte_packets" },
4918 { "rx_xon_frames" },
4919 { "rx_xoff_frames" },
4920 { "tx_xon_frames" },
4921 { "tx_xoff_frames" },
4922 { "rx_mac_ctrl_frames" },
4923 { "rx_filtered_packets" },
4927 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
4929 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4930 STATS_OFFSET32(stat_IfHCInOctets_hi),
4931 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4932 STATS_OFFSET32(stat_IfHCOutOctets_hi),
4933 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
4934 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
4935 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
4936 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
4937 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
4938 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
4939 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
4940 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
4941 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
4942 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
4943 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
4944 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
4945 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
4946 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
4947 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
4948 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
4949 STATS_OFFSET32(stat_EtherStatsCollisions),
4950 STATS_OFFSET32(stat_EtherStatsFragments),
4951 STATS_OFFSET32(stat_EtherStatsJabbers),
4952 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
4953 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
4954 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
4955 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
4956 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
4957 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
4958 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
4959 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
4960 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
4961 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
4962 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
4963 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
4964 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
4965 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
4966 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
4967 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
4968 STATS_OFFSET32(stat_XonPauseFramesReceived),
4969 STATS_OFFSET32(stat_XoffPauseFramesReceived),
4970 STATS_OFFSET32(stat_OutXonSent),
4971 STATS_OFFSET32(stat_OutXoffSent),
4972 STATS_OFFSET32(stat_MacControlFramesReceived),
4973 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
4974 STATS_OFFSET32(stat_IfInMBUFDiscards),
4977 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
4978 * skipped because of errata.
4980 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
4981 8,0,8,8,8,8,8,8,8,8,
4982 4,0,4,4,4,4,4,4,4,4,
4983 4,4,4,4,4,4,4,4,4,4,
4984 4,4,4,4,4,4,4,4,4,4,
4988 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
4989 8,0,8,8,8,8,8,8,8,8,
4990 4,4,4,4,4,4,4,4,4,4,
4991 4,4,4,4,4,4,4,4,4,4,
4992 4,4,4,4,4,4,4,4,4,4,
4996 #define BNX2_NUM_TESTS 6
4999 char string[ETH_GSTRING_LEN];
5000 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5001 { "register_test (offline)" },
5002 { "memory_test (offline)" },
5003 { "loopback_test (offline)" },
5004 { "nvram_test (online)" },
5005 { "interrupt_test (online)" },
5006 { "link_test (online)" },
5010 bnx2_self_test_count(struct net_device *dev)
5012 return BNX2_NUM_TESTS;
5016 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5018 struct bnx2 *bp = netdev_priv(dev);
5020 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5021 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5022 bnx2_netif_stop(bp);
5023 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5026 if (bnx2_test_registers(bp) != 0) {
5028 etest->flags |= ETH_TEST_FL_FAILED;
5030 if (bnx2_test_memory(bp) != 0) {
5032 etest->flags |= ETH_TEST_FL_FAILED;
5034 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5035 etest->flags |= ETH_TEST_FL_FAILED;
5037 if (!netif_running(bp->dev)) {
5038 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5042 bnx2_netif_start(bp);
5045 /* wait for link up */
5046 msleep_interruptible(3000);
5047 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5048 msleep_interruptible(4000);
5051 if (bnx2_test_nvram(bp) != 0) {
5052 buf[3] = 1;
5053 etest->flags |= ETH_TEST_FL_FAILED;
5054 }
5055 if (bnx2_test_intr(bp) != 0) {
5056 buf[4] = 1;
5057 etest->flags |= ETH_TEST_FL_FAILED;
5058 }
5060 if (bnx2_test_link(bp) != 0) {
5061 buf[5] = 1;
5062 etest->flags |= ETH_TEST_FL_FAILED;
5063 }
5064 }
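/*
 * Illustrative user-space sketch (editorial addition, not part of the
 * driver): one way an application could trigger this self test through
 * the standard ETHTOOL_TEST ioctl.  Results come back in the same order
 * as bnx2_tests_str_arr above, with a non-zero entry marking a failed
 * test.  The helper name and the omitted error handling are assumptions
 * made for brevity.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int run_self_test(const char *ifname)
{
	struct ethtool_test *test;
	struct ifreq ifr;
	int fd, i, failed;

	/* room for BNX2_NUM_TESTS (6) 64-bit results after the header */
	test = calloc(1, sizeof(*test) + 6 * sizeof(__u64));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* also run the offline tests */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *) test;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);

	for (i = 0; i < 6; i++)
		printf("test %d: %llu\n", i, (unsigned long long) test->data[i]);

	failed = (test->flags & ETH_TEST_FL_FAILED) != 0;
	free(test);
	return failed;
}
#endif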
5068 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5070 switch (stringset) {
5071 case ETH_SS_STATS:
5072 memcpy(buf, bnx2_stats_str_arr,
5073 sizeof(bnx2_stats_str_arr));
5074 break;
5075 case ETH_SS_TEST:
5076 memcpy(buf, bnx2_tests_str_arr,
5077 sizeof(bnx2_tests_str_arr));
5078 break;
5079 }
5080 }
5083 bnx2_get_stats_count(struct net_device *dev)
5085 return BNX2_NUM_STATS;
5089 bnx2_get_ethtool_stats(struct net_device *dev,
5090 struct ethtool_stats *stats, u64 *buf)
5092 struct bnx2 *bp = netdev_priv(dev);
5094 u32 *hw_stats = (u32 *) bp->stats_blk;
5095 u8 *stats_len_arr = NULL;
5097 if (hw_stats == NULL) {
5098 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5099 return;
5100 }
5102 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5103 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5104 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5105 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5106 stats_len_arr = bnx2_5706_stats_len_arr;
5107 else
5108 stats_len_arr = bnx2_5708_stats_len_arr;
5110 for (i = 0; i < BNX2_NUM_STATS; i++) {
5111 if (stats_len_arr[i] == 0) {
5112 /* skip this counter */
5113 buf[i] = 0;
5114 continue;
5115 }
5116 if (stats_len_arr[i] == 4) {
5117 /* 4-byte counter */
5118 buf[i] = (u64)
5119 *(hw_stats + bnx2_stats_offset_arr[i]);
5120 }
5121 else {
5122 /* 8-byte counter */
5123 buf[i] = (((u64) *(hw_stats +
5124 bnx2_stats_offset_arr[i])) << 32) +
5125 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5126 }
5127 }
5128 }
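/*
 * Worked example (editorial note, not part of the driver): an entry of
 * 8 in the length table means the counter occupies two consecutive
 * 32-bit words in the statistics block, most significant word first.
 * If the hi word reads 0x00000001 and the lo word reads 0x00000005,
 * the expression above yields (0x1ULL << 32) + 0x5 = 0x0000000100000005.
 * An entry of 4 reads a single 32-bit word, and an entry of 0 skips a
 * counter that is unreliable on that chip revision (see the errata
 * note above the per-chip length tables).
 */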
5130 bnx2_phys_id(struct net_device *dev, u32 data)
5132 struct bnx2 *bp = netdev_priv(dev);
5139 save = REG_RD(bp, BNX2_MISC_CFG);
5140 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5142 for (i = 0; i < (data * 2); i++) {
5143 if ((i % 2) == 0) {
5144 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5145 }
5146 else {
5147 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5148 BNX2_EMAC_LED_1000MB_OVERRIDE |
5149 BNX2_EMAC_LED_100MB_OVERRIDE |
5150 BNX2_EMAC_LED_10MB_OVERRIDE |
5151 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5152 BNX2_EMAC_LED_TRAFFIC);
5153 }
5154 msleep_interruptible(500);
5155 if (signal_pending(current))
5156 break;
5157 }
5158 REG_WR(bp, BNX2_EMAC_LED, 0);
5159 REG_WR(bp, BNX2_MISC_CFG, save);
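/*
 * Editorial note: each pass through the loop above switches the LED
 * override pattern and sleeps 500 ms, so a request of `data' blink
 * cycles keeps the port LED alternating for roughly `data' seconds
 * (e.g. data = 5 gives ten half-second phases), unless a pending
 * signal ends it early.  User space reaches this through the
 * ETHTOOL_PHYS_ID ioctl, e.g. `ethtool -p <iface> 5'.
 */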
5163 static struct ethtool_ops bnx2_ethtool_ops = {
5164 .get_settings = bnx2_get_settings,
5165 .set_settings = bnx2_set_settings,
5166 .get_drvinfo = bnx2_get_drvinfo,
5167 .get_regs_len = bnx2_get_regs_len,
5168 .get_regs = bnx2_get_regs,
5169 .get_wol = bnx2_get_wol,
5170 .set_wol = bnx2_set_wol,
5171 .nway_reset = bnx2_nway_reset,
5172 .get_link = ethtool_op_get_link,
5173 .get_eeprom_len = bnx2_get_eeprom_len,
5174 .get_eeprom = bnx2_get_eeprom,
5175 .set_eeprom = bnx2_set_eeprom,
5176 .get_coalesce = bnx2_get_coalesce,
5177 .set_coalesce = bnx2_set_coalesce,
5178 .get_ringparam = bnx2_get_ringparam,
5179 .set_ringparam = bnx2_set_ringparam,
5180 .get_pauseparam = bnx2_get_pauseparam,
5181 .set_pauseparam = bnx2_set_pauseparam,
5182 .get_rx_csum = bnx2_get_rx_csum,
5183 .set_rx_csum = bnx2_set_rx_csum,
5184 .get_tx_csum = ethtool_op_get_tx_csum,
5185 .set_tx_csum = ethtool_op_set_tx_csum,
5186 .get_sg = ethtool_op_get_sg,
5187 .set_sg = ethtool_op_set_sg,
5189 .get_tso = ethtool_op_get_tso,
5190 .set_tso = ethtool_op_set_tso,
5192 .self_test_count = bnx2_self_test_count,
5193 .self_test = bnx2_self_test,
5194 .get_strings = bnx2_get_strings,
5195 .phys_id = bnx2_phys_id,
5196 .get_stats_count = bnx2_get_stats_count,
5197 .get_ethtool_stats = bnx2_get_ethtool_stats,
5198 .get_perm_addr = ethtool_op_get_perm_addr,
5201 /* Called with rtnl_lock */
5203 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5205 struct mii_ioctl_data *data = if_mii(ifr);
5206 struct bnx2 *bp = netdev_priv(dev);
5211 data->phy_id = bp->phy_addr;
5217 spin_lock_bh(&bp->phy_lock);
5218 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5219 spin_unlock_bh(&bp->phy_lock);
5221 data->val_out = mii_regval;
5227 if (!capable(CAP_NET_ADMIN))
5230 spin_lock_bh(&bp->phy_lock);
5231 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5232 spin_unlock_bh(&bp->phy_lock);
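/*
 * Illustrative user-space sketch (editorial addition, not part of the
 * driver): reading a PHY register through the SIOCGMIIPHY/SIOCGMIIREG
 * path handled above.  The helper name is an assumption and error
 * handling is omitted for brevity.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_phy_reg(const char *ifname, int reg)
{
	struct ifreq ifr;
	/* the mii data lives inside the ifreq union itself */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	ioctl(fd, SIOCGMIIPHY, &ifr);		/* fills mii->phy_id */
	mii->reg_num = reg;
	ioctl(fd, SIOCGMIIREG, &ifr);		/* fills mii->val_out */

	printf("phy %d reg %d = 0x%04x\n", mii->phy_id, reg, mii->val_out);
	close(fd);
	return mii->val_out;
}
#endif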
5243 /* Called with rtnl_lock */
5245 bnx2_change_mac_addr(struct net_device *dev, void *p)
5247 struct sockaddr *addr = p;
5248 struct bnx2 *bp = netdev_priv(dev);
5250 if (!is_valid_ether_addr(addr->sa_data))
5251 return -EINVAL;
5253 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5254 if (netif_running(dev))
5255 bnx2_set_mac_addr(bp);
5260 /* Called with rtnl_lock */
5262 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5264 struct bnx2 *bp = netdev_priv(dev);
5266 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5267 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5268 return -EINVAL;
5270 dev->mtu = new_mtu;
5271 if (netif_running(dev)) {
5272 bnx2_netif_stop(bp);
5276 bnx2_netif_start(bp);
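/*
 * Worked example (editorial note, not part of the driver): the bounds
 * above are expressed as on-wire frame sizes, so the acceptable MTU is
 * the frame limit minus the 14-byte Ethernet header (ETH_HLEN).
 * Assuming, for illustration, MIN_ETHERNET_PACKET_SIZE = 60 and
 * MAX_ETHERNET_JUMBO_PACKET_SIZE = 9022 (the same 9022-octet bucket
 * used by the statistics above), the range works out to MTUs of
 * 46 through 9008 bytes.
 */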
5281 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5283 poll_bnx2(struct net_device *dev)
5285 struct bnx2 *bp = netdev_priv(dev);
5287 disable_irq(bp->pdev->irq);
5288 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5289 enable_irq(bp->pdev->irq);
5293 static int __devinit
5294 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5297 unsigned long mem_len;
5301 SET_MODULE_OWNER(dev);
5302 SET_NETDEV_DEV(dev, &pdev->dev);
5303 bp = netdev_priv(dev);
5308 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5309 rc = pci_enable_device(pdev);
5311 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5315 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5316 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5319 goto err_out_disable;
5322 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5324 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5325 goto err_out_disable;
5328 pci_set_master(pdev);
5330 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5331 if (bp->pm_cap == 0) {
5332 printk(KERN_ERR PFX "Cannot find power management capability, "
5335 goto err_out_release;
5338 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5339 if (bp->pcix_cap == 0) {
5340 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5342 goto err_out_release;
5345 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5346 bp->flags |= USING_DAC_FLAG;
5347 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5348 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5349 "failed, aborting.\n");
5351 goto err_out_release;
5354 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5355 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5357 goto err_out_release;
5363 spin_lock_init(&bp->phy_lock);
5364 spin_lock_init(&bp->tx_lock);
5365 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5367 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5368 mem_len = MB_GET_CID_ADDR(17);
5369 dev->mem_end = dev->mem_start + mem_len;
5370 dev->irq = pdev->irq;
5372 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5375 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5377 goto err_out_release;
5380 /* Configure byte swap and enable write to the reg_window registers.
5381 * Rely on the CPU to do target byte swapping on big endian systems;
5382 * the chip's target access swapping will not swap all accesses. */
5384 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5385 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5386 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5388 bnx2_set_power_state(bp, PCI_D0);
5390 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5392 /* Get bus information. */
5393 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5394 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5397 bp->flags |= PCIX_FLAG;
5399 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5401 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5403 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5404 bp->bus_speed_mhz = 133;
5407 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5408 bp->bus_speed_mhz = 100;
5411 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5412 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5413 bp->bus_speed_mhz = 66;
5416 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5417 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5418 bp->bus_speed_mhz = 50;
5421 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5422 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5423 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5424 bp->bus_speed_mhz = 33;
5429 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5430 bp->bus_speed_mhz = 66;
5432 bp->bus_speed_mhz = 33;
5435 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5436 bp->flags |= PCI_32BIT_FLAG;
5438 /* 5706A0 may falsely detect SERR and PERR. */
5439 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5440 reg = REG_RD(bp, PCI_COMMAND);
5441 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5442 REG_WR(bp, PCI_COMMAND, reg);
5444 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5445 !(bp->flags & PCIX_FLAG)) {
5447 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5452 bnx2_init_nvram(bp);
5454 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5456 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5457 BNX2_SHM_HDR_SIGNATURE_SIG)
5458 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5460 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5462 /* Get the permanent MAC address. First we need to make sure the
5463 * firmware is actually running. */
5465 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5467 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5468 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5469 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5474 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5476 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5477 bp->mac_addr[0] = (u8) (reg >> 8);
5478 bp->mac_addr[1] = (u8) reg;
5480 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5481 bp->mac_addr[2] = (u8) (reg >> 24);
5482 bp->mac_addr[3] = (u8) (reg >> 16);
5483 bp->mac_addr[4] = (u8) (reg >> 8);
5484 bp->mac_addr[5] = (u8) reg;
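/*
 * Worked example (editorial note): the permanent MAC address is packed
 * big-endian across the two shared-memory words read above.  If
 * BNX2_PORT_HW_CFG_MAC_UPPER reads 0x0000a1b2 and
 * BNX2_PORT_HW_CFG_MAC_LOWER reads 0xc3d4e5f6, the shifts above yield
 * the station address a1:b2:c3:d4:e5:f6.
 */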
5486 bp->tx_ring_size = MAX_TX_DESC_CNT;
5487 bp->rx_ring_size = 100;
5491 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5493 bp->tx_quick_cons_trip_int = 20;
5494 bp->tx_quick_cons_trip = 20;
5495 bp->tx_ticks_int = 80;
5498 bp->rx_quick_cons_trip_int = 6;
5499 bp->rx_quick_cons_trip = 6;
5500 bp->rx_ticks_int = 18;
5503 bp->stats_ticks = 1000000 & 0xffff00;
5505 bp->timer_interval = HZ;
5506 bp->current_interval = HZ;
5510 /* Disable WOL support if we are running on a SERDES chip. */
5511 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5512 bp->phy_flags |= PHY_SERDES_FLAG;
5513 bp->flags |= NO_WOL_FLAG;
5514 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5516 reg = REG_RD_IND(bp, bp->shmem_base +
5517 BNX2_SHARED_HW_CFG_CONFIG);
5518 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5519 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5523 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5524 bp->flags |= NO_WOL_FLAG;
5526 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5527 bp->tx_quick_cons_trip_int =
5528 bp->tx_quick_cons_trip;
5529 bp->tx_ticks_int = bp->tx_ticks;
5530 bp->rx_quick_cons_trip_int =
5531 bp->rx_quick_cons_trip;
5532 bp->rx_ticks_int = bp->rx_ticks;
5533 bp->comp_prod_trip_int = bp->comp_prod_trip;
5534 bp->com_ticks_int = bp->com_ticks;
5535 bp->cmd_ticks_int = bp->cmd_ticks;
5538 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5539 bp->req_line_speed = 0;
5540 if (bp->phy_flags & PHY_SERDES_FLAG) {
5541 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5543 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5544 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5545 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5547 bp->req_line_speed = bp->line_speed = SPEED_1000;
5548 bp->req_duplex = DUPLEX_FULL;
5552 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5555 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5557 init_timer(&bp->timer);
5558 bp->timer.expires = RUN_AT(bp->timer_interval);
5559 bp->timer.data = (unsigned long) bp;
5560 bp->timer.function = bnx2_timer;
5566 iounmap(bp->regview);
5571 pci_release_regions(pdev);
5574 pci_disable_device(pdev);
5575 pci_set_drvdata(pdev, NULL);
5581 static int __devinit
5582 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5584 static int version_printed = 0;
5585 struct net_device *dev = NULL;
5589 if (version_printed++ == 0)
5590 printk(KERN_INFO "%s", version);
5592 /* dev is zeroed by alloc_etherdev */
5593 dev = alloc_etherdev(sizeof(*bp));
5598 rc = bnx2_init_board(pdev, dev);
5604 dev->open = bnx2_open;
5605 dev->hard_start_xmit = bnx2_start_xmit;
5606 dev->stop = bnx2_close;
5607 dev->get_stats = bnx2_get_stats;
5608 dev->set_multicast_list = bnx2_set_rx_mode;
5609 dev->do_ioctl = bnx2_ioctl;
5610 dev->set_mac_address = bnx2_change_mac_addr;
5611 dev->change_mtu = bnx2_change_mtu;
5612 dev->tx_timeout = bnx2_tx_timeout;
5613 dev->watchdog_timeo = TX_TIMEOUT;
5615 dev->vlan_rx_register = bnx2_vlan_rx_register;
5616 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5618 dev->poll = bnx2_poll;
5619 dev->ethtool_ops = &bnx2_ethtool_ops;
5622 bp = netdev_priv(dev);
5624 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5625 dev->poll_controller = poll_bnx2;
5628 if ((rc = register_netdev(dev))) {
5629 printk(KERN_ERR PFX "Cannot register net device\n");
5631 iounmap(bp->regview);
5632 pci_release_regions(pdev);
5633 pci_disable_device(pdev);
5634 pci_set_drvdata(pdev, NULL);
5639 pci_set_drvdata(pdev, dev);
5641 memcpy(dev->dev_addr, bp->mac_addr, 6);
5642 memcpy(dev->perm_addr, bp->mac_addr, 6);
5643 bp->name = board_info[ent->driver_data].name;
5644 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5648 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5649 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5650 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5651 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5656 printk("node addr ");
5657 for (i = 0; i < 6; i++)
5658 printk("%2.2x", dev->dev_addr[i]);
5661 dev->features |= NETIF_F_SG;
5662 if (bp->flags & USING_DAC_FLAG)
5663 dev->features |= NETIF_F_HIGHDMA;
5664 dev->features |= NETIF_F_IP_CSUM;
5666 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5669 dev->features |= NETIF_F_TSO;
5672 netif_carrier_off(bp->dev);
5677 static void __devexit
5678 bnx2_remove_one(struct pci_dev *pdev)
5680 struct net_device *dev = pci_get_drvdata(pdev);
5681 struct bnx2 *bp = netdev_priv(dev);
5683 flush_scheduled_work();
5685 unregister_netdev(dev);
5688 iounmap(bp->regview);
5691 pci_release_regions(pdev);
5692 pci_disable_device(pdev);
5693 pci_set_drvdata(pdev, NULL);
5697 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5699 struct net_device *dev = pci_get_drvdata(pdev);
5700 struct bnx2 *bp = netdev_priv(dev);
5703 if (!netif_running(dev))
5704 return 0;
5706 bnx2_netif_stop(bp);
5707 netif_device_detach(dev);
5708 del_timer_sync(&bp->timer);
5709 if (bp->flags & NO_WOL_FLAG)
5710 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5712 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5714 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5715 bnx2_reset_chip(bp, reset_code);
5717 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5722 bnx2_resume(struct pci_dev *pdev)
5724 struct net_device *dev = pci_get_drvdata(pdev);
5725 struct bnx2 *bp = netdev_priv(dev);
5727 if (!netif_running(dev))
5728 return 0;
5730 bnx2_set_power_state(bp, PCI_D0);
5731 netif_device_attach(dev);
5733 bnx2_netif_start(bp);
5737 static struct pci_driver bnx2_pci_driver = {
5738 .name = DRV_MODULE_NAME,
5739 .id_table = bnx2_pci_tbl,
5740 .probe = bnx2_init_one,
5741 .remove = __devexit_p(bnx2_remove_one),
5742 .suspend = bnx2_suspend,
5743 .resume = bnx2_resume,
5746 static int __init bnx2_init(void)
5748 return pci_module_init(&bnx2_pci_driver);
5751 static void __exit bnx2_cleanup(void)
5753 pci_unregister_driver(&bnx2_pci_driver);
5756 module_init(bnx2_init);
5757 module_exit(bnx2_cleanup);