2 * Copyright (C) 2003 - 2006 NetXen, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
23 * Contact Information:
26 * 3965 Freedom Circle, Fourth floor,
27 * Santa Clara, CA 95054
30 * Main source file for NetXen NIC Driver on Linux
34 #include <linux/vmalloc.h>
35 #include <linux/highmem.h>
36 #include "netxen_nic_hw.h"
38 #include "netxen_nic.h"
39 #include "netxen_nic_phan_reg.h"
41 #include <linux/dma-mapping.h>
42 #include <linux/if_vlan.h>
44 #include <linux/ipv6.h>
/* Module metadata. */
46 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
47 MODULE_LICENSE("GPL");
48 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
/* Driver name (exported; also used for PCI region ownership) and banner. */
50 char netxen_nic_driver_name[] = "netxen_nic";
51 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
52 NETXEN_NIC_LINUX_VERSIONID;
/* Tunables; presumably exposed via module_param lines not visible in this
 * excerpt — TODO confirm against the full file. */
54 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
56 /* Default to restricted 1G auto-neg mode */
57 static int wol_port_mode = 5;
/* Prefer MSI, and MSI-X where supported, over legacy INTx. */
59 static int use_msi = 1;
61 static int use_msi_x = 1;
63 /* Local functions to NetXen NIC driver */
64 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
65 const struct pci_device_id *ent);
66 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
67 static int netxen_nic_open(struct net_device *netdev);
68 static int netxen_nic_close(struct net_device *netdev);
69 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
70 static void netxen_tx_timeout(struct net_device *netdev);
71 static void netxen_tx_timeout_task(struct work_struct *work);
72 static void netxen_watchdog(unsigned long);
73 static int netxen_nic_poll(struct napi_struct *napi, int budget);
74 #ifdef CONFIG_NET_POLL_CONTROLLER
75 static void netxen_nic_poll_controller(struct net_device *netdev);
/* NOTE(review): the matching #endif (original line 76) is not visible in
 * this excerpt — confirm it closes the CONFIG_NET_POLL_CONTROLLER guard. */
77 static irqreturn_t netxen_intr(int irq, void *data);
78 static irqreturn_t netxen_msi_intr(int irq, void *data);
79 static irqreturn_t netxen_msix_intr(int irq, void *data);
81 /* PCI Device ID Table */
82 #define ENTRY(device) \
83 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
84 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
86 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
87 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
88 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
89 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
90 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
91 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
92 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
93 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
94 ENTRY(PCI_DEVICE_ID_NX3031),
98 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
101 * In netxen_nic_down(), we must wait for any pending callback requests into
102 * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
103 * reenabled right after it is deleted in netxen_nic_down().
104 * FLUSH_SCHEDULED_WORK() does this synchronization.
106 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
107 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
108 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
109 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
110 * linkwatch_event() to be executed which also attempts to acquire the rtnl
111 * lock thus causing a deadlock.
/* Private workqueue; per the rationale above, schedule_work() cannot be
 * used because flushing the shared queue under rtnl would deadlock via
 * linkwatch_event(). */
114 static struct workqueue_struct *netxen_workq;
115 #define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
116 #define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
/* NOTE(review): netxen_watchdog is already forward-declared earlier in the
 * file (original line 72); this second declaration is redundant. */
118 static void netxen_watchdog(unsigned long);
120 static uint32_t crb_cmd_producer[4] = {
121 CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
122 CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
126 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
127 uint32_t crb_producer)
129 adapter->pci_write_normalize(adapter,
130 adapter->crb_addr_cmd_producer, crb_producer);
133 static uint32_t crb_cmd_consumer[4] = {
134 CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
135 CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
139 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
142 adapter->pci_write_normalize(adapter,
143 adapter->crb_addr_cmd_consumer, crb_consumer);
146 static uint32_t msi_tgt_status[8] = {
147 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
148 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
149 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
150 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
153 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
155 static inline void netxen_nic_disable_int(struct netxen_adapter *adapter)
157 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0);
160 static inline void netxen_nic_enable_int(struct netxen_adapter *adapter)
162 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1);
164 if (!NETXEN_IS_MSI_FAMILY(adapter))
165 adapter->pci_write_immediate(adapter,
166 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
/*
 * nx_set_dma_mask - pick a DMA addressing width based on chip revision.
 * P3 (A2 and B0+) can address 39 bits, P2 C1 35 bits; anything else
 * falls back to 32 bits. Returns 0 on success, negative errno otherwise.
 */
169 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
171 struct pci_dev *pdev = adapter->pdev;
/* Conservative default before the per-revision selection below. */
176 adapter->dma_mask = DMA_32BIT_MASK;
178 if (revision_id >= NX_P3_B0) {
179 /* should go to DMA_64BIT_MASK */
180 adapter->dma_mask = DMA_39BIT_MASK;
181 mask = DMA_39BIT_MASK;
182 } else if (revision_id == NX_P3_A2) {
183 adapter->dma_mask = DMA_39BIT_MASK;
184 mask = DMA_39BIT_MASK;
185 } else if (revision_id == NX_P2_C1) {
186 adapter->dma_mask = DMA_35BIT_MASK;
187 mask = DMA_35BIT_MASK;
/* Final branch: unknown/old revisions stay at 32 bits and skip the
 * wide-mask attempt below (the `else {` line is not visible here). */
189 adapter->dma_mask = DMA_32BIT_MASK;
190 mask = DMA_32BIT_MASK;
191 goto set_32_bit_mask;
195 * Consistent DMA mask is set to 32 bit because it cannot be set to
196 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
197 * come off this pool.
/* Try the wide streaming mask; on success mark the device DAC-capable. */
199 if (pci_set_dma_mask(pdev, mask) == 0 &&
200 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) == 0) {
201 adapter->pci_using_dac = 1;
/* NOTE(review): this #endif implies the block above is under an #ifdef
 * (CONFIG_IA64) whose opening line is not visible — confirm in full file. */
205 #endif /* CONFIG_IA64 */
/* 32-bit fallback path (target of set_32_bit_mask above). */
207 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
209 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
211 DPRINTK(ERR, "No usable DMA configuration, aborting:%d\n", err);
/* 32-bit addressing succeeded: no 64-bit DAC cycles. */
215 adapter->pci_using_dac = 0;
/*
 * netxen_check_options - derive per-board capabilities and ring sizes.
 * Sets MSI-X support (gated by the use_msi_x module option) and the
 * receive descriptor count (10G vs 1G sizing) from the board type, then
 * fixes the Tx/jumbo/LRO ring sizes. Must run before any ring allocation.
 */
219 static void netxen_check_options(struct netxen_adapter *adapter)
221 switch (adapter->ahw.boardcfg.board_type) {
/* P3 10-gigabit boards: MSI-X capable, 10G-sized Rx rings. */
222 case NETXEN_BRDTYPE_P3_HMEZ:
223 case NETXEN_BRDTYPE_P3_XG_LOM:
224 case NETXEN_BRDTYPE_P3_10G_CX4:
225 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
226 case NETXEN_BRDTYPE_P3_IMEZ:
227 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
228 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
229 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
230 case NETXEN_BRDTYPE_P3_10G_XFP:
231 case NETXEN_BRDTYPE_P3_10000_BASE_T:
232 adapter->msix_supported = !!use_msi_x;
233 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
/* P2 10-gigabit boards: no MSI-X. */
236 case NETXEN_BRDTYPE_P2_SB31_10G:
237 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
238 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
239 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
240 adapter->msix_supported = 0;
241 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
/* P3 gigabit boards. */
244 case NETXEN_BRDTYPE_P3_REF_QG:
245 case NETXEN_BRDTYPE_P3_4_GB:
246 case NETXEN_BRDTYPE_P3_4_GB_MM:
247 adapter->msix_supported = !!use_msi_x;
248 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
/* P2 gigabit boards. */
251 case NETXEN_BRDTYPE_P2_SB35_4G:
252 case NETXEN_BRDTYPE_P2_SB31_2G:
253 adapter->msix_supported = 0;
254 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
/* Dual-personality board: ring sizing depends on detected port type. */
257 case NETXEN_BRDTYPE_P3_10G_TP:
258 adapter->msix_supported = !!use_msi_x;
259 if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
260 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
262 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
/* Unknown board (default case): safest settings plus a warning. */
266 adapter->msix_supported = 0;
267 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
269 printk(KERN_WARNING "Unknown board type(0x%x)\n",
270 adapter->ahw.boardcfg.board_type);
/* Board-independent ring sizes. */
274 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST;
275 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
276 adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
278 adapter->max_possible_rss_rings = 1;
/*
 * netxen_check_hw_init - first-boot handshake with the card.
 * first_boot is the value read from CAM RAM 0x1fc by the caller;
 * 0x55555555 means the card has not been initialized since power-up.
 */
283 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
287 if (first_boot == 0x55555555) {
288 /* This is the first boot after power up */
289 adapter->pci_write_normalize(adapter,
290 NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC;
292 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
295 /* PCI bus master workaround */
296 adapter->hw_read_wx(adapter,
297 NETXEN_PCIE_REG(0x4), &first_boot, 4);
/* Set the bus-master bit (0x4) if the firmware left it clear;
 * the read-back forces the write to post. */
298 if (!(first_boot & 0x4)) {
300 adapter->hw_write_wx(adapter,
301 NETXEN_PCIE_REG(0x4), &first_boot, 4);
302 adapter->hw_read_wx(adapter,
303 NETXEN_PCIE_REG(0x4), &first_boot, 4);
306 /* This is the first boot after power up */
307 adapter->hw_read_wx(adapter,
308 NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4);
/* 0x80000f is the expected post-reset signature; anything else
 * means the boot loader must be kicked manually below. */
309 if (first_boot != 0x80000f) {
310 /* clear the register for future unloads/loads */
311 adapter->pci_write_normalize(adapter,
312 NETXEN_CAM_RAM(0x1fc), 0);
316 /* Start P2 boot loader */
317 val = adapter->pci_read_normalize(adapter,
318 NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
319 adapter->pci_write_normalize(adapter,
320 NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
/* Poll until the boot loader clears the magic, bounded at 5000
 * iterations (loop head not visible in this excerpt). */
324 val = adapter->pci_read_normalize(adapter,
325 NETXEN_CAM_RAM(0x1fc));
327 if (++timeout > 5000)
330 } while (val == NETXEN_BDINFO_MAGIC);
/*
 * netxen_set_port_mode - program port mode and WoL port mode registers.
 * Only the P3 HMEZ/XG_LOM boards honor the port_mode module option; any
 * unrecognized value falls back to auto-negotiation. wol_port_mode is
 * likewise sanitized to auto-neg before being written.
 */
335 static void netxen_set_port_mode(struct netxen_adapter *adapter)
339 val = adapter->ahw.boardcfg.board_type;
340 if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
341 (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
342 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
343 data = NETXEN_PORT_MODE_802_3_AP;
344 adapter->hw_write_wx(adapter,
345 NETXEN_PORT_MODE_ADDR, &data, 4);
346 } else if (port_mode == NETXEN_PORT_MODE_XG) {
347 data = NETXEN_PORT_MODE_XG;
348 adapter->hw_write_wx(adapter,
349 NETXEN_PORT_MODE_ADDR, &data, 4);
350 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
351 data = NETXEN_PORT_MODE_AUTO_NEG_1G;
352 adapter->hw_write_wx(adapter,
353 NETXEN_PORT_MODE_ADDR, &data, 4);
354 } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
355 data = NETXEN_PORT_MODE_AUTO_NEG_XG;
356 adapter->hw_write_wx(adapter,
357 NETXEN_PORT_MODE_ADDR, &data, 4);
/* Default (final else, brace not visible): plain auto-negotiation. */
359 data = NETXEN_PORT_MODE_AUTO_NEG;
360 adapter->hw_write_wx(adapter,
361 NETXEN_PORT_MODE_ADDR, &data, 4);
/* Sanitize the WoL mode option before writing it to the card. */
364 if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
365 (wol_port_mode != NETXEN_PORT_MODE_XG) &&
366 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
367 (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
368 wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
370 adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE,
375 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
380 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
382 pci_read_config_dword(pdev, pos, &control);
384 control |= PCI_MSIX_FLAGS_ENABLE;
387 pci_write_config_dword(pdev, pos, control);
391 static void netxen_init_msix_entries(struct netxen_adapter *adapter)
395 for (i = 0; i < MSIX_ENTRIES_PER_ADAPTER; i++)
396 adapter->msix_entries[i].entry = i;
400 netxen_read_mac_addr(struct netxen_adapter *adapter)
405 struct net_device *netdev = adapter->netdev;
406 struct pci_dev *pdev = adapter->pdev;
408 if (netxen_is_flash_supported(adapter) != 0)
411 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
412 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
415 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
419 p = (unsigned char *)&mac_addr;
420 for (i = 0; i < 6; i++)
421 netdev->dev_addr[i] = *(p + 5 - i);
423 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
425 /* set station address */
427 if (!is_valid_ether_addr(netdev->perm_addr))
428 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
430 adapter->macaddr_set(adapter, netdev->dev_addr);
435 static void netxen_set_multicast_list(struct net_device *dev)
437 struct netxen_adapter *adapter = netdev_priv(dev);
439 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
440 netxen_p3_nic_set_multi(dev);
442 netxen_p2_nic_set_multi(dev);
445 static const struct net_device_ops netxen_netdev_ops = {
446 .ndo_open = netxen_nic_open,
447 .ndo_stop = netxen_nic_close,
448 .ndo_start_xmit = netxen_nic_xmit_frame,
449 .ndo_get_stats = netxen_nic_get_stats,
450 .ndo_validate_addr = eth_validate_addr,
451 .ndo_set_multicast_list = netxen_set_multicast_list,
452 .ndo_set_mac_address = netxen_nic_set_mac,
453 .ndo_change_mtu = netxen_nic_change_mtu,
454 .ndo_tx_timeout = netxen_tx_timeout,
455 #ifdef CONFIG_NET_POLL_CONTROLLER
456 .ndo_poll_controller = netxen_nic_poll_controller,
/*
 * netxen_setup_intr - select the interrupt mode for this function.
 * Tries MSI-X first (when the board supports it), then MSI (when the
 * use_msi option allows), and finally legacy INTx. Also caches the
 * per-function legacy interrupt register set.
 */
461 netxen_setup_intr(struct netxen_adapter *adapter)
463 struct netxen_legacy_intr_set *legacy_intrp;
464 struct pci_dev *pdev = adapter->pdev;
466 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
467 adapter->intr_scheme = -1;
468 adapter->msi_mode = -1;
/* P3 B0+ has per-function legacy interrupt registers; older parts
 * share entry 0. */
470 if (adapter->ahw.revision_id >= NX_P3_B0)
471 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
473 legacy_intrp = &legacy_intr[0];
474 adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit;
475 adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg;
476 adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg;
477 adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg;
/* Start from a clean state: MSI-X capability bit off. */
479 netxen_set_msix_bit(pdev, 0);
481 if (adapter->msix_supported) {
483 netxen_init_msix_entries(adapter);
/* On pci_enable_msix() failure fall through to MSI (branch body
 * not visible in this excerpt). */
484 if (pci_enable_msix(pdev, adapter->msix_entries,
485 MSIX_ENTRIES_PER_ADAPTER))
488 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
489 netxen_set_msix_bit(pdev, 1);
490 dev_info(&pdev->dev, "using msi-x interrupts\n");
494 if (use_msi && !pci_enable_msi(pdev)) {
495 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
496 dev_info(&pdev->dev, "using msi interrupts\n");
498 dev_info(&pdev->dev, "using legacy interrupts\n");
503 netxen_teardown_intr(struct netxen_adapter *adapter)
505 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
506 pci_disable_msix(adapter->pdev);
507 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
508 pci_disable_msi(adapter->pdev);
512 netxen_cleanup_pci_map(struct netxen_adapter *adapter)
514 if (adapter->ahw.db_base != NULL)
515 iounmap(adapter->ahw.db_base);
516 if (adapter->ahw.pci_base0 != NULL)
517 iounmap(adapter->ahw.pci_base0);
518 if (adapter->ahw.pci_base1 != NULL)
519 iounmap(adapter->ahw.pci_base1);
520 if (adapter->ahw.pci_base2 != NULL)
521 iounmap(adapter->ahw.pci_base2);
/*
 * netxen_setup_pci_map - map BAR0 (and the doorbell BAR on P2) and bind
 * the register-access function pointers matching the BAR size:
 * 128MB/32MB layouts use the _128M accessors, the 2MB layout the _2M
 * accessors. On any failure the partial mappings are torn down.
 */
525 netxen_setup_pci_map(struct netxen_adapter *adapter)
527 void __iomem *mem_ptr0 = NULL;
528 void __iomem *mem_ptr1 = NULL;
529 void __iomem *mem_ptr2 = NULL;
530 void __iomem *db_ptr = NULL;
532 unsigned long first_page_group_end;
533 unsigned long first_page_group_start;
534 unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
536 struct pci_dev *pdev = adapter->pdev;
537 int pci_func = adapter->ahw.pci_func;
542 * Set the CRB window to invalid. If any register in window 0 is
543 * accessed it should set the window to 0 and then reset it to 1.
545 adapter->curr_window = 255;
546 adapter->ahw.qdr_sn_window = -1;
547 adapter->ahw.ddr_mn_window = -1;
549 /* remap phys address */
550 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
551 mem_len = pci_resource_len(pdev, 0);
/* Default to the 128M accessor set; overridden below for 2MB BARs. */
554 adapter->hw_write_wx = netxen_nic_hw_write_wx_128M;
555 adapter->hw_read_wx = netxen_nic_hw_read_wx_128M;
556 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M;
557 adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M;
558 adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M;
559 adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M;
560 adapter->pci_set_window = netxen_nic_pci_set_window_128M;
561 adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
562 adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
564 /* 128 Meg of memory */
565 if (mem_len == NETXEN_PCI_128MB_SIZE) {
566 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
567 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
568 SECOND_PAGE_GROUP_SIZE);
569 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
570 THIRD_PAGE_GROUP_SIZE);
571 first_page_group_start = FIRST_PAGE_GROUP_START;
572 first_page_group_end = FIRST_PAGE_GROUP_END;
/* 32MB layout: same accessors, no first page group. */
573 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
574 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
575 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
576 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
577 first_page_group_start = 0;
578 first_page_group_end = 0;
/* 2MB layout (P3): switch to the _2M accessor set and map all of BAR0. */
579 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
580 adapter->hw_write_wx = netxen_nic_hw_write_wx_2M;
581 adapter->hw_read_wx = netxen_nic_hw_read_wx_2M;
582 adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M;
583 adapter->pci_write_immediate =
584 netxen_nic_pci_write_immediate_2M;
585 adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M;
586 adapter->pci_write_normalize =
587 netxen_nic_pci_write_normalize_2M;
588 adapter->pci_set_window = netxen_nic_pci_set_window_2M;
589 adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
590 adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
592 mem_ptr0 = pci_ioremap_bar(pdev, 0);
593 if (mem_ptr0 == NULL) {
594 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
598 first_page_group_start = 0;
599 first_page_group_end = 0;
/* 2MB layout uses fixed CRB windows; compute per-function offsets. */
601 adapter->ahw.ddr_mn_window = 0;
602 adapter->ahw.qdr_sn_window = 0;
604 adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW +
606 adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW;
608 adapter->ahw.ms_win_crb += (pci_func * 0x20);
610 adapter->ahw.ms_win_crb +=
611 0xA0 + ((pci_func - 4) * 0x10);
616 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
/* Publish the mappings on the adapter. */
618 adapter->ahw.pci_base0 = mem_ptr0;
619 adapter->ahw.pci_len0 = pci_len0;
620 adapter->ahw.first_page_group_start = first_page_group_start;
621 adapter->ahw.first_page_group_end = first_page_group_end;
622 adapter->ahw.pci_base1 = mem_ptr1;
623 adapter->ahw.pci_base2 = mem_ptr2;
/* P3 has no separate doorbell BAR; skip the doorbell mapping below. */
625 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
628 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
629 db_len = pci_resource_len(pdev, 4);
632 printk(KERN_ERR "%s: doorbell is disabled\n",
633 netxen_nic_driver_name);
638 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
640 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
641 netxen_nic_driver_name);
647 adapter->ahw.db_base = db_ptr;
648 adapter->ahw.db_len = db_len;
/* Error path: undo any mappings made so far. */
652 netxen_cleanup_pci_map(adapter);
/*
 * netxen_start_firmware - bring the on-card firmware up.
 * Runs the first-boot handshake, programs port mode (P3), loads the
 * firmware image, sets up the dma watchdog/offload area, reports the
 * driver version to the card, and waits for the phantom handshake.
 */
657 netxen_start_firmware(struct netxen_adapter *adapter)
659 int val, err, first_boot;
660 struct pci_dev *pdev = adapter->pdev;
/* CAM RAM 0x1fc holds the boot magic; 0x55555555 == never booted. */
662 first_boot = adapter->pci_read_normalize(adapter,
663 NETXEN_CAM_RAM(0x1fc));
665 err = netxen_check_hw_init(adapter, first_boot);
667 dev_err(&pdev->dev, "error in init HW init sequence\n");
671 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
672 netxen_set_port_mode(adapter);
/* Cold boot: clear CMDPEG state and initialize from the on-board ROM. */
674 if (first_boot != 0x55555555) {
675 adapter->pci_write_normalize(adapter,
676 CRB_CMDPEG_STATE, 0);
677 netxen_pinit_from_rom(adapter, 0);
680 netxen_load_firmware(adapter);
682 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
684 /* Initialize multicast addr pool owners */
686 if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
688 netxen_crb_writelit_adapter(adapter,
689 NETXEN_MAC_ADDR_CNTL_REG, val);
693 err = netxen_initialize_adapter_offload(adapter);
698 * Tell the hardware our version number.
700 val = (_NETXEN_NIC_LINUX_MAJOR << 16)
701 | ((_NETXEN_NIC_LINUX_MINOR << 8))
702 | (_NETXEN_NIC_LINUX_SUBVERSION);
703 adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, val);
705 /* Handshake with the card before we register the devices. */
706 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
/* On handshake failure release the offload area allocated above. */
708 netxen_free_adapter_offload(adapter);
/*
 * netxen_nic_probe - PCI probe entry point.
 * Enables the device, maps its BARs, starts firmware (on the function
 * that owns it), selects an interrupt mode, reads the MAC address and
 * registers the netdev. Errors unwind through the labelled exit paths.
 */
716 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
718 struct net_device *netdev = NULL;
719 struct netxen_adapter *adapter = NULL;
723 int pci_func_id = PCI_FUNC(pdev->devfn);
/* Print the driver banner once, from function 0 only. */
726 if (pci_func_id == 0)
727 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
/* Reject non-Ethernet-class functions on multi-function parts. */
729 if (pdev->class != 0x020000) {
730 printk(KERN_DEBUG "NetXen function %d, class %x will not "
731 "be enabled.\n",pci_func_id, pdev->class);
/* Pre-production P3 silicon (A0..B0) is not supported. */
735 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
736 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
737 "will not be enabled.\n",
742 if ((err = pci_enable_device(pdev)))
745 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
747 goto err_out_disable_pdev;
750 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
751 goto err_out_disable_pdev;
753 pci_set_master(pdev);
755 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
757 printk(KERN_ERR"%s: Failed to allocate memory for the "
758 "device block.Check system memory resource"
759 " usage.\n", netxen_nic_driver_name);
760 goto err_out_free_res;
763 SET_NETDEV_DEV(netdev, &pdev->dev);
765 adapter = netdev_priv(netdev);
766 adapter->netdev = netdev;
767 adapter->pdev = pdev;
768 adapter->ahw.pci_func = pci_func_id;
770 revision_id = pdev->revision;
771 adapter->ahw.revision_id = revision_id;
773 err = nx_set_dma_mask(adapter, revision_id);
775 goto err_out_free_netdev;
777 rwlock_init(&adapter->adapter_lock);
779 err = netxen_setup_pci_map(adapter);
781 goto err_out_free_netdev;
783 netif_napi_add(netdev, &adapter->napi,
784 netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
786 /* This will be reset for mezz cards */
787 adapter->portnum = pci_func_id;
788 adapter->status &= ~NETXEN_NETDEV_STATUS;
789 adapter->rx_csum = 1;
790 adapter->mc_enabled = 0;
/* P3 firmware supports a larger multicast filter table than P2. */
791 if (NX_IS_REVISION_P3(revision_id))
792 adapter->max_mc_count = 38;
794 adapter->max_mc_count = 16;
796 netdev->netdev_ops = &netxen_netdev_ops;
797 netdev->watchdog_timeo = 2*HZ;
799 netxen_nic_change_mtu(netdev, netdev->mtu);
801 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
/* Offload capabilities; IPv6 checksum/TSO6 only on P3. */
803 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
804 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
806 if (NX_IS_REVISION_P3(revision_id)) {
807 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
808 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
811 if (adapter->pci_using_dac) {
812 netdev->features |= NETIF_F_HIGHDMA;
813 netdev->vlan_features |= NETIF_F_HIGHDMA;
816 if (netxen_nic_get_board_info(adapter) != 0) {
817 printk("%s: Error getting board config info.\n",
818 netxen_nic_driver_name);
820 goto err_out_iounmap;
823 netxen_initialize_adapter_ops(adapter);
825 /* Mezz cards have PCI function 0,2,3 enabled */
826 switch (adapter->ahw.boardcfg.board_type) {
827 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
828 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
829 if (pci_func_id >= 2)
830 adapter->portnum = pci_func_id - 2;
837 * This call will setup various max rx/tx counts.
838 * It must be done before any buffer/ring allocations.
840 netxen_check_options(adapter);
/* Only one function/port boots the firmware (pci_func 0 on P3,
 * port 0 on P2); bodies of these branches not visible here. */
843 if (NX_IS_REVISION_P3(revision_id)) {
844 if (adapter->ahw.pci_func == 0)
847 if (adapter->portnum == 0)
852 err = netxen_start_firmware(adapter);
854 goto err_out_iounmap;
857 netxen_nic_flash_print(adapter);
/* Report whether the P3 firmware runs in cut-through mode. */
859 if (NX_IS_REVISION_P3(revision_id)) {
860 adapter->hw_read_wx(adapter,
861 NETXEN_MIU_MN_CONTROL, &val, 4);
862 adapter->ahw.cut_through = (val & 0x4) ? 1 : 0;
863 dev_info(&pdev->dev, "firmware running in %s mode\n",
864 adapter->ahw.cut_through ? "cut through" : "legacy");
868 * See if the firmware gave us a virtual-physical port mapping.
870 adapter->physical_port = adapter->portnum;
871 if (adapter->fw_major < 4) {
872 i = adapter->pci_read_normalize(adapter,
873 CRB_V2P(adapter->portnum));
875 adapter->physical_port = i;
/* Interrupt mode selection (MSI-X / MSI / legacy). */
878 adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
880 netxen_set_msix_bit(pdev, 0);
882 netxen_setup_intr(adapter);
884 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
885 netdev->irq = adapter->msix_entries[0].vector;
887 netdev->irq = pdev->irq;
889 err = netxen_receive_peg_ready(adapter);
891 goto err_out_disable_msi;
/* Watchdog timer + deferred work for watchdog and tx-timeout. */
893 init_timer(&adapter->watchdog_timer);
894 adapter->watchdog_timer.function = &netxen_watchdog;
895 adapter->watchdog_timer.data = (unsigned long)adapter;
896 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
897 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
/* A bad MAC only warns; registration proceeds regardless. */
899 err = netxen_read_mac_addr(adapter);
901 dev_warn(&pdev->dev, "failed to read mac addr\n");
903 netif_carrier_off(netdev);
904 netif_stop_queue(netdev);
906 if ((err = register_netdev(netdev))) {
907 printk(KERN_ERR "%s: register_netdev failed port #%d"
908 " aborting\n", netxen_nic_driver_name,
911 goto err_out_disable_msi;
914 pci_set_drvdata(pdev, adapter);
916 switch (adapter->ahw.board_type) {
918 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
919 adapter->netdev->name);
921 case NETXEN_NIC_XGBE:
922 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
923 adapter->netdev->name);
/* Error unwinding (labels partially visible below). */
930 netxen_teardown_intr(adapter);
933 netxen_free_adapter_offload(adapter);
936 netxen_cleanup_pci_map(adapter);
942 pci_release_regions(pdev);
944 err_out_disable_pdev:
945 pci_set_drvdata(pdev, NULL);
946 pci_disable_device(pdev);
/*
 * netxen_nic_remove - PCI remove entry point; reverses probe.
 * Unregisters the netdev, frees rings/buffers if the adapter was fully
 * up, releases irq/MSI resources, unmaps BARs and disables the device.
 */
950 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
952 struct netxen_adapter *adapter;
953 struct net_device *netdev;
955 adapter = pci_get_drvdata(pdev);
959 netdev = adapter->netdev;
961 unregister_netdev(netdev);
/* Hardware/software rings exist only once the UP handshake completed. */
963 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
964 netxen_free_hw_resources(adapter);
965 netxen_release_rx_buffers(adapter);
966 netxen_free_sw_resources(adapter);
968 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
969 netxen_p3_free_mac_list(adapter);
/* The offload area is owned by port 0 (see probe). */
972 if (adapter->portnum == 0)
973 netxen_free_adapter_offload(adapter);
976 free_irq(adapter->irq, adapter);
978 netxen_teardown_intr(adapter);
980 netxen_cleanup_pci_map(adapter);
982 pci_release_regions(pdev);
983 pci_disable_device(pdev);
984 pci_set_drvdata(pdev, NULL);
/*
 * netxen_nic_open - ndo_open: bring the interface up.
990 * Called when a network interface is made active
991 * @returns 0 on success, negative value on failure
 */
993 static int netxen_nic_open(struct net_device *netdev)
995 struct netxen_adapter *adapter = netdev_priv(netdev)
998 irq_handler_t handler;
999 unsigned long flags = IRQF_SAMPLE_RANDOM;
1001 if (adapter->driver_mismatch)
/* One-time bring-up: firmware init plus sw/hw ring allocation. */
1004 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
1005 err = netxen_init_firmware(adapter);
1007 printk(KERN_ERR "Failed to init firmware\n");
/* Older firmware exposes three RDS rings (incl. LRO), newer two. */
1011 if (adapter->fw_major < 4)
1012 adapter->max_rds_rings = 3;
1014 adapter->max_rds_rings = 2;
1016 err = netxen_alloc_sw_resources(adapter);
1018 printk(KERN_ERR "%s: Error in setting sw resources\n",
1023 netxen_nic_clear_stats(adapter);
1025 err = netxen_alloc_hw_resources(adapter);
1027 printk(KERN_ERR "%s: Error in setting hw resources\n",
1029 goto err_out_free_sw;
/* Firmware must use the per-port interrupt scheme we expect. */
1032 if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) ||
1033 (adapter->intr_scheme != INTR_SCHEME_PERPORT)) {
1034 printk(KERN_ERR "%s: Firmware interrupt scheme is "
1035 "incompatible with driver\n",
1037 adapter->driver_mismatch = 1;
1038 goto err_out_free_hw;
/* fw < 4: host maintains the cmd ring indices in CRB directly. */
1041 if (adapter->fw_major < 4) {
1042 adapter->crb_addr_cmd_producer =
1043 crb_cmd_producer[adapter->portnum];
1044 adapter->crb_addr_cmd_consumer =
1045 crb_cmd_consumer[adapter->portnum];
1047 netxen_nic_update_cmd_producer(adapter, 0);
1048 netxen_nic_update_cmd_consumer(adapter, 0);
/* Prime all receive rings with buffers. */
1051 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1052 for (ring = 0; ring < adapter->max_rds_rings; ring++)
1053 netxen_post_rx_buffers(adapter, ctx, ring);
/* Choose the handler matching the negotiated interrupt mode;
 * legacy INTx must be shareable. */
1055 if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
1056 handler = netxen_msix_intr;
1057 else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
1058 handler = netxen_msi_intr;
1060 flags |= IRQF_SHARED;
1061 handler = netxen_intr;
1063 adapter->irq = netdev->irq;
1064 err = request_irq(adapter->irq, handler,
1065 flags, netdev->name, adapter);
1067 printk(KERN_ERR "request_irq failed with: %d\n", err);
1068 goto err_out_free_rxbuf;
1071 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
1074 /* Done here again so that even if phantom sw overwrote it,
1076 err = adapter->init_port(adapter, adapter->physical_port);
1078 printk(KERN_ERR "%s: Failed to initialize port %d\n",
1079 netxen_nic_driver_name, adapter->portnum);
1080 goto err_out_free_irq;
1082 adapter->macaddr_set(adapter, netdev->dev_addr);
1084 netxen_nic_set_link_parameters(adapter);
1086 netxen_set_multicast_list(netdev);
1087 if (adapter->set_mtu)
1088 adapter->set_mtu(adapter, netdev->mtu);
/* Go live: watchdog, NAPI, interrupts, Tx queue. */
1090 adapter->ahw.linkup = 0;
1091 mod_timer(&adapter->watchdog_timer, jiffies);
1093 napi_enable(&adapter->napi);
1094 netxen_nic_enable_int(adapter);
1096 netif_start_queue(netdev);
/* Error unwinding in reverse order of acquisition. */
1101 free_irq(adapter->irq, adapter);
1103 netxen_release_rx_buffers(adapter);
1105 netxen_free_hw_resources(adapter);
1107 netxen_free_sw_resources(adapter);
1112 * netxen_nic_close - Disables a network interface entry point
1114 static int netxen_nic_close(struct net_device *netdev)
1116 struct netxen_adapter *adapter = netdev_priv(netdev);
1118 netif_carrier_off(netdev);
1119 netif_stop_queue(netdev);
1120 napi_disable(&adapter->napi);
1122 if (adapter->stop_port)
1123 adapter->stop_port(adapter);
1125 netxen_nic_disable_int(adapter);
1127 netxen_release_tx_buffers(adapter);
1129 FLUSH_SCHEDULED_WORK();
1130 del_timer_sync(&adapter->watchdog_timer);
/*
 * netxen_tso_check - fill TSO/checksum-offload fields of a Tx descriptor.
 * Inspects the skb (unwrapping one VLAN header if present) and sets the
 * opcode/flags, MSS and header lengths the hardware needs.
 * Returns whether the frame uses TSO (per the bool return type).
 */
1135 static bool netxen_tso_check(struct net_device *netdev,
1136 struct cmd_desc_type0 *desc, struct sk_buff *skb)
1139 u8 opcode = TX_ETHER_PKT;
1140 __be16 protocol = skb->protocol;
/* VLAN frame: classify by the encapsulated protocol instead. */
1143 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1144 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1145 protocol = vh->h_vlan_encapsulated_proto;
1146 flags = FLAGS_VLAN_TAGGED;
/* TSO path: hardware segments using MSS and total header length. */
1149 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1150 skb_shinfo(skb)->gso_size > 0) {
1152 desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1153 desc->total_hdr_length =
1154 skb_transport_offset(skb) + tcp_hdrlen(skb);
1156 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1157 TX_TCP_LSO6 : TX_TCP_LSO;
/* Checksum-offload path: pick opcode from L3/L4 protocol pair. */
1160 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1163 if (protocol == cpu_to_be16(ETH_P_IP)) {
1164 l4proto = ip_hdr(skb)->protocol;
1166 if (l4proto == IPPROTO_TCP)
1167 opcode = TX_TCP_PKT;
1168 else if(l4proto == IPPROTO_UDP)
1169 opcode = TX_UDP_PKT;
1170 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1171 l4proto = ipv6_hdr(skb)->nexthdr;
1173 if (l4proto == IPPROTO_TCP)
1174 opcode = TX_TCPV6_PKT;
1175 else if(l4proto == IPPROTO_UDP)
1176 opcode = TX_UDPV6_PKT;
1179 desc->tcp_hdr_offset = skb_transport_offset(skb);
1180 desc->ip_hdr_offset = skb_network_offset(skb);
1181 netxen_set_tx_flags_opcode(desc, flags, opcode);
1186 netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1187 struct netxen_cmd_buffer *pbuf, int last)
1190 struct netxen_skb_frag *buffrag;
1192 buffrag = &pbuf->frag_array[0];
1193 pci_unmap_single(pdev, buffrag->dma,
1194 buffrag->length, PCI_DMA_TODEVICE);
1196 for (k = 1; k < last; k++) {
1197 buffrag = &pbuf->frag_array[k];
1198 pci_unmap_page(pdev, buffrag->dma,
1199 buffrag->length, PCI_DMA_TODEVICE);
1203 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* .ndo_start_xmit handler: DMA-maps the skb (linear head plus page frags),
 * fills one or more cmd_desc_type0 ring entries (4 buffers per descriptor),
 * and advances the command-ring producer.  Returns NETDEV_TX_OK on both
 * success and drop; NETDEV_TX_BUSY only when the ring is too full.
 * NOTE(review): several structural lines (braces, `int i, k;`, error labels
 * such as the drop path, the per-(k&3) switch) are missing from this
 * listing; visible code kept byte-identical. */
1205 struct netxen_adapter *adapter = netdev_priv(netdev);
1206 struct netxen_hardware_context *hw = &adapter->ahw;
/* length of the linear (non-paged) part of the skb */
1207 unsigned int first_seg_len = skb->len - skb->data_len;
1208 struct netxen_cmd_buffer *pbuf;
1209 struct netxen_skb_frag *buffrag;
1210 struct cmd_desc_type0 *hwdesc;
1211 struct pci_dev *pdev = adapter->pdev;
1212 dma_addr_t temp_dma;
1215 u32 producer, consumer;
1216 int frag_count, no_of_desc;
1217 u32 num_txd = adapter->max_tx_desc_count;
1218 bool is_tso = false;
/* +1 for the linear head segment */
1220 frag_count = skb_shinfo(skb)->nr_frags + 1;
1222 /* There are 4 buffer slots per descriptor */
1223 no_of_desc = (frag_count + 3) >> 2;
1225 producer = adapter->cmd_producer;
1227 consumer = adapter->last_cmd_consumer;
/* +2 slack: room for the extra LSO header descriptors copied below */
1228 if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
1229 netif_stop_queue(netdev);
1231 return NETDEV_TX_BUSY;
1234 /* Copy the descriptors into the hardware */
1235 hwdesc = &hw->cmd_desc_head[producer];
1236 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
1237 /* Take skb->data itself */
1238 pbuf = &adapter->cmd_buf_arr[producer];
/* fills opcode/mss/header-length fields; true when LSO is in use */
1240 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1243 pbuf->frag_count = frag_count;
1244 buffrag = &pbuf->frag_array[0];
/* map the linear head; must be undone on any later failure */
1245 temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
1247 if (pci_dma_mapping_error(pdev, temp_dma))
1250 buffrag->dma = temp_dma;
1251 buffrag->length = first_seg_len;
1252 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1253 netxen_set_tx_port(hwdesc, adapter->portnum);
1255 hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
1256 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
/* i walks frag_array entries, k selects the buffer slot (0..3) */
1258 for (i = 1, k = 1; i < frag_count; i++, k++) {
1259 struct skb_frag_struct *frag;
1261 unsigned long offset;
1263 /* move to next desc. if there is a need */
1264 if ((i & 0x3) == 0) {
1266 producer = get_next_index(producer, num_txd);
1267 hwdesc = &hw->cmd_desc_head[producer];
1268 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
1269 pbuf = &adapter->cmd_buf_arr[producer];
/* frags[] is 0-based while i starts at 1 (slot 0 is the head) */
1272 frag = &skb_shinfo(skb)->frags[i - 1];
1274 offset = frag->page_offset;
1277 temp_dma = pci_map_page(pdev, frag->page, offset,
1278 len, PCI_DMA_TODEVICE);
/* on failure, unwind the i mappings already made, then drop */
1279 if (pci_dma_mapping_error(pdev, temp_dma)) {
1280 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1285 buffrag->dma = temp_dma;
1286 buffrag->length = temp_len;
/* store into buffer slot k&3 of the current descriptor */
1290 hwdesc->buffer1_length = cpu_to_le16(temp_len);
1291 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
1294 hwdesc->buffer2_length = cpu_to_le16(temp_len);
1295 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
1298 hwdesc->buffer3_length = cpu_to_le16(temp_len);
1299 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
1302 hwdesc->buffer4_length = cpu_to_le16(temp_len);
1303 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
1308 producer = get_next_index(producer, num_txd);
1310 /* For LSO, we need to copy the MAC/IP/TCP headers into
1311 * the descriptor ring
/* one descriptor holds sizeof(cmd_desc_type0)-2 header bytes; spill
 * the remainder into a second descriptor when the headers are longer */
1314 int hdr_len, first_hdr_len, more_hdr;
1315 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1316 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1317 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1320 first_hdr_len = hdr_len;
1323 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1324 hwdesc = &hw->cmd_desc_head[producer];
1325 pbuf = &adapter->cmd_buf_arr[producer];
1328 /* copy the first chunk (descriptor payload minus 2-byte prefix) */
1329 memcpy(((void *)hwdesc) + 2,
1330 (void *)(skb->data), first_hdr_len);
1331 producer = get_next_index(producer, num_txd);
1334 hwdesc = &hw->cmd_desc_head[producer];
1335 pbuf = &adapter->cmd_buf_arr[producer];
1337 /* copy the next chunk - should be enough except
1338 * for pathological case
1340 skb_copy_from_linear_data_offset(skb, first_hdr_len,
1344 producer = get_next_index(producer, num_txd);
/* publish the new producer index to hardware */
1348 adapter->cmd_producer = producer;
1349 adapter->stats.txbytes += skb->len;
1351 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
1353 adapter->stats.xmitcalled++;
1354 netdev->trans_start = jiffies;
1356 return NETDEV_TX_OK;
/* drop path: count, free the skb, report OK so the stack moves on */
1359 adapter->stats.txdropped++;
1360 dev_kfree_skb_any(skb);
1361 return NETDEV_TX_OK;
1364 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
/* Polls the firmware temperature word and reacts to state changes.
 * Presumably returns nonzero when the device was shut down for
 * over-temperature (NX_TEMP_PANIC) — the return statements are not
 * visible in this listing; TODO confirm against full source. */
1366 struct net_device *netdev = adapter->netdev;
1367 uint32_t temp, temp_state, temp_val;
/* raw word encodes both state and degrees C; decoded below */
1370 temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE);
1372 temp_state = nx_get_temp_state(temp);
1373 temp_val = nx_get_temp_val(temp);
/* PANIC: hardware already shut itself down; take the interface down */
1375 if (temp_state == NX_TEMP_PANIC) {
1377 "%s: Device temperature %d degrees C exceeds"
1378 " maximum allowed. Hardware has been shut down.\n",
1379 netxen_nic_driver_name, temp_val);
1381 netif_carrier_off(netdev);
1382 netif_stop_queue(netdev);
/* WARN: log once on the NORMAL -> WARN transition only */
1384 } else if (temp_state == NX_TEMP_WARN) {
1385 if (adapter->temp == NX_TEMP_NORMAL) {
1387 "%s: Device temperature %d degrees C "
1388 "exceeds operating range."
1389 " Immediate action needed.\n",
1390 netxen_nic_driver_name, temp_val);
/* back to normal: log recovery if we were previously in WARN */
1393 if (adapter->temp == NX_TEMP_WARN) {
1395 "%s: Device temperature is now %d degrees C"
1396 " in normal range.\n", netxen_nic_driver_name,
/* remember the last observed state for edge-triggered logging */
1400 adapter->temp = temp_state;
1404 static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
/* Samples the link-state CRB register and, on an up/down transition,
 * updates carrier state, the TX queue, and the cached ahw.linkup flag. */
1406 struct net_device *netdev = adapter->netdev;
1407 u32 val, port, linkup;
1409 port = adapter->physical_port;
/* P3 chips: per-PCI-function link state packed into CRB_XG_STATE_P3 */
1411 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1412 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
1413 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1414 linkup = (val == XG_LINK_UP_P3);
/* older chips: CRB_XG_STATE, one bit per GbE port ... */
1416 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
1417 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
1418 linkup = (val >> port) & 1;
/* ... or one byte per 10G port compared against XG_LINK_UP */
1420 val = (val >> port*8) & 0xff;
1421 linkup = (val == XG_LINK_UP);
/* transition up -> down: drop carrier and stop transmissions */
1425 if (adapter->ahw.linkup && !linkup) {
1426 printk(KERN_INFO "%s: %s NIC Link is down\n",
1427 netxen_nic_driver_name, netdev->name);
1428 adapter->ahw.linkup = 0;
1429 if (netif_running(netdev)) {
1430 netif_carrier_off(netdev);
1431 netif_stop_queue(netdev);
1434 netxen_nic_set_link_parameters(adapter);
/* transition down -> up: restore carrier and wake the TX queue */
1435 } else if (!adapter->ahw.linkup && linkup) {
1436 printk(KERN_INFO "%s: %s NIC Link is up\n",
1437 netxen_nic_driver_name, netdev->name);
1438 adapter->ahw.linkup = 1;
1439 if (netif_running(netdev)) {
1440 netif_carrier_on(netdev);
1441 netif_wake_queue(netdev);
1444 netxen_nic_set_link_parameters(adapter);
1448 static void netxen_watchdog(unsigned long v)
1450 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1452 SCHEDULE_WORK(&adapter->watchdog_task);
1455 void netxen_watchdog_task(struct work_struct *work)
/* Workqueue body for the watchdog timer: checks device temperature
 * (port 0 only) and link state, then re-arms the timer while the
 * interface is running.  NOTE(review): the early-return taken when
 * check_temp reports shutdown is not visible in this listing. */
1457 struct netxen_adapter *adapter =
1458 container_of(work, struct netxen_adapter, watchdog_task);
/* only the first port polls the shared temperature sensor */
1460 if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
1463 netxen_nic_handle_phy_intr(adapter);
/* re-arm: watchdog fires every 2 seconds while the netdev is up */
1465 if (netif_running(adapter->netdev))
1466 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1469 static void netxen_tx_timeout(struct net_device *netdev)
1471 struct netxen_adapter *adapter = (struct netxen_adapter *)
1472 netdev_priv(netdev);
1473 SCHEDULE_WORK(&adapter->tx_timeout_task);
1476 static void netxen_tx_timeout_task(struct work_struct *work)
/* Workqueue body for TX-timeout recovery: quiesce interrupts and NAPI,
 * reset the transmit watchdog timestamp, then re-enable everything and
 * wake the queue.  The call order (disable int before NAPI, re-enable
 * in reverse) is deliberate — do not reorder. */
1478 struct netxen_adapter *adapter =
1479 container_of(work, struct netxen_adapter, tx_timeout_task);
1481 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1482 netxen_nic_driver_name, adapter->netdev->name);
1484 netxen_nic_disable_int(adapter);
1485 napi_disable(&adapter->napi);
/* pretend a transmit just happened so the watchdog does not refire */
1487 adapter->netdev->trans_start = jiffies;
1489 napi_enable(&adapter->napi);
1490 netxen_nic_enable_int(adapter);
1491 netif_wake_queue(adapter->netdev);
1495 * netxen_nic_get_stats - Get System Network Statistics
1496 * @netdev: network interface device structure
1498 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1500 struct netxen_adapter *adapter = netdev_priv(netdev);
1501 struct net_device_stats *stats = &adapter->net_stats;
1503 memset(stats, 0, sizeof(*stats));
1505 /* total packets received */
1506 stats->rx_packets = adapter->stats.no_rcv;
1507 /* total packets transmitted */
1508 stats->tx_packets = adapter->stats.xmitedframes +
1509 adapter->stats.xmitfinished;
1510 /* total bytes received */
1511 stats->rx_bytes = adapter->stats.rxbytes;
1512 /* total bytes transmitted */
1513 stats->tx_bytes = adapter->stats.txbytes;
1514 /* bad packets received */
1515 stats->rx_errors = adapter->stats.rcvdbadskb;
1516 /* packet transmit problems */
1517 stats->tx_errors = adapter->stats.nocmddescriptor;
1518 /* no space in linux buffers */
1519 stats->rx_dropped = adapter->stats.rxdropped;
1520 /* no space available in linux */
1521 stats->tx_dropped = adapter->stats.txdropped;
1526 static irqreturn_t netxen_intr(int irq, void *data)
/* Legacy (INTx) interrupt handler.  The line may be shared, so the
 * handler first decides whether this device raised the interrupt,
 * claims/acknowledges it, then hands processing to NAPI.
 * NOTE(review): the IRQ_NONE/IRQ_HANDLED return statements are not
 * visible in this listing. */
1528 struct netxen_adapter *adapter = data;
1531 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
/* our vector bit clear -> some other device on the shared line */
1533 if (!(status & adapter->legacy_intr.int_vec_bit))
/* P3 B1 and later can double-check via the interrupt state machine */
1536 if (adapter->ahw.revision_id >= NX_P3_B1) {
1537 /* check interrupt state machine, to be sure */
1538 status = adapter->pci_read_immediate(adapter,
1540 if (!ISR_LEGACY_INT_TRIGGERED(status))
/* older chips: per-port bit (7 + portnum) in CRB_INT_VECTOR */
1544 unsigned long our_int = 0;
1546 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
1548 /* not our interrupt */
1549 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1552 /* claim interrupt by writing the cleared vector back */
1553 adapter->pci_write_normalize(adapter,
1554 CRB_INT_VECTOR, (our_int & 0xffffffff));
1557 /* clear interrupt: old firmware masks, new firmware acks status reg */
1558 if (adapter->fw_major < 4)
1559 netxen_nic_disable_int(adapter);
1561 adapter->pci_write_immediate(adapter,
1562 adapter->legacy_intr.tgt_status_reg,
1564 /* read twice to ensure write is flushed */
1565 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1566 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
/* defer the actual RX/TX processing to the NAPI poll loop */
1568 napi_schedule(&adapter->napi);
1573 static irqreturn_t netxen_msi_intr(int irq, void *data)
1575 struct netxen_adapter *adapter = data;
1577 /* clear interrupt */
1578 adapter->pci_write_immediate(adapter,
1579 msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);
1581 napi_schedule(&adapter->napi);
1585 static irqreturn_t netxen_msix_intr(int irq, void *data)
1587 struct netxen_adapter *adapter = data;
1589 napi_schedule(&adapter->napi);
1593 static int netxen_nic_poll(struct napi_struct *napi, int budget)
/* NAPI poll: reap TX completions, then service each receive context
 * with an equal share of the budget; re-enable the device interrupt
 * once all work fits under budget and the command ring is drained.
 * NOTE(review): declarations of tx_complete/ctx/work_done and the
 * final `return work_done;` are not visible in this listing. */
1595 struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
/* true when the command (TX) ring has been fully processed */
1600 tx_complete = netxen_process_cmd_ring(adapter);
1603 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1605 * Fairness issue: processing contexts in order would give undue
1606 * weight to receive context 0.
1610 * To avoid starvation, we give each of our receivers
1611 * a fraction of the quota. Sometimes, it might happen that we
1612 * have enough quota to process every packet, but since all the
1613 * packets are on one context, it gets only half of the quota,
1614 * and ends up not processing it.
1616 work_done += netxen_process_rcv_ring(adapter, ctx,
1617 budget / MAX_RCV_CTX);
/* done for now: leave polling mode and unmask the interrupt */
1620 if ((work_done < budget) && tx_complete) {
1621 napi_complete(&adapter->napi);
1622 netxen_nic_enable_int(adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: run the legacy interrupt handler synchronously with the
 * device IRQ masked, so netconsole/kgdb can drain the rings without
 * relying on interrupt delivery.
 */
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->irq);
	netxen_intr(adapter->irq, adapter);
	enable_irq(adapter->irq);
}
#endif
1638 static struct pci_driver netxen_driver = {
1639 .name = netxen_nic_driver_name,
1640 .id_table = netxen_pci_tbl,
1641 .probe = netxen_nic_probe,
1642 .remove = __devexit_p(netxen_nic_remove)
1645 /* Driver Registration on NetXen card */
1647 static int __init netxen_init_module(void)
1649 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1652 return pci_register_driver(&netxen_driver);
1655 module_init(netxen_init_module);
1657 static void __exit netxen_exit_module(void)
1659 pci_unregister_driver(&netxen_driver);
1660 destroy_workqueue(netxen_workq);
1663 module_exit(netxen_exit_module);