2 * Copyright (C) 2003 - 2006 NetXen, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
23 * Contact Information:
26 * 3965 Freedom Circle, Fourth floor,
27 * Santa Clara, CA 95054
30 * Main source file for NetXen NIC Driver on Linux
34 #include <linux/vmalloc.h>
35 #include <linux/highmem.h>
36 #include "netxen_nic_hw.h"
38 #include "netxen_nic.h"
39 #include "netxen_nic_phan_reg.h"
41 #include <linux/dma-mapping.h>
42 #include <linux/vmalloc.h>
/* Module identification exported via modinfo. */
45 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
/* Driver name: used for pci_request_regions() and in log messages. */
49 char netxen_nic_driver_name[] = "netxen-nic";
50 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
51 NETXEN_NIC_LINUX_VERSIONID;
/* NAPI poll weight, "interface is up" magic value, and peg-tune argument. */
53 #define NETXEN_NETDEV_WEIGHT 120
54 #define NETXEN_ADAPTER_UP_MAGIC 777
55 #define NETXEN_NIC_PEG_TUNE 0
/* Chip revision id; overwritten from PCI config space in probe(). */
57 u8 nx_p2_id = NX_P2_C0;
/*
 * DMA address masks: plain 32-bit, and the 35-bit mask usable on NX_P2_C1
 * silicon.  NOTE(review): later kernels provide DMA_32BIT_MASK in
 * <linux/dma-mapping.h>; confirm these local defines do not clash.
 */
59 #define DMA_32BIT_MASK 0x00000000ffffffffULL
60 #define DMA_35BIT_MASK 0x00000007ffffffffULL
38 /* Local functions to NetXen NIC driver */
63 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
66 static int netxen_nic_open(struct net_device *netdev);
67 static int netxen_nic_close(struct net_device *netdev);
68 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
69 static void netxen_tx_timeout(struct net_device *netdev);
70 static void netxen_tx_timeout_task(struct work_struct *work);
71 static void netxen_watchdog(unsigned long);
72 static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
73 static int netxen_nic_poll(struct net_device *dev, int *budget);
74 #ifdef CONFIG_NET_POLL_CONTROLLER
75 static void netxen_nic_poll_controller(struct net_device *netdev);
/* NOTE(review): the matching #endif for CONFIG_NET_POLL_CONTROLLER appears
 * to have been lost from this extraction — restore it before compiling. */
77 static irqreturn_t netxen_intr(int irq, void *data);
79 /* PCI Device ID Table */
80 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
81 {PCI_DEVICE(0x4040, 0x0001)},
82 {PCI_DEVICE(0x4040, 0x0002)},
83 {PCI_DEVICE(0x4040, 0x0003)},
84 {PCI_DEVICE(0x4040, 0x0004)},
85 {PCI_DEVICE(0x4040, 0x0005)},
86 {PCI_DEVICE(0x4040, 0x0024)},
87 {PCI_DEVICE(0x4040, 0x0025)},
91 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
/* Single-threaded workqueue used for watchdog and tx-timeout work items. */
93 struct workqueue_struct *netxen_workq;
/* NOTE(review): duplicate forward declaration — netxen_watchdog is already
 * declared in the prototype block above; redundant but harmless. */
94 static void netxen_watchdog(unsigned long);
96 static inline void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
97 uint32_t crb_producer)
99 switch (adapter->portnum) {
101 writel(crb_producer, NETXEN_CRB_NORMALIZE
102 (adapter, CRB_CMD_PRODUCER_OFFSET));
105 writel(crb_producer, NETXEN_CRB_NORMALIZE
106 (adapter, CRB_CMD_PRODUCER_OFFSET_1));
109 writel(crb_producer, NETXEN_CRB_NORMALIZE
110 (adapter, CRB_CMD_PRODUCER_OFFSET_2));
113 writel(crb_producer, NETXEN_CRB_NORMALIZE
114 (adapter, CRB_CMD_PRODUCER_OFFSET_3));
117 printk(KERN_WARNING "We tried to update "
118 "CRB_CMD_PRODUCER_OFFSET for invalid "
119 "PCI function id %d\n",
125 static inline void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
128 switch (adapter->portnum) {
130 writel(crb_consumer, NETXEN_CRB_NORMALIZE
131 (adapter, CRB_CMD_CONSUMER_OFFSET));
134 writel(crb_consumer, NETXEN_CRB_NORMALIZE
135 (adapter, CRB_CMD_CONSUMER_OFFSET_1));
138 writel(crb_consumer, NETXEN_CRB_NORMALIZE
139 (adapter, CRB_CMD_CONSUMER_OFFSET_2));
142 writel(crb_consumer, NETXEN_CRB_NORMALIZE
143 (adapter, CRB_CMD_CONSUMER_OFFSET_3));
146 printk(KERN_WARNING "We tried to update "
147 "CRB_CMD_PRODUCER_OFFSET for invalid "
148 "PCI function id %d\n",
/* NOTE(review): neither ADAPTER_LIST_SIZE nor netxen_cards_found is
 * referenced anywhere in the visible code — likely dead; confirm against
 * the full file before removing. */
154 #define ADAPTER_LIST_SIZE 12
155 int netxen_cards_found;
160 * The Linux system will invoke this after identifying the vendor ID and
161 * device Id in the pci_tbl supported by this module.
163 * A quad port card has one operational PCI config space, (function 0),
164 * which is used to access all four ports.
166 * This routine will initialize the adapter, and setup the global parameters
167 * along with the port's specific structure.
/*
 * netxen_nic_probe - bind one NetXen PCI function.
 * Enables the device, maps BAR0 (three page groups) and the BAR4 doorbell,
 * allocates the tx command ring and per-context rx buffer arrays, wires up
 * netdev callbacks, reads the MAC from flash, performs the one-time
 * firmware boot (gated by netxen_probe_flag), and registers the netdev.
 *
 * NOTE(review): this extraction dropped many lines relative to the embedded
 * original numbering (braces, returns, error labels such as
 * err_out_free_dev); restore from pristine source before compiling.
 */
170 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
172 struct net_device *netdev = NULL;
173 struct netxen_adapter *adapter = NULL;
174 void __iomem *mem_ptr0 = NULL;
175 void __iomem *mem_ptr1 = NULL;
176 void __iomem *mem_ptr2 = NULL;
178 u8 __iomem *db_ptr = NULL;
179 unsigned long mem_base, mem_len, db_base, db_len;
180 int pci_using_dac, i = 0, err;
182 struct netxen_recv_context *recv_ctx = NULL;
183 struct netxen_rcv_desc_ctx *rcv_desc = NULL;
184 struct netxen_cmd_buffer *cmd_buf_arr = NULL;
185 u64 mac_addr[FLASH_NUM_PORTS + 1];
/* NOTE(review): these statics are shared across ALL probed functions of all
 * cards — netxen_probe_flag gates the one-time firmware download. */
186 static int valid_mac = 0;
187 static int netxen_probe_flag;
188 int pci_func_id = PCI_FUNC(pdev->devfn);
190 printk(KERN_INFO "%s \n", netxen_nic_driver_string);
/* Only network-class (0x020000) functions are driven; others are skipped. */
191 if (pdev->class != 0x020000) {
192 printk(KERN_ERR"NetXen function %d, class %x will not"
193 "be enabled.\n",pci_func_id, pdev->class);
196 if ((err = pci_enable_device(pdev)))
198 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
200 goto err_out_disable_pdev;
203 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
204 goto err_out_disable_pdev;
206 pci_set_master(pdev);
/* C1 silicon can address 35 bits of DMA; otherwise fall back to 32-bit. */
207 pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id);
208 if (nx_p2_id == NX_P2_C1 &&
209 (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
210 (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
213 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
214 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
215 goto err_out_free_res;
221 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
223 printk(KERN_ERR"%s: Failed to allocate memory for the "
224 "device block.Check system memory resource"
225 " usage.\n", netxen_nic_driver_name);
226 goto err_out_free_res;
229 SET_MODULE_OWNER(netdev);
230 SET_NETDEV_DEV(netdev, &pdev->dev);
232 adapter = netdev->priv;
233 memset(adapter, 0 , sizeof(struct netxen_adapter));
235 adapter->ahw.pdev = pdev;
236 /* remap phys address */
237 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
238 mem_len = pci_resource_len(pdev, 0);
240 /* 128 Meg of memory */
241 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
243 ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE);
245 ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
247 if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) {
249 "Cannot remap adapter memory aborting.:"
250 "0 -> %p, 1 -> %p, 2 -> %p\n",
251 mem_ptr0, mem_ptr1, mem_ptr2);
254 goto err_out_iounmap;
256 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
257 db_len = pci_resource_len(pdev, 4);
260 printk(KERN_ERR "%s: doorbell is disabled\n",
261 netxen_nic_driver_name);
263 goto err_out_iounmap;
265 DPRINTK(INFO, "doorbell ioremap from %lx a size of %lx\n", db_base,
268 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
270 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
271 netxen_nic_driver_name);
273 goto err_out_iounmap;
275 DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr);
/* Ring sizing defaults for tx, normal rx, jumbo rx and LRO rx. */
277 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
278 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
279 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
280 adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
282 pci_set_drvdata(pdev, adapter);
284 adapter->netdev = netdev;
285 adapter->pdev = pdev;
286 adapter->portnum = pci_func_id;
/* Pre-2.6.29 style netdev callback wiring (no net_device_ops yet). */
288 netdev->open = netxen_nic_open;
289 netdev->stop = netxen_nic_close;
290 netdev->hard_start_xmit = netxen_nic_xmit_frame;
291 netdev->set_multicast_list = netxen_nic_set_multi;
292 netdev->set_mac_address = netxen_nic_set_mac;
293 netdev->change_mtu = netxen_nic_change_mtu;
294 netdev->tx_timeout = netxen_tx_timeout;
295 netdev->watchdog_timeo = HZ;
297 netxen_nic_change_mtu(netdev, netdev->mtu);
299 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
300 netdev->poll = netxen_nic_poll;
301 netdev->weight = NETXEN_NETDEV_WEIGHT;
302 #ifdef CONFIG_NET_POLL_CONTROLLER
303 netdev->poll_controller = netxen_nic_poll_controller;
305 /* ScatterGather support */
306 netdev->features = NETIF_F_SG;
307 netdev->features |= NETIF_F_IP_CSUM;
308 netdev->features |= NETIF_F_TSO;
311 netdev->features |= NETIF_F_HIGHDMA;
/* MSI is best-effort: fall back to legacy INTx when unavailable. */
313 if (pci_enable_msi(pdev)) {
314 adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
315 printk(KERN_WARNING "%s: unable to allocate MSI interrupt"
316 " error\n", netxen_nic_driver_name);
318 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
320 cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
321 if (cmd_buf_arr == NULL) {
323 "%s: Could not allocate cmd_buf_arr memory:%d\n",
324 netxen_nic_driver_name, (int)TX_RINGSIZE);
326 goto err_out_free_adapter;
328 memset(cmd_buf_arr, 0, TX_RINGSIZE);
/* Size each rx ring (normal/jumbo/LRO) per context and allocate its
 * software buffer array. */
330 for (i = 0; i < MAX_RCV_CTX; ++i) {
331 recv_ctx = &adapter->recv_ctx[i];
332 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
333 rcv_desc = &recv_ctx->rcv_desc[ring];
334 switch (RCV_DESC_TYPE(ring)) {
335 case RCV_DESC_NORMAL:
336 rcv_desc->max_rx_desc_count =
337 adapter->max_rx_desc_count;
338 rcv_desc->flags = RCV_DESC_NORMAL;
339 rcv_desc->dma_size = RX_DMA_MAP_LEN;
340 rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
344 rcv_desc->max_rx_desc_count =
345 adapter->max_jumbo_rx_desc_count;
346 rcv_desc->flags = RCV_DESC_JUMBO;
347 rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
348 rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
352 rcv_desc->max_rx_desc_count =
353 adapter->max_lro_rx_desc_count;
354 rcv_desc->flags = RCV_DESC_LRO;
355 rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
356 rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
360 rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
361 vmalloc(RCV_BUFFSIZE);
363 if (rcv_desc->rx_buf_arr == NULL) {
364 printk(KERN_ERR "%s: Could not allocate"
365 "rcv_desc->rx_buf_arr memory:%d\n",
366 netxen_nic_driver_name,
369 goto err_out_free_rx_buffer;
371 memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
376 adapter->cmd_buf_arr = cmd_buf_arr;
377 adapter->ahw.pci_base0 = mem_ptr0;
378 adapter->ahw.pci_base1 = mem_ptr1;
379 adapter->ahw.pci_base2 = mem_ptr2;
380 adapter->ahw.db_base = db_ptr;
381 adapter->ahw.db_len = db_len;
382 spin_lock_init(&adapter->tx_lock);
383 spin_lock_init(&adapter->lock);
384 netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */
/* First function probed does the one-time ROM init + firmware download. */
386 if(netxen_probe_flag == 0) {
387 netxen_pinit_from_rom(adapter, 0);
389 netxen_load_firmware(adapter);
394 * Set the CRB window to invalid. If any register in window 0 is
395 * accessed it should set the window to 0 and then reset it to 1.
397 adapter->curr_window = 255;
399 * Adapter in our case is quad port so initialize it before
400 * initializing the ports
402 netxen_initialize_adapter_hw(adapter); /* initialize the adapter */
/* On the dual 10G IMEZ board, functions 2/3 map to ports 0/1. */
404 if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ)
405 if (pci_func_id >= 2)
406 adapter->portnum = pci_func_id - 2;
408 netxen_initialize_adapter_ops(adapter);
410 init_timer(&adapter->watchdog_timer);
411 adapter->ahw.xg_linkup = 0;
412 adapter->watchdog_timer.function = &netxen_watchdog;
413 adapter->watchdog_timer.data = (unsigned long)adapter;
414 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
415 adapter->ahw.pdev = pdev;
416 adapter->proc_cmd_buf_counter = 0;
417 adapter->ahw.revision_id = nx_p2_id;
419 netxen_nic_update_cmd_producer(adapter, 0);
420 netxen_nic_update_cmd_consumer(adapter, 0);
/* MAC lives in flash, stored byte-reversed — copy it out back-to-front. */
422 if (netxen_is_flash_supported(adapter) == 0 &&
423 netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
429 unsigned char *p = (unsigned char *)&mac_addr[i];
430 netdev->dev_addr[0] = *(p + 5);
431 netdev->dev_addr[1] = *(p + 4);
432 netdev->dev_addr[2] = *(p + 3);
433 netdev->dev_addr[3] = *(p + 2);
434 netdev->dev_addr[4] = *(p + 1);
435 netdev->dev_addr[5] = *(p + 0);
437 memcpy(netdev->perm_addr, netdev->dev_addr,
439 if (!is_valid_ether_addr(netdev->perm_addr)) {
440 printk(KERN_ERR "%s: Bad MAC address "
441 "%02x:%02x:%02x:%02x:%02x:%02x.\n",
442 netxen_nic_driver_name,
448 netdev->dev_addr[5]);
450 if (adapter->macaddr_set)
451 adapter->macaddr_set(adapter,
457 * Initialize all the CRB registers here.
459 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
460 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
461 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
463 /* do this before waking up pegs so that we have valid dummy dma addr */
464 err = netxen_initialize_adapter_offload(adapter);
466 goto err_out_free_dev;
468 if (netxen_probe_flag == 0) {
469 /* Unlock the HW, prompting the boot sequence */
471 NETXEN_CRB_NORMALIZE(adapter,
472 NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
473 /* Handshake with the card before we register the devices. */
474 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
477 if(netxen_probe_flag == 0) {
478 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
479 netxen_pinit_from_rom(adapter, 0);
481 netxen_load_firmware(adapter);
482 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
485 * delay a while to ensure that the Pegs are up & running.
486 * Otherwise, we might see some flaky behaviour.
489 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
490 netif_carrier_off(netdev);
491 netif_stop_queue(netdev);
493 if((err = register_netdev(netdev)))
494 DPRINTK(1, ERR, "register_netdev failed port #%d"
497 switch (adapter->ahw.board_type) {
499 printk(KERN_INFO "%s: QUAD GbE board initialized\n",
500 netxen_nic_driver_name);
503 case NETXEN_NIC_XGBE:
504 printk(KERN_INFO "%s: XGbE board initialized\n",
505 netxen_nic_driver_name);
509 adapter->driver_mismatch = 0;
510 if(netxen_probe_flag == 0)
511 netxen_probe_flag ++;
/* Error unwinding: each label below undoes the allocations above it. */
516 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
517 pci_disable_msi(pdev);
519 unregister_netdev(adapter->netdev);
520 free_netdev(adapter->netdev);
522 netxen_free_adapter_offload(adapter);
524 err_out_free_rx_buffer:
525 for (i = 0; i < MAX_RCV_CTX; ++i) {
526 recv_ctx = &adapter->recv_ctx[i];
527 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
528 rcv_desc = &recv_ctx->rcv_desc[ring];
529 if (rcv_desc->rx_buf_arr != NULL) {
530 vfree(rcv_desc->rx_buf_arr);
531 rcv_desc->rx_buf_arr = NULL;
537 err_out_free_adapter:
538 pci_set_drvdata(pdev, NULL);
553 pci_release_regions(pdev);
554 err_out_disable_pdev:
555 pci_disable_device(pdev);
/*
 * netxen_nic_remove - unbind a NetXen function: stop the port, quiesce the
 * hardware to its reboot state, unregister the netdev and release every
 * resource taken in probe (irq, MSI, hw rings, mappings, rx/tx buffers).
 *
 * NOTE(review): in this extraction the rx-buffer teardown loop runs AFTER
 * pci_release_regions()/pci_disable_device() and after unregister_netdev();
 * the dropped lines may reorder this — confirm against pristine source that
 * adapter memory is not used after it is freed.
 */
559 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
561 struct netxen_adapter *adapter;
562 struct net_device *netdev;
563 struct netxen_rx_buffer *buffer;
564 struct netxen_recv_context *recv_ctx;
565 struct netxen_rcv_desc_ctx *rcv_desc;
569 netdev = pci_get_drvdata(pdev);
570 adapter = netdev_priv(netdev);
/* stop_port is an optional per-board hook installed by adapter_ops init. */
574 if (adapter->stop_port)
575 adapter->stop_port(adapter);
578 free_irq(adapter->irq, adapter);
579 /* leave the hw in the same state as reboot */
580 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
581 netxen_pinit_from_rom(adapter, 0);
582 netxen_load_firmware(adapter);
583 netxen_free_adapter_offload(adapter);
586 unregister_netdev(netdev);
589 if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
590 pci_disable_msi(pdev);
591 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
592 netxen_free_hw_resources(adapter);
594 iounmap(adapter->ahw.db_base);
595 iounmap(adapter->ahw.pci_base0);
596 iounmap(adapter->ahw.pci_base1);
597 iounmap(adapter->ahw.pci_base2);
599 pci_release_regions(pdev);
600 pci_disable_device(pdev);
601 pci_set_drvdata(pdev, NULL);
/* Unmap and free every still-posted rx buffer, then the arrays. */
603 for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
604 recv_ctx = &adapter->recv_ctx[ctxid];
605 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
606 rcv_desc = &recv_ctx->rcv_desc[ring];
607 for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
608 buffer = &(rcv_desc->rx_buf_arr[i]);
609 if (buffer->state == NETXEN_BUFFER_FREE)
611 pci_unmap_single(pdev, buffer->dma,
614 if (buffer->skb != NULL)
615 dev_kfree_skb_any(buffer->skb);
617 vfree(rcv_desc->rx_buf_arr);
621 vfree(adapter->cmd_buf_arr);
626 * Called when a network interface is made active
627 * @returns 0 on success, negative value on failure
/*
 * netxen_nic_open - ndo open (ifup): first open performs firmware/hw
 * resource setup (guarded by NETXEN_ADAPTER_UP_MAGIC), then posts rx
 * buffers, requests the irq, starts the watchdog, enables interrupts and
 * re-applies MAC/link/multicast/MTU settings.
 * Returns 0 on success, negative errno on failure (error paths partially
 * dropped from this extraction).
 */
629 static int netxen_nic_open(struct net_device *netdev)
631 struct netxen_adapter *adapter = (struct netxen_adapter *)netdev->priv;
635 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
636 err = netxen_init_firmware(adapter);
638 printk(KERN_ERR "Failed to init firmware\n");
641 netxen_nic_flash_print(adapter);
642 if (adapter->init_niu)
643 adapter->init_niu(adapter);
645 /* setup all the resources for the Phantom... */
646 /* this include the descriptors for rcv, tx, and status */
647 netxen_nic_clear_stats(adapter);
648 err = netxen_nic_hw_resources(adapter);
650 printk(KERN_ERR "Error in setting hw resources:%d\n",
654 if (adapter->init_port
655 && adapter->init_port(adapter, adapter->portnum) != 0) {
656 printk(KERN_ERR "%s: Failed to initialize port %d\n",
657 netxen_nic_driver_name, adapter->portnum);
658 netxen_free_hw_resources(adapter);
/* Hand the hardware an initial supply of rx buffers on every ring. */
661 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
662 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
663 netxen_post_rx_buffers(adapter, ctx, ring);
665 adapter->irq = adapter->ahw.pdev->irq;
/* NOTE(review): IRQF_SAMPLE_RANDOM was removed from later kernels. */
666 err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
667 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
668 netdev->name, adapter);
670 printk(KERN_ERR "request_irq failed with: %d\n", err);
671 netxen_free_hw_resources(adapter);
675 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
677 if (!adapter->driver_mismatch)
678 mod_timer(&adapter->watchdog_timer, jiffies);
680 netxen_nic_enable_int(adapter);
682 /* Done here again so that even if phantom sw overwrote it,
684 if (adapter->macaddr_set)
685 adapter->macaddr_set(adapter, netdev->dev_addr);
686 netxen_nic_set_link_parameters(adapter);
688 netxen_nic_set_multi(netdev);
689 if (adapter->set_mtu)
690 adapter->set_mtu(adapter, netdev->mtu);
692 if (!adapter->driver_mismatch)
693 netif_start_queue(netdev);
699 * netxen_nic_close - Disables a network interface entry point
/*
 * netxen_nic_close - ndo stop (ifdown): stops the queue, disables and frees
 * the interrupt, unmaps/frees every in-flight tx buffer and skb, flushes
 * scheduled work and stops the watchdog timer.
 * Presumably returns 0 — the return statement was dropped from this
 * extraction; confirm against pristine source.
 */
701 static int netxen_nic_close(struct net_device *netdev)
703 struct netxen_adapter *adapter = netdev_priv(netdev);
705 struct netxen_cmd_buffer *cmd_buff;
706 struct netxen_skb_frag *buffrag;
708 netif_carrier_off(netdev);
709 netif_stop_queue(netdev);
711 netxen_nic_disable_int(adapter);
713 free_irq(adapter->irq, adapter);
/* Walk the whole tx ring: unmap the head fragment, then page fragments,
 * then free the skb queued by netxen_nic_xmit_frame(). */
715 cmd_buff = adapter->cmd_buf_arr;
716 for (i = 0; i < adapter->max_tx_desc_count; i++) {
717 buffrag = cmd_buff->frag_array;
719 pci_unmap_single(adapter->pdev, buffrag->dma,
720 buffrag->length, PCI_DMA_TODEVICE);
721 buffrag->dma = (u64) NULL;
723 for (j = 0; j < cmd_buff->frag_count; j++) {
726 pci_unmap_page(adapter->pdev, buffrag->dma,
729 buffrag->dma = (u64) NULL;
732 /* Free the skb we received in netxen_nic_xmit_frame */
734 dev_kfree_skb_any(cmd_buff->skb);
735 cmd_buff->skb = NULL;
739 FLUSH_SCHEDULED_WORK();
740 del_timer_sync(&adapter->watchdog_timer);
/*
 * netxen_nic_xmit_frame - ndo hard_start_xmit: maps the skb head and page
 * fragments for DMA, packs them four-per cmd descriptor, copies MAC/IP/TCP
 * headers into the ring for LSO frames, and publishes the producer index
 * when the last concurrent producer thread finishes.
 * Returns NETDEV_TX_BUSY when the ring is full (queue is stopped first);
 * success return was dropped from this extraction.
 */
745 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
747 struct netxen_adapter *adapter = netdev_priv(netdev);
748 struct netxen_hardware_context *hw = &adapter->ahw;
749 unsigned int first_seg_len = skb->len - skb->data_len;
750 struct netxen_skb_frag *buffrag;
754 u32 saved_producer = 0;
755 struct cmd_desc_type0 *hwdesc;
757 struct netxen_cmd_buffer *pbuf = NULL;
/* NOTE(review): static counter is shared by all adapters and is not
 * protected by any lock — the count is best-effort only. */
758 static int dropped_packet = 0;
760 u32 local_producer = 0;
761 u32 max_tx_desc_count = 0;
762 u32 last_cmd_consumer = 0;
765 adapter->stats.xmitcalled++;
766 frag_count = skb_shinfo(skb)->nr_frags + 1;
768 if (unlikely(skb->len <= 0)) {
769 dev_kfree_skb_any(skb);
770 adapter->stats.badskblen++;
774 if (frag_count > MAX_BUFFERS_PER_CMD) {
775 printk("%s: %s netxen_nic_xmit_frame: frag_count (%d)"
776 "too large, can handle only %d frags\n",
777 netxen_nic_driver_name, netdev->name,
778 frag_count, MAX_BUFFERS_PER_CMD);
779 adapter->stats.txdropped++;
780 if ((++dropped_packet & 0xff) == 0xff)
781 printk("%s: %s droppped packets = %d\n",
782 netxen_nic_driver_name, netdev->name,
789 * Everything is set up. Now, we just need to transmit it out.
790 * Note that we have to copy the contents of buffer over to
791 * right place. Later on, this can be optimized out by de-coupling the
792 * producer index from the buffer index.
/* Spin (with a short cpu_relax backoff) until a producer slot frees up. */
794 retry_getting_window:
795 spin_lock_bh(&adapter->tx_lock);
796 if (adapter->total_threads >= MAX_XMIT_PRODUCERS) {
797 spin_unlock_bh(&adapter->tx_lock);
804 for (i = 0; i < 20; i++)
805 cpu_relax(); /*This a nop instr on i386 */
807 goto retry_getting_window;
809 local_producer = adapter->cmd_producer;
810 /* There 4 fragments per descriptor */
811 no_of_desc = (frag_count + 3) >> 2;
812 if (netdev->features & NETIF_F_TSO) {
813 if (skb_shinfo(skb)->gso_size > 0) {
816 if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
817 sizeof(struct ethhdr)) >
818 (sizeof(struct cmd_desc_type0) - 2)) {
/* Ring-full check: stop the queue and report busy if the reservation
 * would collide with the consumer. */
823 k = adapter->cmd_producer;
824 max_tx_desc_count = adapter->max_tx_desc_count;
825 last_cmd_consumer = adapter->last_cmd_consumer;
826 if ((k + no_of_desc) >=
827 ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
828 last_cmd_consumer)) {
829 netif_stop_queue(netdev);
830 adapter->flags |= NETXEN_NETDEV_STATUS;
831 spin_unlock_bh(&adapter->tx_lock);
832 return NETDEV_TX_BUSY;
834 k = get_index_range(k, max_tx_desc_count, no_of_desc);
835 adapter->cmd_producer = k;
836 adapter->total_threads++;
837 adapter->num_threads++;
839 spin_unlock_bh(&adapter->tx_lock);
840 /* Copy the descriptors into the hardware */
841 producer = local_producer;
842 saved_producer = producer;
843 hwdesc = &hw->cmd_desc_head[producer];
844 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
845 /* Take skb->data itself */
846 pbuf = &adapter->cmd_buf_arr[producer];
847 if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
848 pbuf->mss = skb_shinfo(skb)->gso_size;
849 hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
854 pbuf->total_length = skb->len;
856 pbuf->cmd = TX_ETHER_PKT;
857 pbuf->frag_count = frag_count;
858 pbuf->port = adapter->portnum;
859 buffrag = &pbuf->frag_array[0];
860 buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
862 buffrag->length = first_seg_len;
863 netxen_set_cmd_desc_totallength(hwdesc, skb->len);
864 netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
865 netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
867 netxen_set_cmd_desc_port(hwdesc, adapter->portnum);
868 hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
869 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
/* Map each page fragment; buffers 1..4 of a descriptor are filled in turn
 * and a fresh descriptor is started every 4th fragment. */
871 for (i = 1, k = 1; i < frag_count; i++, k++) {
872 struct skb_frag_struct *frag;
874 unsigned long offset;
877 /* move to next desc. if there is a need */
878 if ((i & 0x3) == 0) {
880 producer = get_next_index(producer,
881 adapter->max_tx_desc_count);
882 hwdesc = &hw->cmd_desc_head[producer];
883 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
885 frag = &skb_shinfo(skb)->frags[i - 1];
887 offset = frag->page_offset;
890 temp_dma = pci_map_page(adapter->pdev, frag->page, offset,
891 len, PCI_DMA_TODEVICE);
894 buffrag->dma = temp_dma;
895 buffrag->length = temp_len;
897 DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k);
900 hwdesc->buffer1_length = cpu_to_le16(temp_len);
901 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
904 hwdesc->buffer2_length = cpu_to_le16(temp_len);
905 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
908 hwdesc->buffer3_length = cpu_to_le16(temp_len);
909 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
912 hwdesc->buffer4_length = cpu_to_le16(temp_len);
913 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
918 producer = get_next_index(producer, adapter->max_tx_desc_count);
920 /* might change opcode to TX_TCP_LSO */
921 netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
923 /* For LSO, we need to copy the MAC/IP/TCP headers into
924 * the descriptor ring
926 if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
928 int hdr_len, first_hdr_len, more_hdr;
929 hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
930 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
931 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
934 first_hdr_len = hdr_len;
937 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
938 hwdesc = &hw->cmd_desc_head[producer];
940 /* copy the first 64 bytes */
941 memcpy(((void *)hwdesc) + 2,
942 (void *)(skb->data), first_hdr_len);
943 producer = get_next_index(producer, max_tx_desc_count);
946 hwdesc = &hw->cmd_desc_head[producer];
947 /* copy the next 64 bytes - should be enough except
948 * for pathological case
950 skb_copy_from_linear_data_offset(skb, first_hdr_len,
954 producer = get_next_index(producer, max_tx_desc_count);
/* Last producer thread out publishes the new producer index to hardware. */
957 spin_lock_bh(&adapter->tx_lock);
958 adapter->stats.txbytes +=
959 netxen_get_cmd_desc_totallength(&hw->cmd_desc_head[saved_producer]);
960 /* Code to update the adapter considering how many producer threads
961 are currently working */
962 if ((--adapter->num_threads) == 0) {
963 /* This is the last thread */
964 u32 crb_producer = adapter->cmd_producer;
965 netxen_nic_update_cmd_producer(adapter, crb_producer);
967 adapter->total_threads = 0;
970 adapter->stats.xmitfinished++;
971 spin_unlock_bh(&adapter->tx_lock);
973 netdev->trans_start = jiffies;
975 DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);
977 DPRINTK(INFO, "Done. Send\n");
981 static void netxen_watchdog(unsigned long v)
983 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
985 SCHEDULE_WORK(&adapter->watchdog_task);
988 static void netxen_tx_timeout(struct net_device *netdev)
990 struct netxen_adapter *adapter = (struct netxen_adapter *)
992 SCHEDULE_WORK(&adapter->tx_timeout_task);
995 static void netxen_tx_timeout_task(struct work_struct *work)
997 struct netxen_adapter *adapter =
998 container_of(work, struct netxen_adapter, tx_timeout_task);
999 struct net_device *netdev = adapter->netdev;
1000 unsigned long flags;
1002 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1003 netxen_nic_driver_name, netdev->name);
1005 spin_lock_irqsave(&adapter->lock, flags);
1006 netxen_nic_close(netdev);
1007 netxen_nic_open(netdev);
1008 spin_unlock_irqrestore(&adapter->lock, flags);
1009 netdev->trans_start = jiffies;
1010 netif_wake_queue(netdev);
/*
 * netxen_handle_int - main interrupt service: checks the INT vector to see
 * whether this port raised the interrupt (legacy INTx only), masks the
 * interrupt at the bridge, and schedules NAPI polling if rx or tx work is
 * pending; re-enables the interrupt otherwise.
 *
 * NOTE(review): local declarations (our_int, mask, count) and the `do {`
 * opening the mask-retry loop were dropped from this extraction — restore
 * from pristine source before compiling.
 */
1014 netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
1018 DPRINTK(INFO, "Entered handle ISR\n");
/* With MSI the interrupt is inherently ours; only INTx needs the check. */
1020 if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
1024 our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
1025 /* not our interrupt */
1026 if ((our_int & (0x80 << adapter->portnum)) == 0)
1028 netxen_nic_disable_int(adapter);
1029 /* Window = 0 or 1 */
/* Retry masking at the bridge until the status bit clears (max 32 tries). */
1031 writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter,
1032 ISR_INT_TARGET_STATUS));
1033 mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
1034 } while (((mask & 0x80) != 0) && (++count < 32));
1035 if ((mask & 0x80) != 0)
1036 printk("Could not disable interrupt completely\n");
1040 if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
1041 if (netif_rx_schedule_prep(netdev)) {
1043 * Interrupts are already disabled.
1045 __netif_rx_schedule(netdev);
/* Already in poll: count (rate-limited log) instead of rescheduling. */
1047 static unsigned int intcount = 0;
1048 if ((++intcount & 0xfff) == 0xfff)
1050 "%s: %s interrupt %d while in poll\n",
1051 netxen_nic_driver_name, netdev->name,
1058 netxen_nic_enable_int(adapter);
1065 * netxen_intr - Interrupt Handler
1066 * @irq: interrupt number
1067 * data points to adapter stucture (which may be handling more than 1 port
1069 irqreturn_t netxen_intr(int irq, void *data)
1071 struct netxen_adapter *adapter;
1072 struct net_device *netdev;
1074 if (unlikely(!irq)) {
1075 return IRQ_NONE; /* Not our interrupt */
1078 adapter = (struct netxen_adapter *)data;
1079 netdev = adapter->netdev;
1080 /* process our status queue (for all 4 ports) */
1081 if (netif_running(netdev))
1082 netxen_handle_int(adapter, netdev);
/*
 * netxen_nic_poll - pre-2.6.24 style NAPI poll: processes rx rings for each
 * receive context (splitting the budget for fairness), reaps completed tx
 * descriptors, charges the work done against quota/budget, and re-enables
 * interrupts when all work is complete.
 * Return value lines (1 = more work, 0 = done) were dropped from this
 * extraction — confirm against pristine source.
 */
1087 static int netxen_nic_poll(struct net_device *netdev, int *budget)
1089 struct netxen_adapter *adapter = netdev_priv(netdev);
1090 int work_to_do = min(*budget, netdev->quota);
1096 DPRINTK(INFO, "polling for %d descriptors\n", *budget);
1099 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1101 * Fairness issue. This will give undue weight to the
1102 * receive context 0.
1106 * To avoid starvation, we give each of our receivers,
1107 * a fraction of the quota. Sometimes, it might happen that we
1108 * have enough quota to process every packet, but since all the
1109 * packets are on one context, it gets only half of the quota,
1110 * and ends up not processing it.
1112 this_work_done = netxen_process_rcv_ring(adapter, ctx,
1115 work_done += this_work_done;
/* Charge the rx work against both the device quota and the global budget. */
1118 netdev->quota -= work_done;
1119 *budget -= work_done;
1121 if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0)
1124 if (netxen_process_cmd_ring((unsigned long)adapter) == 0)
1127 DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
1128 work_done, work_to_do);
/* All done: leave polling mode and unmask the interrupt. */
1130 netif_rx_complete(netdev);
1131 netxen_nic_enable_int(adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netxen_nic_poll_controller - netpoll hook: simulate an interrupt with the
 * device irq disabled so netconsole/kgdboe can receive packets without
 * relying on interrupt delivery.
 */
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->irq);
	netxen_intr(adapter->irq, adapter);
	enable_irq(adapter->irq);
}
#endif
1147 static struct pci_driver netxen_driver = {
1148 .name = netxen_nic_driver_name,
1149 .id_table = netxen_pci_tbl,
1150 .probe = netxen_nic_probe,
1151 .remove = __devexit_p(netxen_nic_remove)
1154 /* Driver Registration on NetXen card */
1156 static int __init netxen_init_module(void)
1158 if ((netxen_workq = create_singlethread_workqueue("netxen")) == 0)
1161 return pci_register_driver(&netxen_driver);
1164 module_init(netxen_init_module);
1166 static void __exit netxen_exit_module(void)
1169 * Wait for some time to allow the dma to drain, if any.
1171 pci_unregister_driver(&netxen_driver);
1172 destroy_workqueue(netxen_workq);
1175 module_exit(netxen_exit_module);