/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
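/* The debug value is a NETIF_MSG_* bitmask; for example 0x0007 would enable
 * only NETIF_MSG_DRV (0x0001), NETIF_MSG_PROBE (0x0002) and NETIF_MSG_LINK
 * (0x0004), while the 0x00007fff default enables every message type from
 * NETIF_MSG_DRV through NETIF_MSG_WOL (0x4000).
 */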
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
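/* A minimal usage sketch of the semaphore helpers below (the same pattern
 * the flash and MAC-address code later in this file follows):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the shared resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */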
104 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
109 case SEM_XGMAC0_MASK:
110 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
112 case SEM_XGMAC1_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 sem_bits = SEM_SET << SEM_ICB_SHIFT;
118 case SEM_MAC_ADDR_MASK:
119 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
127 case SEM_RT_IDX_MASK:
128 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
130 case SEM_PROC_REG_MASK:
131 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
138 ql_write32(qdev, SEM, sem_bits | sem_mask);
139 return !(ql_read32(qdev, SEM) & sem_bits);
142 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
144 unsigned int wait_count = 30;
146 if (!ql_sem_trylock(qdev, sem_mask))
149 } while (--wait_count);
153 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
155 ql_write32(qdev, SEM, sem_mask);
156 ql_read32(qdev, SEM); /* flush */
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
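/* A typical call, as used by the MAC address helpers below:
 *
 *	status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *	if (status)
 *		goto exit;
 *
 * i.e. poll MAC_ADDR_IDX until the MAC_ADDR_MW (ready-for-write) bit is set,
 * with no error bit to check for.
 */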
164 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 int count = UDELAY_COUNT;
170 temp = ql_read32(qdev, reg);
172 /* check for errors */
173 if (temp & err_bit) {
174 netif_alert(qdev, probe, qdev->ndev,
175 "register 0x%.08x access error, value = 0x%.08x!.\n",
178 } else if (temp & bit)
180 udelay(UDELAY_DELAY);
183 netif_alert(qdev, probe, qdev->ndev,
184 "Timed out waiting for reg %x to come ready.\n", reg);
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
191 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
193 int count = UDELAY_COUNT;
197 temp = ql_read32(qdev, CFG);
202 udelay(UDELAY_DELAY);
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
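/* Note on the DMA direction chosen below: the load operations (CFG_LRQ,
 * CFG_LR, CFG_LCQ) push the control block to the chip, so the block is
 * mapped to-device; any other operation reads it back, from-device.
 */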
212 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
222 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 map = pci_map_single(qdev->pdev, ptr, size, direction);
226 if (pci_dma_mapping_error(qdev->pdev, map)) {
227 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 status = ql_wait_cfg(qdev, bit);
237 netif_err(qdev, ifup, qdev->ndev,
238 "Timed out waiting for CFG to come ready.\n");
242 ql_write32(qdev, ICB_L, (u32) map);
243 ql_write32(qdev, ICB_H, (u32) (map >> 32));
245 mask = CFG_Q_MASK | (bit << 16);
246 value = bit | (q_id << CFG_Q_SHIFT);
247 ql_write32(qdev, CFG, (mask | value));
	/*
	 * Wait for the bit to clear after signaling hw.
	 */
252 status = ql_wait_cfg(qdev, bit);
254 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
255 pci_unmap_single(qdev->pdev, map, size, direction);
259 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
260 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
267 case MAC_ADDR_TYPE_MULTI_MAC:
268 case MAC_ADDR_TYPE_CAM_MAC:
271 ql_wait_reg_rdy(qdev,
272 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276 (index << MAC_ADDR_IDX_SHIFT) | /* index */
277 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
279 ql_wait_reg_rdy(qdev,
280 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
285 ql_wait_reg_rdy(qdev,
286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
293 ql_wait_reg_rdy(qdev,
294 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 if (type == MAC_ADDR_TYPE_CAM_MAC) {
300 ql_wait_reg_rdy(qdev,
301 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
305 (index << MAC_ADDR_IDX_SHIFT) | /* index */
306 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
308 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 case MAC_ADDR_TYPE_VLAN:
317 case MAC_ADDR_TYPE_MULTI_FLTR:
319 netif_crit(qdev, ifup, qdev->ndev,
320 "Address type %d not yet supported.\n", type);
327 /* Set up a MAC, multicast or VLAN address for the
328 * inbound frame matching.
330 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
337 case MAC_ADDR_TYPE_MULTI_MAC:
339 u32 upper = (addr[0] << 8) | addr[1];
340 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
341 (addr[4] << 8) | (addr[5]);
344 ql_wait_reg_rdy(qdev,
345 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
349 (index << MAC_ADDR_IDX_SHIFT) |
351 ql_write32(qdev, MAC_ADDR_DATA, lower);
353 ql_wait_reg_rdy(qdev,
354 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
358 (index << MAC_ADDR_IDX_SHIFT) |
361 ql_write32(qdev, MAC_ADDR_DATA, upper);
363 ql_wait_reg_rdy(qdev,
364 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
369 case MAC_ADDR_TYPE_CAM_MAC:
372 u32 upper = (addr[0] << 8) | addr[1];
374 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
378 "Adding %s address %pM at index %d in the CAM.\n",
379 type == MAC_ADDR_TYPE_MULTI_MAC ?
380 "MULTICAST" : "UNICAST",
384 ql_wait_reg_rdy(qdev,
385 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
389 (index << MAC_ADDR_IDX_SHIFT) | /* index */
391 ql_write32(qdev, MAC_ADDR_DATA, lower);
393 ql_wait_reg_rdy(qdev,
394 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
398 (index << MAC_ADDR_IDX_SHIFT) | /* index */
400 ql_write32(qdev, MAC_ADDR_DATA, upper);
402 ql_wait_reg_rdy(qdev,
403 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
407 (index << MAC_ADDR_IDX_SHIFT) | /* index */
409 /* This field should also include the queue id
410 and possibly the function id. Right now we hardcode
411 the route field to NIC core.
413 cam_output = (CAM_OUT_ROUTE_NIC |
415 func << CAM_OUT_FUNC_SHIFT) |
416 (0 << CAM_OUT_CQ_ID_SHIFT));
418 cam_output |= CAM_OUT_RV;
419 /* route to NIC core */
420 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423 case MAC_ADDR_TYPE_VLAN:
425 u32 enable_bit = *((u32 *) &addr[0]);
426 /* For VLAN, the addr actually holds a bit that
427 * either enables or disables the vlan id we are
428 * addressing. It's either MAC_ADDR_E on or off.
429 * That's bit-27 we're talking about.
431 netif_info(qdev, ifup, qdev->ndev,
432 "%s VLAN ID %d %s the CAM.\n",
433 enable_bit ? "Adding" : "Removing",
435 enable_bit ? "to" : "from");
438 ql_wait_reg_rdy(qdev,
439 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
443 (index << MAC_ADDR_IDX_SHIFT) | /* index */
445 enable_bit); /* enable/disable */
448 case MAC_ADDR_TYPE_MULTI_FLTR:
450 netif_crit(qdev, ifup, qdev->ndev,
451 "Address type %d not yet supported.\n", type);
458 /* Set or clear MAC address in hardware. We sometimes
459 * have to clear it to prevent wrong frame routing
460 * especially in a bonding environment.
462 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 char zero_mac_addr[ETH_ALEN];
469 addr = &qdev->current_mac_addr[0];
470 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
471 "Set Mac addr %pM\n", addr);
473 memset(zero_mac_addr, 0, ETH_ALEN);
474 addr = &zero_mac_addr[0];
475 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
476 "Clearing MAC address\n");
478 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
482 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
483 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
485 netif_err(qdev, ifup, qdev->ndev,
486 "Failed to init mac address.\n");
490 void ql_link_on(struct ql_adapter *qdev)
492 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
493 netif_carrier_on(qdev->ndev);
494 ql_set_mac_addr(qdev, 1);
497 void ql_link_off(struct ql_adapter *qdev)
499 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
500 netif_carrier_off(qdev->ndev);
501 ql_set_mac_addr(qdev, 0);
504 /* Get a specific frame routing value from the CAM.
505 * Used for debug and reg dump.
507 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
511 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
515 ql_write32(qdev, RT_IDX,
516 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
517 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520 *value = ql_read32(qdev, RT_DATA);
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
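/* For example, enabling the broadcast slot sends broadcast frames to the
 * default (slow-path) queue:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * and calling it again with enable = 0 clears that routing entry.
 */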
530 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533 int status = -EINVAL; /* Return error if no mask match. */
536 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
537 "%s %s mask %s the routing reg.\n",
538 enable ? "Adding" : "Removing",
539 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
540 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
541 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
542 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
543 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
544 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
545 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
546 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
547 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
548 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
549 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
550 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
551 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
552 index == RT_IDX_UNUSED013 ? "UNUSED13" :
553 index == RT_IDX_UNUSED014 ? "UNUSED14" :
554 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
555 "(Bad index != RT_IDX)",
556 enable ? "to" : "from");
561 value = RT_IDX_DST_CAM_Q | /* dest */
562 RT_IDX_TYPE_NICQ | /* type */
563 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 case RT_IDX_VALID: /* Promiscuous Mode frames. */
568 value = RT_IDX_DST_DFLT_Q | /* dest */
569 RT_IDX_TYPE_NICQ | /* type */
570 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
575 value = RT_IDX_DST_DFLT_Q | /* dest */
576 RT_IDX_TYPE_NICQ | /* type */
577 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
582 value = RT_IDX_DST_DFLT_Q | /* dest */
583 RT_IDX_TYPE_NICQ | /* type */
584 (RT_IDX_IP_CSUM_ERR_SLOT <<
585 RT_IDX_IDX_SHIFT); /* index */
588 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
590 value = RT_IDX_DST_DFLT_Q | /* dest */
591 RT_IDX_TYPE_NICQ | /* type */
592 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
593 RT_IDX_IDX_SHIFT); /* index */
596 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
598 value = RT_IDX_DST_DFLT_Q | /* dest */
599 RT_IDX_TYPE_NICQ | /* type */
600 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
605 value = RT_IDX_DST_DFLT_Q | /* dest */
606 RT_IDX_TYPE_NICQ | /* type */
607 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
612 value = RT_IDX_DST_DFLT_Q | /* dest */
613 RT_IDX_TYPE_NICQ | /* type */
614 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
619 value = RT_IDX_DST_RSS | /* dest */
620 RT_IDX_TYPE_NICQ | /* type */
621 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624 case 0: /* Clear the E-bit on an entry. */
626 value = RT_IDX_DST_DFLT_Q | /* dest */
627 RT_IDX_TYPE_NICQ | /* type */
628 (index << RT_IDX_IDX_SHIFT);/* index */
632 netif_err(qdev, ifup, qdev->ndev,
633 "Mask type %d not yet supported.\n", mask);
639 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642 value |= (enable ? RT_IDX_E : 0);
643 ql_write32(qdev, RT_IDX, value);
644 ql_write32(qdev, RT_DATA, enable ? mask : 0);
650 static void ql_enable_interrupts(struct ql_adapter *qdev)
652 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 static void ql_disable_interrupts(struct ql_adapter *qdev)
657 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
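/* Roughly, in the non-MSI-X (or vector 0) case the counter behaves like a
 * nesting count: each ql_disable_completion_interrupt() increments irq_cnt,
 * each ql_enable_completion_interrupt() decrements it, and the hardware
 * enable is only written once the count drops back to zero.
 */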
666 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 unsigned long hw_flags = 0;
670 struct intr_context *ctx = qdev->intr_context + intr;
672 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
673 /* Always enable if we're MSIX multi interrupts and
674 * it's not the default (zeroeth) interrupt.
676 ql_write32(qdev, INTR_EN,
678 var = ql_read32(qdev, STS);
682 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
683 if (atomic_dec_and_test(&ctx->irq_cnt)) {
684 ql_write32(qdev, INTR_EN,
686 var = ql_read32(qdev, STS);
688 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
692 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 struct intr_context *ctx;
697 /* HW disables for us if we're MSIX multi interrupts and
698 * it's not the default (zeroeth) interrupt.
700 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703 ctx = qdev->intr_context + intr;
704 spin_lock(&qdev->hw_lock);
705 if (!atomic_read(&ctx->irq_cnt)) {
706 ql_write32(qdev, INTR_EN,
708 var = ql_read32(qdev, STS);
710 atomic_inc(&ctx->irq_cnt);
711 spin_unlock(&qdev->hw_lock);
715 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
723 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
725 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
726 ql_enable_completion_interrupt(qdev, i);
731 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
735 __le16 *flash = (__le16 *)&qdev->flash;
737 status = strncmp((char *)&qdev->flash, str, 4);
739 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
743 for (i = 0; i < size; i++)
744 csum += le16_to_cpu(*flash++);
747 netif_err(qdev, ifup, qdev->ndev,
748 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
753 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 /* wait for reg to come ready */
757 status = ql_wait_reg_rdy(qdev,
758 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761 /* set up for reg read */
762 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
763 /* wait for reg to come ready */
764 status = ql_wait_reg_rdy(qdev,
765 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768 /* This data is stored on flash as an array of
769 * __le32. Since ql_read32() returns cpu endian
770 * we need to swap it back.
772 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
777 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
781 __le32 *p = (__le32 *)&qdev->flash;
785 /* Get flash offset for function and adjust
789 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
791 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
793 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796 size = sizeof(struct flash_params_8000) / sizeof(u32);
797 for (i = 0; i < size; i++, p++) {
798 status = ql_read_flash_word(qdev, i+offset, p);
800 netif_err(qdev, ifup, qdev->ndev,
801 "Error reading flash.\n");
806 status = ql_validate_flash(qdev,
807 sizeof(struct flash_params_8000) / sizeof(u16),
810 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
815 /* Extract either manufacturer or BOFM modified
818 if (qdev->flash.flash_params_8000.data_type1 == 2)
820 qdev->flash.flash_params_8000.mac_addr1,
821 qdev->ndev->addr_len);
824 qdev->flash.flash_params_8000.mac_addr,
825 qdev->ndev->addr_len);
827 if (!is_valid_ether_addr(mac_addr)) {
828 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
833 memcpy(qdev->ndev->dev_addr,
835 qdev->ndev->addr_len);
838 ql_sem_unlock(qdev, SEM_FLASH_MASK);
842 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
846 __le32 *p = (__le32 *)&qdev->flash;
848 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
850 /* Second function's parameters follow the first
856 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859 for (i = 0; i < size; i++, p++) {
860 status = ql_read_flash_word(qdev, i+offset, p);
862 netif_err(qdev, ifup, qdev->ndev,
863 "Error reading flash.\n");
869 status = ql_validate_flash(qdev,
870 sizeof(struct flash_params_8012) / sizeof(u16),
873 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
878 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
883 memcpy(qdev->ndev->dev_addr,
884 qdev->flash.flash_params_8012.mac_addr,
885 qdev->ndev->addr_len);
888 ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
896 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 /* wait for reg to come ready */
900 status = ql_wait_reg_rdy(qdev,
901 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 /* write the data to the data reg */
905 ql_write32(qdev, XGMAC_DATA, data);
906 /* trigger the write */
907 ql_write32(qdev, XGMAC_ADDR, reg);
/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
915 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 /* wait for reg to come ready */
919 status = ql_wait_reg_rdy(qdev,
920 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923 /* set up for reg read */
924 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
925 /* wait for reg to come ready */
926 status = ql_wait_reg_rdy(qdev,
927 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
931 *data = ql_read32(qdev, XGMAC_DATA);
936 /* This is used for reading the 64-bit statistics regs. */
937 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
943 status = ql_read_xgmac_reg(qdev, reg, &lo);
947 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
951 *data = (u64) lo | ((u64) hi << 32);
957 static int ql_8000_port_initialize(struct ql_adapter *qdev)
961 * Get MPI firmware version for driver banner
964 status = ql_mb_about_fw(qdev);
967 status = ql_mb_get_fw_state(qdev);
970 /* Wake up a worker to get/set the TX/RX frame sizes. */
971 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
982 static int ql_8012_port_initialize(struct ql_adapter *qdev)
987 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
988 /* Another function has the semaphore, so
989 * wait for the port init bit to come ready.
991 netif_info(qdev, link, qdev->ndev,
992 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
993 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
995 netif_crit(qdev, link, qdev->ndev,
996 "Port initialize timed out.\n");
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
1002 /* Set the core reset. */
1003 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006 data |= GLOBAL_CFG_RESET;
1007 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1011 /* Clear the core reset and turn on jumbo for receiver. */
1012 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1013 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1014 data |= GLOBAL_CFG_TX_STAT_EN;
1015 data |= GLOBAL_CFG_RX_STAT_EN;
1016 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	/* Enable transmitter, and clear its reset. */
1021 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1025 data |= TX_CFG_EN; /* Enable the transmitter. */
1026 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	/* Enable receiver and clear its reset. */
1031 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1035 data |= RX_CFG_EN; /* Enable the receiver. */
1036 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1040 /* Turn on jumbo. */
1042 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1046 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1050 /* Signal to the world that the port is enabled. */
1051 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1053 ql_sem_unlock(qdev, qdev->xg_sem_mask);
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
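/* e.g. with 4 KB pages and lbq_buf_order = 1 this is 8 KB.  ql_get_next_chunk()
 * below carves that master block into lbq_buf_size-sized chunks and hands out
 * one chunk per large buffer descriptor.
 */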
1062 /* Get the next large buffer. */
1063 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1065 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1066 rx_ring->lbq_curr_idx++;
1067 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1068 rx_ring->lbq_curr_idx = 0;
1069 rx_ring->lbq_free_cnt++;
1073 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1074 struct rx_ring *rx_ring)
1076 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1078 pci_dma_sync_single_for_cpu(qdev->pdev,
1079 dma_unmap_addr(lbq_desc, mapaddr),
1080 rx_ring->lbq_buf_size,
1081 PCI_DMA_FROMDEVICE);
	/* If it's the last chunk of our master page then
	 * unmap the page, since all of its chunks have now
	 * been consumed.
	 */
1086 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1087 == ql_lbq_block_size(qdev))
1088 pci_unmap_page(qdev->pdev,
1089 lbq_desc->p.pg_chunk.map,
1090 ql_lbq_block_size(qdev),
1091 PCI_DMA_FROMDEVICE);
1095 /* Get the next small buffer. */
1096 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1098 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1099 rx_ring->sbq_curr_idx++;
1100 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1101 rx_ring->sbq_curr_idx = 0;
1102 rx_ring->sbq_free_cnt++;
1106 /* Update an rx ring index. */
1107 static void ql_update_cq(struct rx_ring *rx_ring)
1109 rx_ring->cnsmr_idx++;
1110 rx_ring->curr_entry++;
1111 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1112 rx_ring->cnsmr_idx = 0;
1113 rx_ring->curr_entry = rx_ring->cq_base;
1117 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1119 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1123 struct bq_desc *lbq_desc)
1125 if (!rx_ring->pg_chunk.page) {
1127 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1129 qdev->lbq_buf_order);
1130 if (unlikely(!rx_ring->pg_chunk.page)) {
1131 netif_err(qdev, drv, qdev->ndev,
1132 "page allocation failed.\n");
1135 rx_ring->pg_chunk.offset = 0;
1136 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1137 0, ql_lbq_block_size(qdev),
1138 PCI_DMA_FROMDEVICE);
1139 if (pci_dma_mapping_error(qdev->pdev, map)) {
1140 __free_pages(rx_ring->pg_chunk.page,
1141 qdev->lbq_buf_order);
1142 netif_err(qdev, drv, qdev->ndev,
1143 "PCI mapping failed.\n");
1146 rx_ring->pg_chunk.map = map;
1147 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for the next
	 * buffer get.
	 */
1158 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1159 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1160 rx_ring->pg_chunk.page = NULL;
1161 lbq_desc->p.pg_chunk.last_flag = 1;
1163 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1164 get_page(rx_ring->pg_chunk.page);
1165 lbq_desc->p.pg_chunk.last_flag = 0;
1169 /* Process (refill) a large buffer queue. */
1170 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1172 u32 clean_idx = rx_ring->lbq_clean_idx;
1173 u32 start_idx = clean_idx;
1174 struct bq_desc *lbq_desc;
1178 while (rx_ring->lbq_free_cnt > 32) {
1179 for (i = 0; i < 16; i++) {
1180 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1181 "lbq: try cleaning clean_idx = %d.\n",
1183 lbq_desc = &rx_ring->lbq[clean_idx];
1184 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1185 netif_err(qdev, ifup, qdev->ndev,
1186 "Could not get a page chunk.\n");
1190 map = lbq_desc->p.pg_chunk.map +
1191 lbq_desc->p.pg_chunk.offset;
1192 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1193 dma_unmap_len_set(lbq_desc, maplen,
1194 rx_ring->lbq_buf_size);
1195 *lbq_desc->addr = cpu_to_le64(map);
1197 pci_dma_sync_single_for_device(qdev->pdev, map,
1198 rx_ring->lbq_buf_size,
1199 PCI_DMA_FROMDEVICE);
1201 if (clean_idx == rx_ring->lbq_len)
1205 rx_ring->lbq_clean_idx = clean_idx;
1206 rx_ring->lbq_prod_idx += 16;
1207 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1208 rx_ring->lbq_prod_idx = 0;
1209 rx_ring->lbq_free_cnt -= 16;
1212 if (start_idx != clean_idx) {
1213 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1214 "lbq: updating prod idx = %d.\n",
1215 rx_ring->lbq_prod_idx);
1216 ql_write_db_reg(rx_ring->lbq_prod_idx,
1217 rx_ring->lbq_prod_idx_db_reg);
1221 /* Process (refill) a small buffer queue. */
1222 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1224 u32 clean_idx = rx_ring->sbq_clean_idx;
1225 u32 start_idx = clean_idx;
1226 struct bq_desc *sbq_desc;
1230 while (rx_ring->sbq_free_cnt > 16) {
1231 for (i = 0; i < 16; i++) {
1232 sbq_desc = &rx_ring->sbq[clean_idx];
1233 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1234 "sbq: try cleaning clean_idx = %d.\n",
1236 if (sbq_desc->p.skb == NULL) {
1237 netif_printk(qdev, rx_status, KERN_DEBUG,
1239 "sbq: getting new skb for index %d.\n",
1242 netdev_alloc_skb(qdev->ndev,
1244 if (sbq_desc->p.skb == NULL) {
1245 netif_err(qdev, probe, qdev->ndev,
1246 "Couldn't get an skb.\n");
1247 rx_ring->sbq_clean_idx = clean_idx;
1250 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1251 map = pci_map_single(qdev->pdev,
1252 sbq_desc->p.skb->data,
1253 rx_ring->sbq_buf_size,
1254 PCI_DMA_FROMDEVICE);
1255 if (pci_dma_mapping_error(qdev->pdev, map)) {
1256 netif_err(qdev, ifup, qdev->ndev,
1257 "PCI mapping failed.\n");
1258 rx_ring->sbq_clean_idx = clean_idx;
1259 dev_kfree_skb_any(sbq_desc->p.skb);
1260 sbq_desc->p.skb = NULL;
1263 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1264 dma_unmap_len_set(sbq_desc, maplen,
1265 rx_ring->sbq_buf_size);
1266 *sbq_desc->addr = cpu_to_le64(map);
1270 if (clean_idx == rx_ring->sbq_len)
1273 rx_ring->sbq_clean_idx = clean_idx;
1274 rx_ring->sbq_prod_idx += 16;
1275 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1276 rx_ring->sbq_prod_idx = 0;
1277 rx_ring->sbq_free_cnt -= 16;
1280 if (start_idx != clean_idx) {
1281 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1282 "sbq: updating prod idx = %d.\n",
1283 rx_ring->sbq_prod_idx);
1284 ql_write_db_reg(rx_ring->sbq_prod_idx,
1285 rx_ring->sbq_prod_idx_db_reg);
1289 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1290 struct rx_ring *rx_ring)
1292 ql_update_sbq(qdev, rx_ring);
1293 ql_update_lbq(qdev, rx_ring);
1296 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1297 * fails at some stage, or from the interrupt when a tx completes.
1299 static void ql_unmap_send(struct ql_adapter *qdev,
1300 struct tx_ring_desc *tx_ring_desc, int mapped)
1303 for (i = 0; i < mapped; i++) {
1304 if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
1315 netif_printk(qdev, tx_done, KERN_DEBUG,
1317 "unmapping OAL area.\n");
1319 pci_unmap_single(qdev->pdev,
1320 dma_unmap_addr(&tx_ring_desc->map[i],
1322 dma_unmap_len(&tx_ring_desc->map[i],
1326 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1327 "unmapping frag %d.\n", i);
1328 pci_unmap_page(qdev->pdev,
1329 dma_unmap_addr(&tx_ring_desc->map[i],
1331 dma_unmap_len(&tx_ring_desc->map[i],
1332 maplen), PCI_DMA_TODEVICE);
1338 /* Map the buffers for this transmit. This will return
1339 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1341 static int ql_map_send(struct ql_adapter *qdev,
1342 struct ob_mac_iocb_req *mac_iocb_ptr,
1343 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1345 int len = skb_headlen(skb);
1347 int frag_idx, err, map_idx = 0;
1348 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1349 int frag_cnt = skb_shinfo(skb)->nr_frags;
1352 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1353 "frag_cnt = %d.\n", frag_cnt);
1356 * Map the skb buffer first.
1358 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1360 err = pci_dma_mapping_error(qdev->pdev, map);
1362 netif_err(qdev, tx_queued, qdev->ndev,
1363 "PCI mapping failed with error: %d\n", err);
1365 return NETDEV_TX_BUSY;
1368 tbd->len = cpu_to_le32(len);
1369 tbd->addr = cpu_to_le64(map);
1370 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1371 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
1381 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1382 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1384 if (frag_idx == 6 && frag_cnt > 7) {
1385 /* Let's tack on an sglist.
1386 * Our control block will now
1388 * iocb->seg[0] = skb->data
1389 * iocb->seg[1] = frag[0]
1390 * iocb->seg[2] = frag[1]
1391 * iocb->seg[3] = frag[2]
1392 * iocb->seg[4] = frag[3]
1393 * iocb->seg[5] = frag[4]
1394 * iocb->seg[6] = frag[5]
1395 * iocb->seg[7] = ptr to OAL (external sglist)
1396 * oal->seg[0] = frag[6]
1397 * oal->seg[1] = frag[7]
1398 * oal->seg[2] = frag[8]
1399 * oal->seg[3] = frag[9]
1400 * oal->seg[4] = frag[10]
1403 /* Tack on the OAL in the eighth segment of IOCB. */
1404 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407 err = pci_dma_mapping_error(qdev->pdev, map);
1409 netif_err(qdev, tx_queued, qdev->ndev,
1410 "PCI mapping outbound address list with error: %d\n",
1415 tbd->addr = cpu_to_le64(map);
1417 * The length is the number of fragments
1418 * that remain to be mapped times the length
1419 * of our sglist (OAL).
1422 cpu_to_le32((sizeof(struct tx_buf_desc) *
1423 (frag_cnt - frag_idx)) | TX_DESC_C);
1424 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1426 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1427 sizeof(struct oal));
1428 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1433 pci_map_page(qdev->pdev, frag->page,
1434 frag->page_offset, frag->size,
1437 err = pci_dma_mapping_error(qdev->pdev, map);
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(frag->size);
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be umapped.
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct skb_frag_struct *rx_frag;
1480 struct napi_struct *napi = &rx_ring->napi;
1482 napi->dev = qdev->ndev;
1484 skb = napi_get_frags(napi);
1486 netif_err(qdev, drv, qdev->ndev,
1487 "Couldn't get an skb, exiting.\n");
1488 rx_ring->rx_dropped++;
1489 put_page(lbq_desc->p.pg_chunk.page);
1492 prefetch(lbq_desc->p.pg_chunk.va);
1493 rx_frag = skb_shinfo(skb)->frags;
1494 nr_frags = skb_shinfo(skb)->nr_frags;
1495 rx_frag += nr_frags;
1496 rx_frag->page = lbq_desc->p.pg_chunk.page;
1497 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1498 rx_frag->size = length;
1501 skb->data_len += length;
1502 skb->truesize += length;
1503 skb_shinfo(skb)->nr_frags++;
1505 rx_ring->rx_packets++;
1506 rx_ring->rx_bytes += length;
1507 skb->ip_summed = CHECKSUM_UNNECESSARY;
1508 skb_record_rx_queue(skb, rx_ring->cq_id);
1509 if (qdev->vlgrp && (vlan_id != 0xffff))
1510 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1512 napi_gro_frags(napi);
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517 struct rx_ring *rx_ring,
1518 struct ib_mac_iocb_rsp *ib_mac_rsp,
1522 struct net_device *ndev = qdev->ndev;
1523 struct sk_buff *skb = NULL;
1525 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526 struct napi_struct *napi = &rx_ring->napi;
1528 skb = netdev_alloc_skb(ndev, length);
1530 netif_err(qdev, drv, qdev->ndev,
1531 "Couldn't get an skb, need to unwind!.\n");
1532 rx_ring->rx_dropped++;
1533 put_page(lbq_desc->p.pg_chunk.page);
1537 addr = lbq_desc->p.pg_chunk.va;
1541 /* Frame error, so drop the packet. */
1542 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1543 netif_info(qdev, drv, qdev->ndev,
1544 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1545 rx_ring->rx_errors++;
1549 /* The max framesize filter on this chip is set higher than
1550 * MTU since FCoE uses 2k frames.
1552 if (skb->len > ndev->mtu + ETH_HLEN) {
1553 netif_err(qdev, drv, qdev->ndev,
1554 "Segment too small, dropping.\n");
1555 rx_ring->rx_dropped++;
1558 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1563 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565 skb->len += length-ETH_HLEN;
1566 skb->data_len += length-ETH_HLEN;
1567 skb->truesize += length-ETH_HLEN;
1569 rx_ring->rx_packets++;
1570 rx_ring->rx_bytes += skb->len;
1571 skb->protocol = eth_type_trans(skb, ndev);
1572 skb_checksum_none_assert(skb);
1574 if (qdev->rx_csum &&
1575 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1578 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1579 "TCP checksum done!\n");
1580 skb->ip_summed = CHECKSUM_UNNECESSARY;
1581 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1582 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1583 /* Unfragmented ipv4 UDP frame. */
1584 struct iphdr *iph = (struct iphdr *) skb->data;
1585 if (!(iph->frag_off &
1586 cpu_to_be16(IP_MF|IP_OFFSET))) {
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 netif_printk(qdev, rx_status, KERN_DEBUG,
1590 "TCP checksum done!\n");
1595 skb_record_rx_queue(skb, rx_ring->cq_id);
1596 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1597 if (qdev->vlgrp && (vlan_id != 0xffff))
1598 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1600 napi_gro_receive(napi, skb);
1602 if (qdev->vlgrp && (vlan_id != 0xffff))
1603 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1605 netif_receive_skb(skb);
1609 dev_kfree_skb_any(skb);
1610 put_page(lbq_desc->p.pg_chunk.page);
1613 /* Process an inbound completion from an rx ring. */
1614 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1615 struct rx_ring *rx_ring,
1616 struct ib_mac_iocb_rsp *ib_mac_rsp,
1620 struct net_device *ndev = qdev->ndev;
1621 struct sk_buff *skb = NULL;
1622 struct sk_buff *new_skb = NULL;
1623 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1625 skb = sbq_desc->p.skb;
1626 /* Allocate new_skb and copy */
1627 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1628 if (new_skb == NULL) {
1629 netif_err(qdev, probe, qdev->ndev,
1630 "No skb available, drop the packet.\n");
1631 rx_ring->rx_dropped++;
1634 skb_reserve(new_skb, NET_IP_ALIGN);
1635 memcpy(skb_put(new_skb, length), skb->data, length);
1638 /* Frame error, so drop the packet. */
1639 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1640 netif_info(qdev, drv, qdev->ndev,
1641 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1642 dev_kfree_skb_any(skb);
1643 rx_ring->rx_errors++;
1647 /* loopback self test for ethtool */
1648 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1649 ql_check_lb_frame(qdev, skb);
1650 dev_kfree_skb_any(skb);
1654 /* The max framesize filter on this chip is set higher than
1655 * MTU since FCoE uses 2k frames.
1657 if (skb->len > ndev->mtu + ETH_HLEN) {
1658 dev_kfree_skb_any(skb);
1659 rx_ring->rx_dropped++;
1663 prefetch(skb->data);
1665 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1670 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1671 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1672 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1673 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1675 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1676 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1677 "Promiscuous Packet.\n");
1679 rx_ring->rx_packets++;
1680 rx_ring->rx_bytes += skb->len;
1681 skb->protocol = eth_type_trans(skb, ndev);
1682 skb_checksum_none_assert(skb);
1684 /* If rx checksum is on, and there are no
1685 * csum or frame errors.
1687 if (qdev->rx_csum &&
1688 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1690 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1691 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1692 "TCP checksum done!\n");
1693 skb->ip_summed = CHECKSUM_UNNECESSARY;
1694 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1695 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1696 /* Unfragmented ipv4 UDP frame. */
1697 struct iphdr *iph = (struct iphdr *) skb->data;
1698 if (!(iph->frag_off &
1699 ntohs(IP_MF|IP_OFFSET))) {
1700 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701 netif_printk(qdev, rx_status, KERN_DEBUG,
1703 "TCP checksum done!\n");
1708 skb_record_rx_queue(skb, rx_ring->cq_id);
1709 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1710 if (qdev->vlgrp && (vlan_id != 0xffff))
1711 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1714 napi_gro_receive(&rx_ring->napi, skb);
1716 if (qdev->vlgrp && (vlan_id != 0xffff))
1717 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1719 netif_receive_skb(skb);
1723 static void ql_realign_skb(struct sk_buff *skb, int len)
1725 void *temp_addr = skb->data;
1727 /* Undo the skb_reserve(skb,32) we did before
1728 * giving to hardware, and realign data on
1729 * a 2-byte boundary.
1731 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1732 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1733 skb_copy_to_linear_data(skb, temp_addr,
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1742 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1743 struct rx_ring *rx_ring,
1744 struct ib_mac_iocb_rsp *ib_mac_rsp)
1746 struct bq_desc *lbq_desc;
1747 struct bq_desc *sbq_desc;
1748 struct sk_buff *skb = NULL;
1749 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1750 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1753 * Handle the header buffer if present.
1755 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1756 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1757 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1758 "Header of %d bytes in small buffer.\n", hdr_len);
1760 * Headers fit nicely into a small buffer.
1762 sbq_desc = ql_get_curr_sbuf(rx_ring);
1763 pci_unmap_single(qdev->pdev,
1764 dma_unmap_addr(sbq_desc, mapaddr),
1765 dma_unmap_len(sbq_desc, maplen),
1766 PCI_DMA_FROMDEVICE);
1767 skb = sbq_desc->p.skb;
1768 ql_realign_skb(skb, hdr_len);
1769 skb_put(skb, hdr_len);
1770 sbq_desc->p.skb = NULL;
1774 * Handle the data buffer(s).
1776 if (unlikely(!length)) { /* Is there data too? */
1777 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1778 "No Data buffer in this packet.\n");
1782 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1783 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1784 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1785 "Headers in small, data of %d bytes in small, combine them.\n",
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
1794 sbq_desc = ql_get_curr_sbuf(rx_ring);
1795 pci_dma_sync_single_for_cpu(qdev->pdev,
1797 (sbq_desc, mapaddr),
1800 PCI_DMA_FROMDEVICE);
1801 memcpy(skb_put(skb, length),
1802 sbq_desc->p.skb->data, length);
1803 pci_dma_sync_single_for_device(qdev->pdev,
1810 PCI_DMA_FROMDEVICE);
1812 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1813 "%d bytes in a single small buffer.\n",
1815 sbq_desc = ql_get_curr_sbuf(rx_ring);
1816 skb = sbq_desc->p.skb;
1817 ql_realign_skb(skb, length);
1818 skb_put(skb, length);
1819 pci_unmap_single(qdev->pdev,
1820 dma_unmap_addr(sbq_desc,
1822 dma_unmap_len(sbq_desc,
1824 PCI_DMA_FROMDEVICE);
1825 sbq_desc->p.skb = NULL;
1827 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1828 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "Header in small, %d bytes in large. Chain large to small!\n",
1833 * The data is in a single large buffer. We
1834 * chain it to the header buffer's skb and let
1837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1839 "Chaining page at offset = %d, for %d bytes to skb.\n",
1840 lbq_desc->p.pg_chunk.offset, length);
1841 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1842 lbq_desc->p.pg_chunk.offset,
1845 skb->data_len += length;
1846 skb->truesize += length;
1849 * The headers and data are in a single large buffer. We
1850 * copy it to a new skb and let it go. This can happen with
1851 * jumbo mtu on a non-TCP/UDP frame.
1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854 skb = netdev_alloc_skb(qdev->ndev, length);
1856 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1857 "No skb available, drop the packet.\n");
1860 pci_unmap_page(qdev->pdev,
1861 dma_unmap_addr(lbq_desc,
1863 dma_unmap_len(lbq_desc, maplen),
1864 PCI_DMA_FROMDEVICE);
1865 skb_reserve(skb, NET_IP_ALIGN);
1866 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1869 skb_fill_page_desc(skb, 0,
1870 lbq_desc->p.pg_chunk.page,
1871 lbq_desc->p.pg_chunk.offset,
1874 skb->data_len += length;
1875 skb->truesize += length;
1877 __pskb_pull_tail(skb,
1878 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1879 VLAN_ETH_HLEN : ETH_HLEN);
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
1894 sbq_desc = ql_get_curr_sbuf(rx_ring);
1895 pci_unmap_single(qdev->pdev,
1896 dma_unmap_addr(sbq_desc, mapaddr),
1897 dma_unmap_len(sbq_desc, maplen),
1898 PCI_DMA_FROMDEVICE);
1899 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
1909 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1910 "%d bytes of headers & data in chain of large.\n",
1912 skb = sbq_desc->p.skb;
1913 sbq_desc->p.skb = NULL;
1914 skb_reserve(skb, NET_IP_ALIGN);
1916 while (length > 0) {
1917 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1918 size = (length < rx_ring->lbq_buf_size) ? length :
1919 rx_ring->lbq_buf_size;
1921 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1922 "Adding page %d to skb for %d bytes.\n",
1924 skb_fill_page_desc(skb, i,
1925 lbq_desc->p.pg_chunk.page,
1926 lbq_desc->p.pg_chunk.offset,
1929 skb->data_len += size;
1930 skb->truesize += size;
1934 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1935 VLAN_ETH_HLEN : ETH_HLEN);
1940 /* Process an inbound completion from an rx ring. */
1941 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1942 struct rx_ring *rx_ring,
1943 struct ib_mac_iocb_rsp *ib_mac_rsp,
1946 struct net_device *ndev = qdev->ndev;
1947 struct sk_buff *skb = NULL;
1949 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1951 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1952 if (unlikely(!skb)) {
1953 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1954 "No skb available, drop packet.\n");
1955 rx_ring->rx_dropped++;
1959 /* Frame error, so drop the packet. */
1960 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1961 netif_info(qdev, drv, qdev->ndev,
1962 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1963 dev_kfree_skb_any(skb);
1964 rx_ring->rx_errors++;
1968 /* The max framesize filter on this chip is set higher than
1969 * MTU since FCoE uses 2k frames.
1971 if (skb->len > ndev->mtu + ETH_HLEN) {
1972 dev_kfree_skb_any(skb);
1973 rx_ring->rx_dropped++;
1977 /* loopback self test for ethtool */
1978 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1979 ql_check_lb_frame(qdev, skb);
1980 dev_kfree_skb_any(skb);
1984 prefetch(skb->data);
1986 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1987 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1988 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1989 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1990 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1991 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1992 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1993 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1994 rx_ring->rx_multicast++;
1996 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1997 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1998 "Promiscuous Packet.\n");
2001 skb->protocol = eth_type_trans(skb, ndev);
2002 skb_checksum_none_assert(skb);
2004 /* If rx checksum is on, and there are no
2005 * csum or frame errors.
2007 if (qdev->rx_csum &&
2008 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 "TCP checksum done!\n");
2013 skb->ip_summed = CHECKSUM_UNNECESSARY;
2014 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2015 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2016 /* Unfragmented ipv4 UDP frame. */
2017 struct iphdr *iph = (struct iphdr *) skb->data;
2018 if (!(iph->frag_off &
2019 ntohs(IP_MF|IP_OFFSET))) {
2020 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2022 "TCP checksum done!\n");
2027 rx_ring->rx_packets++;
2028 rx_ring->rx_bytes += skb->len;
2029 skb_record_rx_queue(skb, rx_ring->cq_id);
2030 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2032 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2034 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2037 napi_gro_receive(&rx_ring->napi, skb);
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2042 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2044 netif_receive_skb(skb);
2048 /* Process an inbound completion from an rx ring. */
2049 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2050 struct rx_ring *rx_ring,
2051 struct ib_mac_iocb_rsp *ib_mac_rsp)
2053 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2054 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2055 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2056 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2058 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2060 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2061 /* The data and headers are split into
2064 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2067 /* The data fit in a single small buffer.
2068 * Allocate a new skb, copy the data and
2069 * return the buffer to the free pool.
2071 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2073 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2074 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2075 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2076 /* TCP packet in a page chunk that's been checksummed.
2077 * Tack it on to our GRO skb and let it go.
2079 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2081 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2082 /* Non-TCP packet in a page chunk. Allocate an
2083 * skb, tack it on frags, and send it up.
2085 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
2091 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2095 return (unsigned long)length;
2098 /* Process an outbound completion from an rx ring. */
2099 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2100 struct ob_mac_iocb_rsp *mac_rsp)
2102 struct tx_ring *tx_ring;
2103 struct tx_ring_desc *tx_ring_desc;
2105 QL_DUMP_OB_MAC_RSP(mac_rsp);
2106 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2107 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2108 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2109 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2110 tx_ring->tx_packets++;
2111 dev_kfree_skb(tx_ring_desc->skb);
2112 tx_ring_desc->skb = NULL;
2114 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2117 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2118 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2119 netif_warn(qdev, tx_done, qdev->ndev,
2120 "Total descriptor length did not match transfer length.\n");
2122 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2123 netif_warn(qdev, tx_done, qdev->ndev,
2124 "Frame too short to be valid, not sent.\n");
2126 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2127 netif_warn(qdev, tx_done, qdev->ndev,
2128 "Frame too long, but sent anyway.\n");
2130 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2131 netif_warn(qdev, tx_done, qdev->ndev,
2132 "PCI backplane error. Frame not sent.\n");
2135 atomic_inc(&tx_ring->tx_count);
2138 /* Fire up a handler to reset the MPI processor. */
2139 void ql_queue_fw_error(struct ql_adapter *qdev)
2142 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2145 void ql_queue_asic_error(struct ql_adapter *qdev)
2148 ql_disable_interrupts(qdev);
2149 /* Clear adapter up bit to signal the recovery
2150 * process that it shouldn't kill the reset worker
2153 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2154 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2157 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2158 struct ib_ae_iocb_rsp *ib_ae_rsp)
2160 switch (ib_ae_rsp->event) {
2161 case MGMT_ERR_EVENT:
2162 netif_err(qdev, rx_err, qdev->ndev,
2163 "Management Processor Fatal Error.\n");
2164 ql_queue_fw_error(qdev);
2167 case CAM_LOOKUP_ERR_EVENT:
2168 netif_err(qdev, link, qdev->ndev,
2169 "Multiple CAM hits lookup occurred.\n");
2170 netif_err(qdev, drv, qdev->ndev,
2171 "This event shouldn't occur.\n");
2172 ql_queue_asic_error(qdev);
2175 case SOFT_ECC_ERROR_EVENT:
2176 netif_err(qdev, rx_err, qdev->ndev,
2177 "Soft ECC error detected.\n");
2178 ql_queue_asic_error(qdev);
2181 case PCI_ERR_ANON_BUF_RD:
2182 netif_err(qdev, rx_err, qdev->ndev,
2183 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2185 ql_queue_asic_error(qdev);
2189 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2191 ql_queue_asic_error(qdev);
2196 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2198 struct ql_adapter *qdev = rx_ring->qdev;
2199 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2200 struct ob_mac_iocb_rsp *net_rsp = NULL;
2203 struct tx_ring *tx_ring;
2204 /* While there are entries in the completion queue. */
2205 while (prod != rx_ring->cnsmr_idx) {
2207 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2208 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2209 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2211 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2213 switch (net_rsp->opcode) {
2215 case OPCODE_OB_MAC_TSO_IOCB:
2216 case OPCODE_OB_MAC_IOCB:
2217 ql_process_mac_tx_intr(qdev, net_rsp);
2220 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2221 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2225 ql_update_cq(rx_ring);
2226 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230 ql_write_cq_idx(rx_ring);
2231 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2232 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2233 if (atomic_read(&tx_ring->queue_stopped) &&
2234 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2236 * The queue got stopped because the tx_ring was full.
2237 * Wake it up, because it's now at least 25% empty.
2239 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2245 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2247 struct ql_adapter *qdev = rx_ring->qdev;
2248 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2249 struct ql_net_rsp_iocb *net_rsp;
2252 /* While there are entries in the completion queue. */
2253 while (prod != rx_ring->cnsmr_idx) {
2255 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2257 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2259 net_rsp = rx_ring->curr_entry;
2261 switch (net_rsp->opcode) {
2262 case OPCODE_IB_MAC_IOCB:
2263 ql_process_mac_rx_intr(qdev, rx_ring,
2264 (struct ib_mac_iocb_rsp *)
2268 case OPCODE_IB_AE_IOCB:
2269 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2273 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2274 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2279 ql_update_cq(rx_ring);
2280 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2281 if (count == budget)
2284 ql_update_buffer_queues(qdev, rx_ring);
2285 ql_write_cq_idx(rx_ring);
2289 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2291 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2292 struct ql_adapter *qdev = rx_ring->qdev;
2293 struct rx_ring *trx_ring;
2294 int i, work_done = 0;
2295 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2297 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2298 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2300 /* Service the TX rings first. They start
2301 * right after the RSS rings. */
2302 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2303 trx_ring = &qdev->rx_ring[i];
2304 /* If this TX completion ring belongs to this vector and
2305 * it's not empty then service it.
2307 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2308 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2309 trx_ring->cnsmr_idx)) {
2310 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2311 "%s: Servicing TX completion ring %d.\n",
2312 __func__, trx_ring->cq_id);
2313 ql_clean_outbound_rx_ring(trx_ring);
2318 * Now service the RSS ring if it's active.
2320 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2321 rx_ring->cnsmr_idx) {
2322 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2323 "%s: Servicing RX completion ring %d.\n",
2324 __func__, rx_ring->cq_id);
2325 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
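/* Standard NAPI contract: if the whole budget was consumed, the core
 * will poll again, so the completion interrupt is re-armed only once
 * work_done drops below budget.
 */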
2328 if (work_done < budget) {
2329 napi_complete(napi);
2330 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2335 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2337 struct ql_adapter *qdev = netdev_priv(ndev);
2341 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2342 "Turning on VLAN in NIC_RCV_CFG.\n");
2343 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2344 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2346 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2347 "Turning off VLAN in NIC_RCV_CFG.\n");
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2352 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2354 struct ql_adapter *qdev = netdev_priv(ndev);
2355 u32 enable_bit = MAC_ADDR_E;
2358 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2361 if (ql_set_mac_addr_reg
2362 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2363 netif_err(qdev, ifup, qdev->ndev,
2364 "Failed to init vlan address.\n");
2366 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2369 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2371 struct ql_adapter *qdev = netdev_priv(ndev);
2375 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2379 if (ql_set_mac_addr_reg
2380 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2381 netif_err(qdev, ifup, qdev->ndev,
2382 "Failed to clear vlan address.\n");
2384 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2388 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2389 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2391 struct rx_ring *rx_ring = dev_id;
2392 napi_schedule(&rx_ring->napi);
2396 /* This handles a fatal error, MPI activity, and the default
2397 * rx_ring in an MSI-X multiple vector environment.
2398 * In MSI/Legacy environment it also processes the rest of
2401 static irqreturn_t qlge_isr(int irq, void *dev_id)
2403 struct rx_ring *rx_ring = dev_id;
2404 struct ql_adapter *qdev = rx_ring->qdev;
2405 struct intr_context *intr_context = &qdev->intr_context[0];
2409 spin_lock(&qdev->hw_lock);
2410 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2411 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2412 "Shared Interrupt, Not ours!\n");
2413 spin_unlock(&qdev->hw_lock);
2416 spin_unlock(&qdev->hw_lock);
2418 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2421 * Check for fatal error.
2424 ql_queue_asic_error(qdev);
2425 netif_err(qdev, intr, qdev->ndev,
2426 "Got fatal error, STS = %x.\n", var);
2427 var = ql_read32(qdev, ERR_STS);
2428 netif_err(qdev, intr, qdev->ndev,
2429 "Resetting chip. Error Status Register = 0x%x\n", var);
2434 * Check MPI processor activity.
2436 if ((var & STS_PI) &&
2437 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2439 * We've got an async event or mailbox completion.
2440 * Handle it and clear the source of the interrupt.
2442 netif_err(qdev, intr, qdev->ndev,
2443 "Got MPI processor interrupt.\n");
2444 ql_disable_completion_interrupt(qdev, intr_context->intr);
2445 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2446 queue_delayed_work_on(smp_processor_id(),
2447 qdev->workqueue, &qdev->mpi_work, 0);
2452 * Get the bit-mask that shows the active queues for this
2453 * pass. Compare it to the queues that this irq services
2454 * and call napi if there's a match.
2456 var = ql_read32(qdev, ISR1);
2457 if (var & intr_context->irq_mask) {
2458 netif_info(qdev, intr, qdev->ndev,
2459 "Waking handler for rx_ring[0].\n");
2460 ql_disable_completion_interrupt(qdev, intr_context->intr);
2461 napi_schedule(&rx_ring->napi);
2464 ql_enable_completion_interrupt(qdev, intr_context->intr);
2465 return work_done ? IRQ_HANDLED : IRQ_NONE;
2468 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2471 if (skb_is_gso(skb)) {
2473 if (skb_header_cloned(skb)) {
2474 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2480 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2481 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2482 mac_iocb_ptr->total_hdrs_len =
2483 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2484 mac_iocb_ptr->net_trans_offset =
2485 cpu_to_le16(skb_network_offset(skb) |
2486 skb_transport_offset(skb)
2487 << OB_MAC_TRANSPORT_HDR_SHIFT);
2488 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2489 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2490 if (likely(skb->protocol == htons(ETH_P_IP))) {
2491 struct iphdr *iph = ip_hdr(skb);
2493 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2494 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2498 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2499 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2500 tcp_hdr(skb)->check =
2501 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2502 &ipv6_hdr(skb)->daddr,
2510 static void ql_hw_csum_setup(struct sk_buff *skb,
2511 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2514 struct iphdr *iph = ip_hdr(skb);
2516 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2517 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2518 mac_iocb_ptr->net_trans_offset =
2519 cpu_to_le16(skb_network_offset(skb) |
2520 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2522 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2523 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2524 if (likely(iph->protocol == IPPROTO_TCP)) {
2525 check = &(tcp_hdr(skb)->check);
2526 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2527 mac_iocb_ptr->total_hdrs_len =
2528 cpu_to_le16(skb_transport_offset(skb) +
2529 (tcp_hdr(skb)->doff << 2));
2531 check = &(udp_hdr(skb)->check);
2532 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2533 mac_iocb_ptr->total_hdrs_len =
2534 cpu_to_le16(skb_transport_offset(skb) +
2535 sizeof(struct udphdr));
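/* Seed the checksum field with the complemented pseudo-header sum;
 * the hardware checksum engine then only has to fold in the TCP/UDP
 * header and payload bytes.
 */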
2537 *check = ~csum_tcpudp_magic(iph->saddr,
2538 iph->daddr, len, iph->protocol, 0);
2541 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2543 struct tx_ring_desc *tx_ring_desc;
2544 struct ob_mac_iocb_req *mac_iocb_ptr;
2545 struct ql_adapter *qdev = netdev_priv(ndev);
2547 struct tx_ring *tx_ring;
2548 u32 tx_ring_idx = (u32) skb->queue_mapping;
2550 tx_ring = &qdev->tx_ring[tx_ring_idx];
2552 if (skb_padto(skb, ETH_ZLEN))
2553 return NETDEV_TX_OK;
2555 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2556 netif_info(qdev, tx_queued, qdev->ndev,
2557 "%s: shutting down tx queue %d du to lack of resources.\n",
2558 __func__, tx_ring_idx);
2559 netif_stop_subqueue(ndev, tx_ring->wq_id);
2560 atomic_inc(&tx_ring->queue_stopped);
2561 tx_ring->tx_errors++;
2562 return NETDEV_TX_BUSY;
2564 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2565 mac_iocb_ptr = tx_ring_desc->queue_entry;
2566 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2568 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2569 mac_iocb_ptr->tid = tx_ring_desc->index;
2570 /* We use the upper 32-bits to store the tx queue for this IO.
2571 * When we get the completion we can use it to establish the context.
2573 mac_iocb_ptr->txq_idx = tx_ring_idx;
2574 tx_ring_desc->skb = skb;
2576 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2578 if (vlan_tx_tag_present(skb)) {
2579 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2580 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2581 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2582 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2584 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2586 dev_kfree_skb_any(skb);
2587 return NETDEV_TX_OK;
2588 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2589 ql_hw_csum_setup(skb,
2590 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2592 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2594 netif_err(qdev, tx_queued, qdev->ndev,
2595 "Could not map the segments.\n");
2596 tx_ring->tx_errors++;
2597 return NETDEV_TX_BUSY;
2599 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2600 tx_ring->prod_idx++;
2601 if (tx_ring->prod_idx == tx_ring->wq_len)
2602 tx_ring->prod_idx = 0;
2605 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2606 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2607 "tx queued, slot %d, len %d\n",
2608 tx_ring->prod_idx, skb->len);
2610 atomic_dec(&tx_ring->tx_count);
2611 return NETDEV_TX_OK;
2615 static void ql_free_shadow_space(struct ql_adapter *qdev)
2617 if (qdev->rx_ring_shadow_reg_area) {
2618 pci_free_consistent(qdev->pdev,
2620 qdev->rx_ring_shadow_reg_area,
2621 qdev->rx_ring_shadow_reg_dma);
2622 qdev->rx_ring_shadow_reg_area = NULL;
2624 if (qdev->tx_ring_shadow_reg_area) {
2625 pci_free_consistent(qdev->pdev,
2627 qdev->tx_ring_shadow_reg_area,
2628 qdev->tx_ring_shadow_reg_dma);
2629 qdev->tx_ring_shadow_reg_area = NULL;
2633 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2635 qdev->rx_ring_shadow_reg_area =
2636 pci_alloc_consistent(qdev->pdev,
2637 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2638 if (qdev->rx_ring_shadow_reg_area == NULL) {
2639 netif_err(qdev, ifup, qdev->ndev,
2640 "Allocation of RX shadow space failed.\n");
2643 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2644 qdev->tx_ring_shadow_reg_area =
2645 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2646 &qdev->tx_ring_shadow_reg_dma);
2647 if (qdev->tx_ring_shadow_reg_area == NULL) {
2648 netif_err(qdev, ifup, qdev->ndev,
2649 "Allocation of TX shadow space failed.\n");
2650 goto err_wqp_sh_area;
2652 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2656 pci_free_consistent(qdev->pdev,
2658 qdev->rx_ring_shadow_reg_area,
2659 qdev->rx_ring_shadow_reg_dma);
2663 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2665 struct tx_ring_desc *tx_ring_desc;
2667 struct ob_mac_iocb_req *mac_iocb_ptr;
2669 mac_iocb_ptr = tx_ring->wq_base;
2670 tx_ring_desc = tx_ring->q;
2671 for (i = 0; i < tx_ring->wq_len; i++) {
2672 tx_ring_desc->index = i;
2673 tx_ring_desc->skb = NULL;
2674 tx_ring_desc->queue_entry = mac_iocb_ptr;
2678 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2679 atomic_set(&tx_ring->queue_stopped, 0);
2682 static void ql_free_tx_resources(struct ql_adapter *qdev,
2683 struct tx_ring *tx_ring)
2685 if (tx_ring->wq_base) {
2686 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2687 tx_ring->wq_base, tx_ring->wq_base_dma);
2688 tx_ring->wq_base = NULL;
2694 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2695 struct tx_ring *tx_ring)
2698 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2699 &tx_ring->wq_base_dma);
2701 if ((tx_ring->wq_base == NULL) ||
2702 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2703 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2707 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2708 if (tx_ring->q == NULL)
2713 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2714 tx_ring->wq_base, tx_ring->wq_base_dma);
2718 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2720 struct bq_desc *lbq_desc;
2722 uint32_t curr_idx, clean_idx;
2724 curr_idx = rx_ring->lbq_curr_idx;
2725 clean_idx = rx_ring->lbq_clean_idx;
2726 while (curr_idx != clean_idx) {
2727 lbq_desc = &rx_ring->lbq[curr_idx];
2729 if (lbq_desc->p.pg_chunk.last_flag) {
2730 pci_unmap_page(qdev->pdev,
2731 lbq_desc->p.pg_chunk.map,
2732 ql_lbq_block_size(qdev),
2733 PCI_DMA_FROMDEVICE);
2734 lbq_desc->p.pg_chunk.last_flag = 0;
2737 put_page(lbq_desc->p.pg_chunk.page);
2738 lbq_desc->p.pg_chunk.page = NULL;
2740 if (++curr_idx == rx_ring->lbq_len)
2746 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2749 struct bq_desc *sbq_desc;
2751 for (i = 0; i < rx_ring->sbq_len; i++) {
2752 sbq_desc = &rx_ring->sbq[i];
2753 if (sbq_desc == NULL) {
2754 netif_err(qdev, ifup, qdev->ndev,
2755 "sbq_desc %d is NULL.\n", i);
2758 if (sbq_desc->p.skb) {
2759 pci_unmap_single(qdev->pdev,
2760 dma_unmap_addr(sbq_desc, mapaddr),
2761 dma_unmap_len(sbq_desc, maplen),
2762 PCI_DMA_FROMDEVICE);
2763 dev_kfree_skb(sbq_desc->p.skb);
2764 sbq_desc->p.skb = NULL;
2769 /* Free all large and small rx buffers associated
2770 * with the completion queues for this device.
2772 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2775 struct rx_ring *rx_ring;
2777 for (i = 0; i < qdev->rx_ring_count; i++) {
2778 rx_ring = &qdev->rx_ring[i];
2780 ql_free_lbq_buffers(qdev, rx_ring);
2782 ql_free_sbq_buffers(qdev, rx_ring);
2786 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2788 struct rx_ring *rx_ring;
2791 for (i = 0; i < qdev->rx_ring_count; i++) {
2792 rx_ring = &qdev->rx_ring[i];
2793 if (rx_ring->type != TX_Q)
2794 ql_update_buffer_queues(qdev, rx_ring);
2798 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2799 struct rx_ring *rx_ring)
2802 struct bq_desc *lbq_desc;
2803 __le64 *bq = rx_ring->lbq_base;
2805 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2806 for (i = 0; i < rx_ring->lbq_len; i++) {
2807 lbq_desc = &rx_ring->lbq[i];
2808 memset(lbq_desc, 0, sizeof(*lbq_desc));
2809 lbq_desc->index = i;
2810 lbq_desc->addr = bq;
2815 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2816 struct rx_ring *rx_ring)
2819 struct bq_desc *sbq_desc;
2820 __le64 *bq = rx_ring->sbq_base;
2822 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2823 for (i = 0; i < rx_ring->sbq_len; i++) {
2824 sbq_desc = &rx_ring->sbq[i];
2825 memset(sbq_desc, 0, sizeof(*sbq_desc));
2826 sbq_desc->index = i;
2827 sbq_desc->addr = bq;
2832 static void ql_free_rx_resources(struct ql_adapter *qdev,
2833 struct rx_ring *rx_ring)
2835 /* Free the small buffer queue. */
2836 if (rx_ring->sbq_base) {
2837 pci_free_consistent(qdev->pdev,
2839 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2840 rx_ring->sbq_base = NULL;
2843 /* Free the small buffer queue control blocks. */
2844 kfree(rx_ring->sbq);
2845 rx_ring->sbq = NULL;
2847 /* Free the large buffer queue. */
2848 if (rx_ring->lbq_base) {
2849 pci_free_consistent(qdev->pdev,
2851 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2852 rx_ring->lbq_base = NULL;
2855 /* Free the large buffer queue control blocks. */
2856 kfree(rx_ring->lbq);
2857 rx_ring->lbq = NULL;
2859 /* Free the rx queue. */
2860 if (rx_ring->cq_base) {
2861 pci_free_consistent(qdev->pdev,
2863 rx_ring->cq_base, rx_ring->cq_base_dma);
2864 rx_ring->cq_base = NULL;
2868 /* Allocate queues and buffers for this completions queue based
2869 * on the values in the parameter structure. */
2870 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2871 struct rx_ring *rx_ring)
2875 * Allocate the completion queue for this rx_ring.
2878 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2879 &rx_ring->cq_base_dma);
2881 if (rx_ring->cq_base == NULL) {
2882 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2886 if (rx_ring->sbq_len) {
2888 * Allocate small buffer queue.
2891 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2892 &rx_ring->sbq_base_dma);
2894 if (rx_ring->sbq_base == NULL) {
2895 netif_err(qdev, ifup, qdev->ndev,
2896 "Small buffer queue allocation failed.\n");
2901 * Allocate small buffer queue control blocks.
2904 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2906 if (rx_ring->sbq == NULL) {
2907 netif_err(qdev, ifup, qdev->ndev,
2908 "Small buffer queue control block allocation failed.\n");
2912 ql_init_sbq_ring(qdev, rx_ring);
2915 if (rx_ring->lbq_len) {
2917 * Allocate large buffer queue.
2920 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2921 &rx_ring->lbq_base_dma);
2923 if (rx_ring->lbq_base == NULL) {
2924 netif_err(qdev, ifup, qdev->ndev,
2925 "Large buffer queue allocation failed.\n");
2929 * Allocate large buffer queue control blocks.
2932 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2934 if (rx_ring->lbq == NULL) {
2935 netif_err(qdev, ifup, qdev->ndev,
2936 "Large buffer queue control block allocation failed.\n");
2940 ql_init_lbq_ring(qdev, rx_ring);
2946 ql_free_rx_resources(qdev, rx_ring);
2950 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2952 struct tx_ring *tx_ring;
2953 struct tx_ring_desc *tx_ring_desc;
2957 * Loop through all queues and free
2960 for (j = 0; j < qdev->tx_ring_count; j++) {
2961 tx_ring = &qdev->tx_ring[j];
2962 for (i = 0; i < tx_ring->wq_len; i++) {
2963 tx_ring_desc = &tx_ring->q[i];
2964 if (tx_ring_desc && tx_ring_desc->skb) {
2965 netif_err(qdev, ifdown, qdev->ndev,
2966 "Freeing lost SKB %p, from queue %d, index %d.\n",
2967 tx_ring_desc->skb, j,
2968 tx_ring_desc->index);
2969 ql_unmap_send(qdev, tx_ring_desc,
2970 tx_ring_desc->map_cnt);
2971 dev_kfree_skb(tx_ring_desc->skb);
2972 tx_ring_desc->skb = NULL;
2978 static void ql_free_mem_resources(struct ql_adapter *qdev)
2982 for (i = 0; i < qdev->tx_ring_count; i++)
2983 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2984 for (i = 0; i < qdev->rx_ring_count; i++)
2985 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2986 ql_free_shadow_space(qdev);
2989 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2993 /* Allocate space for our shadow registers and such. */
2994 if (ql_alloc_shadow_space(qdev))
2997 for (i = 0; i < qdev->rx_ring_count; i++) {
2998 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2999 netif_err(qdev, ifup, qdev->ndev,
3000 "RX resource allocation failed.\n");
3004 /* Allocate tx queue resources */
3005 for (i = 0; i < qdev->tx_ring_count; i++) {
3006 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3007 netif_err(qdev, ifup, qdev->ndev,
3008 "TX resource allocation failed.\n");
3015 ql_free_mem_resources(qdev);
3019 /* Set up the rx ring control block and pass it to the chip.
3020 * The control block is defined as
3021 * "Completion Queue Initialization Control Block", or cqicb.
3023 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3025 struct cqicb *cqicb = &rx_ring->cqicb;
3026 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3027 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3028 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3029 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3030 void __iomem *doorbell_area =
3031 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3035 __le64 *base_indirect_ptr;
3038 /* Set up the shadow registers for this ring. */
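/* Layout of each ring's RX_RING_SHADOW_SPACE: an 8-byte producer
 * index that the chip writes via DMA, followed by the large- and
 * small-buffer-queue base-address indirection lists filled in later
 * in this function.
 */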
3039 rx_ring->prod_idx_sh_reg = shadow_reg;
3040 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3041 *rx_ring->prod_idx_sh_reg = 0;
3042 shadow_reg += sizeof(u64);
3043 shadow_reg_dma += sizeof(u64);
3044 rx_ring->lbq_base_indirect = shadow_reg;
3045 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3046 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3047 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3048 rx_ring->sbq_base_indirect = shadow_reg;
3049 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3051 /* PCI doorbell mem area + 0x00 for consumer index register */
3052 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3053 rx_ring->cnsmr_idx = 0;
3054 rx_ring->curr_entry = rx_ring->cq_base;
3056 /* PCI doorbell mem area + 0x04 for valid register */
3057 rx_ring->valid_db_reg = doorbell_area + 0x04;
3059 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3060 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3062 /* PCI doorbell mem area + 0x1c */
3063 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3065 memset((void *)cqicb, 0, sizeof(struct cqicb));
3066 cqicb->msix_vect = rx_ring->irq;
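/* The CQICB length fields are 16 bits wide, so a queue length of
 * 65536 is encoded as 0 (the hardware presumably treats 0 as the
 * maximum); the buffer queues below use the same convention.
 */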
3068 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3069 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3071 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3073 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3076 * Set up the control block load flags.
3078 cqicb->flags = FLAGS_LC | /* Load queue base address */
3079 FLAGS_LV | /* Load MSI-X vector */
3080 FLAGS_LI; /* Load irq delay values */
3081 if (rx_ring->lbq_len) {
3082 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3083 tmp = (u64)rx_ring->lbq_base_dma;
3084 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
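/* Walk the large buffer queue one DB_PAGE_SIZE chunk at a time,
 * writing each chunk's DMA address into the indirection list the
 * chip follows to locate the queue.
 */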
3087 *base_indirect_ptr = cpu_to_le64(tmp);
3088 tmp += DB_PAGE_SIZE;
3089 base_indirect_ptr++;
3091 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3093 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3094 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3095 (u16) rx_ring->lbq_buf_size;
3096 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3097 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3098 (u16) rx_ring->lbq_len;
3099 cqicb->lbq_len = cpu_to_le16(bq_len);
3100 rx_ring->lbq_prod_idx = 0;
3101 rx_ring->lbq_curr_idx = 0;
3102 rx_ring->lbq_clean_idx = 0;
3103 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3105 if (rx_ring->sbq_len) {
3106 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3107 tmp = (u64)rx_ring->sbq_base_dma;
3108 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3111 *base_indirect_ptr = cpu_to_le64(tmp);
3112 tmp += DB_PAGE_SIZE;
3113 base_indirect_ptr++;
3115 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3117 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3118 cqicb->sbq_buf_size =
3119 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3120 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3121 (u16) rx_ring->sbq_len;
3122 cqicb->sbq_len = cpu_to_le16(bq_len);
3123 rx_ring->sbq_prod_idx = 0;
3124 rx_ring->sbq_curr_idx = 0;
3125 rx_ring->sbq_clean_idx = 0;
3126 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3128 switch (rx_ring->type) {
3130 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3131 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3134 /* Inbound completion handling rx_rings run in
3135 * separate NAPI contexts.
3137 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3139 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3140 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3143 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3146 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3147 "Initializing rx work queue.\n");
3148 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3149 CFG_LCQ, rx_ring->cq_id);
3151 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3157 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3159 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3160 void __iomem *doorbell_area =
3161 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3162 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3163 (tx_ring->wq_id * sizeof(u64));
3164 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3165 (tx_ring->wq_id * sizeof(u64));
3169 * Assign doorbell registers for this tx_ring.
3171 /* TX PCI doorbell mem area for tx producer index */
3172 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3173 tx_ring->prod_idx = 0;
3174 /* TX PCI doorbell mem area + 0x04 */
3175 tx_ring->valid_db_reg = doorbell_area + 0x04;
3178 * Assign shadow registers for this tx_ring.
3180 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3181 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3183 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3184 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3185 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3186 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
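/* Each TX work queue reports its completions to a dedicated
 * completion queue; cq_id was assigned in ql_configure_rings() as
 * rss_ring_count plus the TX ring index.
 */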
3188 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3190 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3192 ql_init_tx_ring(qdev, tx_ring);
3194 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3195 (u16) tx_ring->wq_id);
3197 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3200 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3201 "Successfully loaded WQICB.\n");
3205 static void ql_disable_msix(struct ql_adapter *qdev)
3207 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3208 pci_disable_msix(qdev->pdev);
3209 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3210 kfree(qdev->msi_x_entry);
3211 qdev->msi_x_entry = NULL;
3212 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3213 pci_disable_msi(qdev->pdev);
3214 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3218 /* We start by trying to get the number of vectors
3219 * stored in qdev->intr_count. If we don't get that
3220 * many then we reduce the count and try again.
3222 static void ql_enable_msix(struct ql_adapter *qdev)
3226 /* Get the MSIX vectors. */
3227 if (qlge_irq_type == MSIX_IRQ) {
3228 /* Try to alloc space for the msix struct,
3229 * if it fails then go to MSI/legacy.
3231 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3232 sizeof(struct msix_entry),
3234 if (!qdev->msi_x_entry) {
3235 qlge_irq_type = MSI_IRQ;
3239 for (i = 0; i < qdev->intr_count; i++)
3240 qdev->msi_x_entry[i].entry = i;
3242 /* Loop to get our vectors. We start with
3243 * what we want and settle for what we get.
3246 err = pci_enable_msix(qdev->pdev,
3247 qdev->msi_x_entry, qdev->intr_count);
3249 qdev->intr_count = err;
3253 kfree(qdev->msi_x_entry);
3254 qdev->msi_x_entry = NULL;
3255 netif_warn(qdev, ifup, qdev->ndev,
3256 "MSI-X Enable failed, trying MSI.\n");
3257 qdev->intr_count = 1;
3258 qlge_irq_type = MSI_IRQ;
3259 } else if (err == 0) {
3260 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3261 netif_info(qdev, ifup, qdev->ndev,
3262 "MSI-X Enabled, got %d vectors.\n",
3268 qdev->intr_count = 1;
3269 if (qlge_irq_type == MSI_IRQ) {
3270 if (!pci_enable_msi(qdev->pdev)) {
3271 set_bit(QL_MSI_ENABLED, &qdev->flags);
3272 netif_info(qdev, ifup, qdev->ndev,
3273 "Running with MSI interrupts.\n");
3277 qlge_irq_type = LEG_IRQ;
3278 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3279 "Running with legacy interrupts.\n");
3282 /* Each vector services 1 RSS ring and 1 or more
3283 * TX completion rings. This function loops through
3284 * the TX completion rings and assigns the vector that
3285 * will service it. An example would be if there are
3286 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3287 * This would mean that vector 0 would service RSS ring 0
3288 * and TX completion rings 0,1,2 and 3. Vector 1 would
3289 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3291 static void ql_set_tx_vect(struct ql_adapter *qdev)
3294 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3296 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3297 /* Assign irq vectors to TX rx_rings.*/
3298 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3299 i < qdev->rx_ring_count; i++) {
3300 if (j == tx_rings_per_vector) {
3304 qdev->rx_ring[i].irq = vect;
3308 /* For single vector all rings have an irq
3311 for (i = 0; i < qdev->rx_ring_count; i++)
3312 qdev->rx_ring[i].irq = 0;
3316 /* Set the interrupt mask for this vector. Each vector
3317 * will service 1 RSS ring and 1 or more TX completion
3318 * rings. This function sets up a bit mask per vector
3319 * that indicates which rings it services.
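 * For example (illustration only): with 2 MSI-X vectors and 8 TX
 * completion rings, vector 1's mask covers its own RSS ring's cq_id
 * plus the cq_ids of TX completion rings 4 through 7.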
3321 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3323 int j, vect = ctx->intr;
3324 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3326 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327 /* Add the RSS ring serviced by this vector
3330 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3331 /* Add the TX ring(s) serviced by this vector
3333 for (j = 0; j < tx_rings_per_vector; j++) {
3335 (1 << qdev->rx_ring[qdev->rss_ring_count +
3336 (vect * tx_rings_per_vector) + j].cq_id);
3339 /* For single vector we just shift each queue's
3342 for (j = 0; j < qdev->rx_ring_count; j++)
3343 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3348 * Here we build the intr_context structures based on
3349 * our rx_ring count and intr vector count.
3350 * The intr_context structure is used to hook each vector
3351 * to possibly different handlers.
3353 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3356 struct intr_context *intr_context = &qdev->intr_context[0];
3358 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359 /* Each rx_ring has its
3360 * own intr_context since we have separate
3361 * vectors for each queue.
3363 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3364 qdev->rx_ring[i].irq = i;
3365 intr_context->intr = i;
3366 intr_context->qdev = qdev;
3367 /* Set up this vector's bit-mask that indicates
3368 * which queues it services.
3370 ql_set_irq_mask(qdev, intr_context);
3372 * We set up each vector's enable/disable/read bits so
3373 * there's no bit/mask calculations in the critical path.
3375 intr_context->intr_en_mask =
3376 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3377 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3379 intr_context->intr_dis_mask =
3380 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3383 intr_context->intr_read_mask =
3384 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3385 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3388 /* The first vector/queue handles
3389 * broadcast/multicast, fatal errors,
3390 * and firmware events. This in addition
3391 * to normal inbound NAPI processing.
3393 intr_context->handler = qlge_isr;
3394 sprintf(intr_context->name, "%s-rx-%d",
3395 qdev->ndev->name, i);
3398 * Inbound queues handle unicast frames only.
3400 intr_context->handler = qlge_msix_rx_isr;
3401 sprintf(intr_context->name, "%s-rx-%d",
3402 qdev->ndev->name, i);
3407 * All rx_rings use the same intr_context since
3408 * there is only one vector.
3410 intr_context->intr = 0;
3411 intr_context->qdev = qdev;
3413 * We set up each vector's enable/disable/read bits so
3414 * there's no bit/mask calculations in the critical path.
3416 intr_context->intr_en_mask =
3417 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3418 intr_context->intr_dis_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420 INTR_EN_TYPE_DISABLE;
3421 intr_context->intr_read_mask =
3422 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3424 * Single interrupt means one handler for all rings.
3426 intr_context->handler = qlge_isr;
3427 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3428 /* Set up this vector's bit-mask that indicates
3429 * which queues it services. In this case there is
3430 * a single vector so it will service all RSS and
3431 * TX completion rings.
3433 ql_set_irq_mask(qdev, intr_context);
3435 /* Tell the TX completion rings which MSIx vector
3436 * they will be using.
3438 ql_set_tx_vect(qdev);
3441 static void ql_free_irq(struct ql_adapter *qdev)
3444 struct intr_context *intr_context = &qdev->intr_context[0];
3446 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3447 if (intr_context->hooked) {
3448 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3449 free_irq(qdev->msi_x_entry[i].vector,
3451 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3452 "freeing msix interrupt %d.\n", i);
3454 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3455 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3456 "freeing msi interrupt %d.\n", i);
3460 ql_disable_msix(qdev);
3463 static int ql_request_irq(struct ql_adapter *qdev)
3467 struct pci_dev *pdev = qdev->pdev;
3468 struct intr_context *intr_context = &qdev->intr_context[0];
3470 ql_resolve_queues_to_irqs(qdev);
3472 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3473 atomic_set(&intr_context->irq_cnt, 0);
3474 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3475 status = request_irq(qdev->msi_x_entry[i].vector,
3476 intr_context->handler,
3481 netif_err(qdev, ifup, qdev->ndev,
3482 "Failed request for MSIX interrupt %d.\n",
3486 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3487 "Hooked intr %d, queue type %s, with name %s.\n",
3489 qdev->rx_ring[i].type == DEFAULT_Q ?
3491 qdev->rx_ring[i].type == TX_Q ?
3493 qdev->rx_ring[i].type == RX_Q ?
3495 intr_context->name);
3498 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3499 "trying msi or legacy interrupts.\n");
3500 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 "%s: irq = %d.\n", __func__, pdev->irq);
3502 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503 "%s: context->name = %s.\n", __func__,
3504 intr_context->name);
3505 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3506 "%s: dev_id = 0x%p.\n", __func__,
3509 request_irq(pdev->irq, qlge_isr,
3510 test_bit(QL_MSI_ENABLED,
3512 flags) ? 0 : IRQF_SHARED,
3513 intr_context->name, &qdev->rx_ring[0]);
3517 netif_err(qdev, ifup, qdev->ndev,
3518 "Hooked intr %d, queue type %s, with name %s.\n",
3520 qdev->rx_ring[0].type == DEFAULT_Q ?
3522 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3523 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3524 intr_context->name);
3526 intr_context->hooked = 1;
3530 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3535 static int ql_start_rss(struct ql_adapter *qdev)
3537 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3538 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3539 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3540 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3541 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3542 0xbe, 0xac, 0x01, 0xfa};
3543 struct ricb *ricb = &qdev->ricb;
3546 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3548 memset((void *)ricb, 0, sizeof(*ricb));
3550 ricb->base_cq = RSS_L4K;
3552 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3553 ricb->mask = cpu_to_le16((u16)(0x3ff));
3556 * Fill out the Indirection Table.
3558 for (i = 0; i < 1024; i++)
3559 hash_id[i] = (i & (qdev->rss_ring_count - 1));
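/* Each of the 1024 indirection entries maps a hash bucket to an RSS
 * completion queue id; the AND acts as a modulo when rss_ring_count
 * is a power of two, which appears to be the assumption here.
 */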
3561 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3562 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3564 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3566 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3568 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3571 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3572 "Successfully loaded RICB.\n");
3576 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3580 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3583 /* Clear all the entries in the routing table. */
3584 for (i = 0; i < 16; i++) {
3585 status = ql_set_routing_reg(qdev, i, 0, 0);
3587 netif_err(qdev, ifup, qdev->ndev,
3588 "Failed to init routing register for CAM packets.\n");
3592 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3596 /* Initialize the frame-to-queue routing. */
3597 static int ql_route_initialize(struct ql_adapter *qdev)
3601 /* Clear all the entries in the routing table. */
3602 status = ql_clear_routing_entries(qdev);
3606 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3610 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3611 RT_IDX_IP_CSUM_ERR, 1);
3613 netif_err(qdev, ifup, qdev->ndev,
3614 "Failed to init routing register "
3615 "for IP CSUM error packets.\n");
3618 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3619 RT_IDX_TU_CSUM_ERR, 1);
3621 netif_err(qdev, ifup, qdev->ndev,
3622 "Failed to init routing register "
3623 "for TCP/UDP CSUM error packets.\n");
3626 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3628 netif_err(qdev, ifup, qdev->ndev,
3629 "Failed to init routing register for broadcast packets.\n");
3632 /* If we have more than one inbound queue, then turn on RSS in the
3635 if (qdev->rss_ring_count > 1) {
3636 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3637 RT_IDX_RSS_MATCH, 1);
3639 netif_err(qdev, ifup, qdev->ndev,
3640 "Failed to init routing register for MATCH RSS packets.\n");
3645 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3648 netif_err(qdev, ifup, qdev->ndev,
3649 "Failed to init routing register for CAM packets.\n");
3651 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3655 int ql_cam_route_initialize(struct ql_adapter *qdev)
3659 /* Check if the link is up and use that to
3660 * determine whether we are setting or clearing
3661 * the MAC address in the CAM.
3663 set = ql_read32(qdev, STS);
3664 set &= qdev->port_link_up;
3665 status = ql_set_mac_addr(qdev, set);
3667 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3671 status = ql_route_initialize(qdev);
3673 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3678 static int ql_adapter_initialize(struct ql_adapter *qdev)
3685 * Set up the System register to halt on errors.
3687 value = SYS_EFE | SYS_FAE;
3689 ql_write32(qdev, SYS, mask | value);
3691 /* Set the default queue, and VLAN behavior. */
3692 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3693 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3694 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3696 /* Set the MPI interrupt to enabled. */
3697 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3699 /* Enable the function, set pagesize, enable error checking. */
3700 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3701 FSC_EC | FSC_VM_PAGE_4K;
3702 value |= SPLT_SETTING;
3704 /* Set/clear header splitting. */
3705 mask = FSC_VM_PAGESIZE_MASK |
3706 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3707 ql_write32(qdev, FSC, mask | value);
3709 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3711 /* Set RX packet routing to use port/pci function on which the
3712 * packet arrived on in addition to usual frame routing.
3713 * This is helpful on bonding where both interfaces can have
3714 * the same MAC address.
3716 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3717 /* Reroute all packets to our Interface.
3718 * They may have been routed to MPI firmware
3721 value = ql_read32(qdev, MGMT_RCV_CFG);
3722 value &= ~MGMT_RCV_CFG_RM;
3725 /* Sticky reg needs clearing due to WOL. */
3726 ql_write32(qdev, MGMT_RCV_CFG, mask);
3727 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3729 /* Default WOL is enabled on Mezz cards */
3730 if (qdev->pdev->subsystem_device == 0x0068 ||
3731 qdev->pdev->subsystem_device == 0x0180)
3732 qdev->wol = WAKE_MAGIC;
3734 /* Start up the rx queues. */
3735 for (i = 0; i < qdev->rx_ring_count; i++) {
3736 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3738 netif_err(qdev, ifup, qdev->ndev,
3739 "Failed to start rx ring[%d].\n", i);
3744 /* If there is more than one inbound completion queue
3745 * then download a RICB to configure RSS.
3747 if (qdev->rss_ring_count > 1) {
3748 status = ql_start_rss(qdev);
3750 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3755 /* Start up the tx queues. */
3756 for (i = 0; i < qdev->tx_ring_count; i++) {
3757 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3759 netif_err(qdev, ifup, qdev->ndev,
3760 "Failed to start tx ring[%d].\n", i);
3765 /* Initialize the port and set the max framesize. */
3766 status = qdev->nic_ops->port_initialize(qdev);
3768 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3770 /* Set up the MAC address and frame routing filter. */
3771 status = ql_cam_route_initialize(qdev);
3773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to init CAM/Routing tables.\n");
3778 /* Start NAPI for the RSS queues. */
3779 for (i = 0; i < qdev->rss_ring_count; i++) {
3780 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3781 "Enabling NAPI for rx_ring[%d].\n", i);
3782 napi_enable(&qdev->rx_ring[i].napi);
3788 /* Issue soft reset to chip. */
3789 static int ql_adapter_reset(struct ql_adapter *qdev)
3793 unsigned long end_jiffies;
3795 /* Clear all the entries in the routing table. */
3796 status = ql_clear_routing_entries(qdev);
3798 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3802 end_jiffies = jiffies +
3803 max((unsigned long)1, usecs_to_jiffies(30));
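/* usecs_to_jiffies(30) normally rounds down to 0, so the max() clamps
 * the reset-poll window to at least one jiffy.
 */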
3805 /* Stop management traffic. */
3806 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3808 /* Wait for the NIC and MGMNT FIFOs to empty. */
3809 ql_wait_fifo_empty(qdev);
3811 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3814 value = ql_read32(qdev, RST_FO);
3815 if ((value & RST_FO_FR) == 0)
3818 } while (time_before(jiffies, end_jiffies));
3820 if (value & RST_FO_FR) {
3821 netif_err(qdev, ifdown, qdev->ndev,
3822 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3823 status = -ETIMEDOUT;
3826 /* Resume management traffic. */
3827 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3831 static void ql_display_dev_info(struct net_device *ndev)
3833 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3835 netif_info(qdev, probe, qdev->ndev,
3836 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3837 "XG Roll = %d, XG Rev = %d.\n",
3840 qdev->chip_rev_id & 0x0000000f,
3841 qdev->chip_rev_id >> 4 & 0x0000000f,
3842 qdev->chip_rev_id >> 8 & 0x0000000f,
3843 qdev->chip_rev_id >> 12 & 0x0000000f);
3844 netif_info(qdev, probe, qdev->ndev,
3845 "MAC address %pM\n", ndev->dev_addr);
3848 static int ql_wol(struct ql_adapter *qdev)
3851 u32 wol = MB_WOL_DISABLE;
3853 /* The CAM is still intact after a reset, but if we
3854 * are doing WOL, then we may need to program the
3855 * routing regs. We would also need to issue the mailbox
3856 * commands to instruct the MPI what to do per the ethtool
3860 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3861 WAKE_MCAST | WAKE_BCAST)) {
3862 netif_err(qdev, ifdown, qdev->ndev,
3863 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3868 if (qdev->wol & WAKE_MAGIC) {
3869 status = ql_mb_wol_set_magic(qdev, 1);
3871 netif_err(qdev, ifdown, qdev->ndev,
3872 "Failed to set magic packet on %s.\n",
3876 netif_info(qdev, drv, qdev->ndev,
3877 "Enabled magic packet successfully on %s.\n",
3880 wol |= MB_WOL_MAGIC_PKT;
3884 wol |= MB_WOL_MODE_ON;
3885 status = ql_mb_wol_mode(qdev, wol);
3886 netif_err(qdev, drv, qdev->ndev,
3887 "WOL %s (wol code 0x%x) on %s\n",
3888 (status == 0) ? "Successfully set" : "Failed",
3889 wol, qdev->ndev->name);
3895 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3898 /* Don't kill the reset worker thread if we
3899 * are in the process of recovery.
3901 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3902 cancel_delayed_work_sync(&qdev->asic_reset_work);
3903 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3904 cancel_delayed_work_sync(&qdev->mpi_work);
3905 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3906 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3907 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3910 static int ql_adapter_down(struct ql_adapter *qdev)
3916 ql_cancel_all_work_sync(qdev);
3918 for (i = 0; i < qdev->rss_ring_count; i++)
3919 napi_disable(&qdev->rx_ring[i].napi);
3921 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3923 ql_disable_interrupts(qdev);
3925 ql_tx_ring_clean(qdev);
3927 /* Call netif_napi_del() from common point.
3929 for (i = 0; i < qdev->rss_ring_count; i++)
3930 netif_napi_del(&qdev->rx_ring[i].napi);
3932 status = ql_adapter_reset(qdev);
3934 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3936 ql_free_rx_buffers(qdev);
3941 static int ql_adapter_up(struct ql_adapter *qdev)
3945 err = ql_adapter_initialize(qdev);
3947 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3950 set_bit(QL_ADAPTER_UP, &qdev->flags);
3951 ql_alloc_rx_buffers(qdev);
3952 /* If the port is initialized and the
3953 * link is up then turn on the carrier.
3955 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3956 (ql_read32(qdev, STS) & qdev->port_link_up))
3958 /* Restore rx mode. */
3959 clear_bit(QL_ALLMULTI, &qdev->flags);
3960 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3961 qlge_set_multicast_list(qdev->ndev);
3963 ql_enable_interrupts(qdev);
3964 ql_enable_all_completion_interrupts(qdev);
3965 netif_tx_start_all_queues(qdev->ndev);
3969 ql_adapter_reset(qdev);
3973 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3975 ql_free_mem_resources(qdev);
3979 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3983 if (ql_alloc_mem_resources(qdev)) {
3984 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3987 status = ql_request_irq(qdev);
3991 static int qlge_close(struct net_device *ndev)
3993 struct ql_adapter *qdev = netdev_priv(ndev);
3995 /* If we hit pci_channel_io_perm_failure
3996 * failure condition, then we already
3997 * brought the adapter down.
3999 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4000 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4001 clear_bit(QL_EEH_FATAL, &qdev->flags);
4006 * Wait for device to recover from a reset.
4007 * (Rarely happens, but possible.)
4009 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4011 ql_adapter_down(qdev);
4012 ql_release_adapter_resources(qdev);
4016 static int ql_configure_rings(struct ql_adapter *qdev)
4019 struct rx_ring *rx_ring;
4020 struct tx_ring *tx_ring;
4021 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4022 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4023 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4025 qdev->lbq_buf_order = get_order(lbq_buf_len);
4027 /* In a perfect world we have one RSS ring for each CPU
4028 * and each has its own vector. To do that we ask for
4029 * cpu_cnt vectors. ql_enable_msix() will adjust the
4030 * vector count to what we actually get. We then
4031 * allocate an RSS ring for each.
4032 * Essentially, we are doing min(cpu_count, msix_vector_count).
4034 qdev->intr_count = cpu_cnt;
4035 ql_enable_msix(qdev);
4036 /* Adjust the RSS ring count to the actual vector count. */
4037 qdev->rss_ring_count = qdev->intr_count;
4038 qdev->tx_ring_count = cpu_cnt;
4039 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
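/* rx_ring[] holds all completion queues: the first rss_ring_count
 * entries are inbound RSS rings and the remaining tx_ring_count
 * entries are outbound (TX completion) rings, as set up in the loop
 * below.
 */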
4041 for (i = 0; i < qdev->tx_ring_count; i++) {
4042 tx_ring = &qdev->tx_ring[i];
4043 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4044 tx_ring->qdev = qdev;
4046 tx_ring->wq_len = qdev->tx_ring_size;
4048 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4051 * The completion queue ID for the tx rings start
4052 * immediately after the rss rings.
4054 tx_ring->cq_id = qdev->rss_ring_count + i;
4057 for (i = 0; i < qdev->rx_ring_count; i++) {
4058 rx_ring = &qdev->rx_ring[i];
4059 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4060 rx_ring->qdev = qdev;
4062 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4063 if (i < qdev->rss_ring_count) {
4065 * Inbound (RSS) queues.
4067 rx_ring->cq_len = qdev->rx_ring_size;
4069 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4070 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4072 rx_ring->lbq_len * sizeof(__le64);
4073 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4074 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4075 "lbq_buf_size %d, order = %d\n",
4076 rx_ring->lbq_buf_size,
4077 qdev->lbq_buf_order);
4078 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4080 rx_ring->sbq_len * sizeof(__le64);
4081 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4082 rx_ring->type = RX_Q;
4085 * Outbound queue handles outbound completions only.
4087 /* outbound cq is same size as tx_ring it services. */
4088 rx_ring->cq_len = qdev->tx_ring_size;
4090 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4091 rx_ring->lbq_len = 0;
4092 rx_ring->lbq_size = 0;
4093 rx_ring->lbq_buf_size = 0;
4094 rx_ring->sbq_len = 0;
4095 rx_ring->sbq_size = 0;
4096 rx_ring->sbq_buf_size = 0;
4097 rx_ring->type = TX_Q;
4103 static int qlge_open(struct net_device *ndev)
4106 struct ql_adapter *qdev = netdev_priv(ndev);
4108 err = ql_adapter_reset(qdev);
4112 err = ql_configure_rings(qdev);
4116 err = ql_get_adapter_resources(qdev);
4120 err = ql_adapter_up(qdev);
4127 ql_release_adapter_resources(qdev);
4131 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4133 struct rx_ring *rx_ring;
4137 /* Wait for an outstanding reset to complete. */
4138 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4140 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4141 netif_err(qdev, ifup, qdev->ndev,
4142 "Waiting for adapter UP...\n");
4147 netif_err(qdev, ifup, qdev->ndev,
4148 "Timed out waiting for adapter UP\n");
4153 status = ql_adapter_down(qdev);
4157 /* Get the new rx buffer size. */
4158 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4159 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4160 qdev->lbq_buf_order = get_order(lbq_buf_len);
4162 for (i = 0; i < qdev->rss_ring_count; i++) {
4163 rx_ring = &qdev->rx_ring[i];
4164 /* Set the new size. */
4165 rx_ring->lbq_buf_size = lbq_buf_len;
4168 status = ql_adapter_up(qdev);
4174 netif_alert(qdev, ifup, qdev->ndev,
4175 "Driver up/down cycle failed, closing device.\n");
4176 set_bit(QL_ADAPTER_UP, &qdev->flags);
4177 dev_close(qdev->ndev);
4181 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4183 struct ql_adapter *qdev = netdev_priv(ndev);
4186 if (ndev->mtu == 1500 && new_mtu == 9000) {
4187 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4188 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4189 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4193 queue_delayed_work(qdev->workqueue,
4194 &qdev->mpi_port_cfg_work, 3*HZ);
4196 ndev->mtu = new_mtu;
4198 if (!netif_running(qdev->ndev)) {
4202 status = ql_change_rx_buffers(qdev);
4204 netif_err(qdev, ifup, qdev->ndev,
4205 "Changing MTU failed.\n");
4211 static struct net_device_stats *qlge_get_stats(struct net_device
4214 struct ql_adapter *qdev = netdev_priv(ndev);
4215 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4216 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4217 unsigned long pkts, mcast, dropped, errors, bytes;
4221 pkts = mcast = dropped = errors = bytes = 0;
4222 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4223 pkts += rx_ring->rx_packets;
4224 bytes += rx_ring->rx_bytes;
4225 dropped += rx_ring->rx_dropped;
4226 errors += rx_ring->rx_errors;
4227 mcast += rx_ring->rx_multicast;
4229 ndev->stats.rx_packets = pkts;
4230 ndev->stats.rx_bytes = bytes;
4231 ndev->stats.rx_dropped = dropped;
4232 ndev->stats.rx_errors = errors;
4233 ndev->stats.multicast = mcast;
4236 pkts = errors = bytes = 0;
4237 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4238 pkts += tx_ring->tx_packets;
4239 bytes += tx_ring->tx_bytes;
4240 errors += tx_ring->tx_errors;
4242 ndev->stats.tx_packets = pkts;
4243 ndev->stats.tx_bytes = bytes;
4244 ndev->stats.tx_errors = errors;
4245 return &ndev->stats;
4248 static void qlge_set_multicast_list(struct net_device *ndev)
4250 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4251 struct netdev_hw_addr *ha;
4254 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4258 * Set or clear promiscuous mode if a
4259 * transition is taking place.
4261 if (ndev->flags & IFF_PROMISC) {
4262 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4263 if (ql_set_routing_reg
4264 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4265 netif_err(qdev, hw, qdev->ndev,
4266 "Failed to set promiscous mode.\n");
4268 set_bit(QL_PROMISCUOUS, &qdev->flags);
4272 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4273 if (ql_set_routing_reg
4274 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4275 netif_err(qdev, hw, qdev->ndev,
4276 "Failed to clear promiscous mode.\n");
4278 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4284 * Set or clear all multicast mode if a
4285 * transition is taking place.
4287 if ((ndev->flags & IFF_ALLMULTI) ||
4288 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4289 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4290 if (ql_set_routing_reg
4291 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4292 netif_err(qdev, hw, qdev->ndev,
4293 "Failed to set all-multi mode.\n");
4295 set_bit(QL_ALLMULTI, &qdev->flags);
4299 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4300 if (ql_set_routing_reg
4301 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4302 netif_err(qdev, hw, qdev->ndev,
4303 "Failed to clear all-multi mode.\n");
4305 clear_bit(QL_ALLMULTI, &qdev->flags);
4310 if (!netdev_mc_empty(ndev)) {
4311 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4315 netdev_for_each_mc_addr(ha, ndev) {
4316 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4317 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4318 netif_err(qdev, hw, qdev->ndev,
4319 "Failed to loadmulticast address.\n");
4320 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4325 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4326 if (ql_set_routing_reg
4327 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4328 netif_err(qdev, hw, qdev->ndev,
4329 "Failed to set multicast match mode.\n");
4331 set_bit(QL_ALLMULTI, &qdev->flags);
4335 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4338 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4340 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4341 struct sockaddr *addr = p;
4344 if (!is_valid_ether_addr(addr->sa_data))
4345 return -EADDRNOTAVAIL;
4346 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4347 /* Update local copy of current mac address. */
4348 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4350 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4353 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4354 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4356 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4357 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4361 static void qlge_tx_timeout(struct net_device *ndev)
4363 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4364 ql_queue_asic_error(qdev);
4367 static void ql_asic_reset_work(struct work_struct *work)
4369 struct ql_adapter *qdev =
4370 container_of(work, struct ql_adapter, asic_reset_work.work);
4373 status = ql_adapter_down(qdev);
4377 status = ql_adapter_up(qdev);
4381 /* Restore rx mode. */
4382 clear_bit(QL_ALLMULTI, &qdev->flags);
4383 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4384 qlge_set_multicast_list(qdev->ndev);
4389 netif_alert(qdev, ifup, qdev->ndev,
4390 "Driver up/down cycle failed, closing device\n");
4392 set_bit(QL_ADAPTER_UP, &qdev->flags);
4393 dev_close(qdev->ndev);
4397 static struct nic_operations qla8012_nic_ops = {
4398 .get_flash = ql_get_8012_flash_params,
4399 .port_initialize = ql_8012_port_initialize,
4402 static struct nic_operations qla8000_nic_ops = {
4403 .get_flash = ql_get_8000_flash_params,
4404 .port_initialize = ql_8000_port_initialize,
/* Find the PCIe function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
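
/* Read this function's identity from the STS register, decide which
 * physical port (0 or 1) it owns relative to the alternate function,
 * and select the matching semaphore mask, link/init status bits,
 * mailbox addresses and nic_ops table.
 */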
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
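
/* Release everything ql_init_device() acquired: the workqueue, both
 * BAR mappings, the optional MPI coredump buffer and the PCI regions.
 */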
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
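
/* Early, hardware-independent part of probe: enable and map the PCI
 * device, pick a DMA mask, read the board configuration and flash
 * (which supplies the MAC address), then set default ring sizes,
 * interrupt-coalescing parameters and the delayed-work handlers.
 */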
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);

	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
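
/* net_device callbacks wired up by qlge_probe(). */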
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
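
/* Deferrable 5-second heartbeat.  The register read is what matters:
 * if the PCI channel has died it trips EEH, and in that case the
 * timer is deliberately not rearmed.
 */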
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
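
/* Wrappers exported for the ethtool loopback self-test, which needs
 * to transmit frames and drain a completion ring outside the normal
 * stack path.
 */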
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
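
/* Final EEH stage: the slot came back, so reopen the interface if it
 * was running, restart the heartbeat timer and reattach the device.
 */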
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
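
/* PCI error-recovery (AER/EEH) entry points. */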
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
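
/* Standard PCI power-management hooks, built only when CONFIG_PM is
 * enabled; qlge_shutdown() below reuses the suspend path.
 */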
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);