/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - do not allocate memory.");
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCoE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
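
/* Illustrative usage of the semaphore helpers above (a sketch mirroring
 * how the flash routines later in this file use them; not an additional
 * code path):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... touch the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */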
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
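
/* For illustration only: loading a completion queue's init control
 * block goes through this path; a sketch of the call made during ring
 * bring-up looks like:
 *
 *	ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *		     CFG_LCQ, rx_ring->cq_id);
 */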
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status = ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR:	/* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_IP_CSUM_ERR_SLOT <<
			     RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR:	/* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
			     RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
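
/* Illustrative call (a sketch of how route initialization enables one
 * slot; not an additional code path):
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * which routes broadcast frames to the default (slow-path) queue.
 */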
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
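
/* A sketch of the intended pairing on the default (INTx/MSI) vector:
 * each queued worker calls ql_disable_completion_interrupt() (irq_cnt++)
 * and each finished worker calls ql_enable_completion_interrupt()
 * (irq_cnt--); the hardware interrupt is re-armed only once irq_cnt
 * returns to zero.
 */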
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
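
/* Lifecycle note on the chunking above: the master page is carved into
 * lbq_buf_size chunks.  Every chunk except the last takes an extra page
 * reference via get_page(), so each RX buffer independently pins the
 * page; the chunk that exhausts the page is flagged last_flag so
 * ql_get_curr_lchunk() knows to unmap the whole block.
 */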
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
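
/* Refill policy for both buffer queues: buffers are replenished in
 * batches of 16 once enough descriptors are free (more than 32 for the
 * large queue, more than 16 for the small queue), and the producer
 * doorbell is rung only if at least one batch was actually posted.
 */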
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = pci_map_page(qdev->pdev, frag->page,
				   frag->page_offset, frag->size,
				   PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
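
/* Sizing note for the scheme above: the IOCB carries 8 tx_buf_desc
 * entries, so the head (seg[0]) plus up to seven page frags fit inline;
 * with more than seven frags, seg[7] instead points at the per-descriptor
 * OAL, which holds frags 6 onward, and that single OAL mapping is
 * accounted as one more map[] slot.
 */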
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset+ETH_HLEN,
			   length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr
						    (sbq_desc, mapaddr),
						    dma_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr
						       (sbq_desc,
							mapaddr),
						       dma_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc,
							mapaddr),
					 dma_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc,
						      mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}
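
/* Summary of the dispatch above: header-split frames take the split
 * path; small-buffer-only frames are copied into a fresh skb; clean TCP
 * page chunks ride the GRO path; other single page chunks get a new skb
 * with the chunk attached as a frag; everything else falls back to the
 * split-frame logic.
 */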
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netif_err(qdev, link, qdev->ndev,
			  "Multiple CAM hits lookup occurred.\n");
		netif_err(qdev, drv, qdev->ndev,
			  "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netif_err(qdev, rx_err, qdev->ndev,
			  "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			  ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
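
/* Wake-up rule of thumb: tx_ring->tx_count tracks the number of free TX
 * descriptors, so the subqueue is restarted once more than a quarter of
 * the ring (wq_len / 4) is available again.
 */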
2242 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2244 struct ql_adapter *qdev = rx_ring->qdev;
2245 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2246 struct ql_net_rsp_iocb *net_rsp;
2249 /* While there are entries in the completion queue. */
2250 while (prod != rx_ring->cnsmr_idx) {
2252 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2253 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2254 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2256 net_rsp = rx_ring->curr_entry;
2258 switch (net_rsp->opcode) {
2259 case OPCODE_IB_MAC_IOCB:
2260 ql_process_mac_rx_intr(qdev, rx_ring,
2261 (struct ib_mac_iocb_rsp *)
2265 case OPCODE_IB_AE_IOCB:
2266 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2270 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2271 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2276 ql_update_cq(rx_ring);
2277 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2278 if (count == budget)
2281 ql_update_buffer_queues(qdev, rx_ring);
2282 ql_write_cq_idx(rx_ring);
2286 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2288 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2289 struct ql_adapter *qdev = rx_ring->qdev;
2290 struct rx_ring *trx_ring;
2291 int i, work_done = 0;
2292 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2294 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2295 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2297 /* Service the TX rings first. They start
2298 * right after the RSS rings. */
2299 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2300 trx_ring = &qdev->rx_ring[i];
2301 /* If this TX completion ring belongs to this vector and
2302 * it's not empty then service it.
2303 */
2304 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2305 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2306 trx_ring->cnsmr_idx)) {
2307 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2308 "%s: Servicing TX completion ring %d.\n",
2309 __func__, trx_ring->cq_id);
2310 ql_clean_outbound_rx_ring(trx_ring);
2314 /*
2315 * Now service the RSS ring if it's active.
2316 */
2317 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2318 rx_ring->cnsmr_idx) {
2319 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2320 "%s: Servicing RX completion ring %d.\n",
2321 __func__, rx_ring->cq_id);
2322 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2325 if (work_done < budget) {
2326 napi_complete(napi);
2327 ql_enable_completion_interrupt(qdev, rx_ring->irq);
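/* Illustrative sketch, not part of the original driver: the poll
 * routine above decides whether a completion ring belongs to this
 * vector by testing the ring's cq_id against the vector's irq_mask.
 * The helper name is hypothetical.
 */
static inline bool ql_example_vector_services_cq(struct intr_context *ctx,
						 u32 cq_id)
{
	return (ctx->irq_mask & (1 << cq_id)) != 0;
}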
2332 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2334 struct ql_adapter *qdev = netdev_priv(ndev);
2336 qdev->vlgrp = grp;
2337 if (grp) {
2338 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2339 "Turning on VLAN in NIC_RCV_CFG.\n");
2340 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2341 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2342 } else {
2343 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2344 "Turning off VLAN in NIC_RCV_CFG.\n");
2345 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2351 struct ql_adapter *qdev = netdev_priv(ndev);
2352 u32 enable_bit = MAC_ADDR_E;
2355 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2358 if (ql_set_mac_addr_reg
2359 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2360 netif_err(qdev, ifup, qdev->ndev,
2361 "Failed to init vlan address.\n");
2363 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2366 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2368 struct ql_adapter *qdev = netdev_priv(ndev);
2369 u32 enable_bit = 0;
2372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2376 if (ql_set_mac_addr_reg
2377 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2378 netif_err(qdev, ifup, qdev->ndev,
2379 "Failed to clear vlan address.\n");
2381 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
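/* Illustrative sketch, not part of the original driver: adding a VLAN
 * ID programs the CAM entry for that VID with the MAC_ADDR_E (enable)
 * bit set, while killing the VID rewrites the same entry with the
 * enable bit clear. The helper name is hypothetical.
 */
static inline u32 ql_example_vlan_enable_bit(bool add)
{
	return add ? MAC_ADDR_E : 0;
}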
2385 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2386 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2388 struct rx_ring *rx_ring = dev_id;
2389 napi_schedule(&rx_ring->napi);
2393 /* This handles a fatal error, MPI activity, and the default
2394 * rx_ring in an MSI-X multiple vector environment.
2395 * In an MSI/Legacy environment it also processes the rest
2396 * of the rx_rings.
2397 */
2398 static irqreturn_t qlge_isr(int irq, void *dev_id)
2400 struct rx_ring *rx_ring = dev_id;
2401 struct ql_adapter *qdev = rx_ring->qdev;
2402 struct intr_context *intr_context = &qdev->intr_context[0];
2406 spin_lock(&qdev->hw_lock);
2407 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2408 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2409 "Shared Interrupt, Not ours!\n");
2410 spin_unlock(&qdev->hw_lock);
2413 spin_unlock(&qdev->hw_lock);
2415 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2417 /*
2418 * Check for fatal error.
2419 */
2420 if (var & STS_FE) {
2421 ql_queue_asic_error(qdev);
2422 netif_err(qdev, intr, qdev->ndev,
2423 "Got fatal error, STS = %x.\n", var);
2424 var = ql_read32(qdev, ERR_STS);
2425 netif_err(qdev, intr, qdev->ndev,
2426 "Resetting chip. Error Status Register = 0x%x\n", var);
2430 /*
2431 * Check MPI processor activity.
2432 */
2433 if ((var & STS_PI) &&
2434 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2435 /*
2436 * We've got an async event or mailbox completion.
2437 * Handle it and clear the source of the interrupt.
2438 */
2439 netif_err(qdev, intr, qdev->ndev,
2440 "Got MPI processor interrupt.\n");
2441 ql_disable_completion_interrupt(qdev, intr_context->intr);
2442 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2443 queue_delayed_work_on(smp_processor_id(),
2444 qdev->workqueue, &qdev->mpi_work, 0);
2448 /*
2449 * Get the bit-mask that shows the active queues for this
2450 * pass. Compare it to the queues that this irq services
2451 * and call napi if there's a match.
2452 */
2453 var = ql_read32(qdev, ISR1);
2454 if (var & intr_context->irq_mask) {
2455 netif_info(qdev, intr, qdev->ndev,
2456 "Waking handler for rx_ring[0].\n");
2457 ql_disable_completion_interrupt(qdev, intr_context->intr);
2458 napi_schedule(&rx_ring->napi);
2461 ql_enable_completion_interrupt(qdev, intr_context->intr);
2462 return work_done ? IRQ_HANDLED : IRQ_NONE;
2465 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2468 if (skb_is_gso(skb)) {
2470 if (skb_header_cloned(skb)) {
2471 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2472 if (err)
2473 return err;
2474 }
2476 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2477 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2478 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2479 mac_iocb_ptr->total_hdrs_len =
2480 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2481 mac_iocb_ptr->net_trans_offset =
2482 cpu_to_le16(skb_network_offset(skb) |
2483 skb_transport_offset(skb)
2484 << OB_MAC_TRANSPORT_HDR_SHIFT);
2485 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2486 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2487 if (likely(skb->protocol == htons(ETH_P_IP))) {
2488 struct iphdr *iph = ip_hdr(skb);
2490 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2491 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2495 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2496 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2497 tcp_hdr(skb)->check =
2498 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2499 &ipv6_hdr(skb)->daddr,
2500 0, IPPROTO_TCP, 0);
2501 }
2502 return 1;
2503 }
2504 return 0;
2505 }
2507 static void ql_hw_csum_setup(struct sk_buff *skb,
2508 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2509 {
2510 int len;
2511 struct iphdr *iph = ip_hdr(skb);
2512 __sum16 *check;
2513 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2515 mac_iocb_ptr->net_trans_offset =
2516 cpu_to_le16(skb_network_offset(skb) |
2517 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2519 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2520 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2521 if (likely(iph->protocol == IPPROTO_TCP)) {
2522 check = &(tcp_hdr(skb)->check);
2523 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2524 mac_iocb_ptr->total_hdrs_len =
2525 cpu_to_le16(skb_transport_offset(skb) +
2526 (tcp_hdr(skb)->doff << 2));
2527 } else {
2528 check = &(udp_hdr(skb)->check);
2529 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2530 mac_iocb_ptr->total_hdrs_len =
2531 cpu_to_le16(skb_transport_offset(skb) +
2532 sizeof(struct udphdr));
2534 *check = ~csum_tcpudp_magic(iph->saddr,
2535 iph->daddr, len, iph->protocol, 0);
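/* Illustrative sketch, not part of the original driver: both the TSO
 * and checksum paths above pack two offsets into the one 16-bit
 * net_trans_offset IOCB field: the network header offset in the low
 * bits and the transport header offset shifted by
 * OB_MAC_TRANSPORT_HDR_SHIFT. The helper name is hypothetical.
 */
static inline __le16 ql_example_net_trans_offset(u16 net_off, u16 trans_off)
{
	return cpu_to_le16(net_off |
			   (trans_off << OB_MAC_TRANSPORT_HDR_SHIFT));
}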
2538 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2540 struct tx_ring_desc *tx_ring_desc;
2541 struct ob_mac_iocb_req *mac_iocb_ptr;
2542 struct ql_adapter *qdev = netdev_priv(ndev);
2544 struct tx_ring *tx_ring;
2545 u32 tx_ring_idx = (u32) skb->queue_mapping;
2547 tx_ring = &qdev->tx_ring[tx_ring_idx];
2549 if (skb_padto(skb, ETH_ZLEN))
2550 return NETDEV_TX_OK;
2552 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2553 netif_info(qdev, tx_queued, qdev->ndev,
2554 "%s: shutting down tx queue %d du to lack of resources.\n",
2555 __func__, tx_ring_idx);
2556 netif_stop_subqueue(ndev, tx_ring->wq_id);
2557 atomic_inc(&tx_ring->queue_stopped);
2558 tx_ring->tx_errors++;
2559 return NETDEV_TX_BUSY;
2561 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2562 mac_iocb_ptr = tx_ring_desc->queue_entry;
2563 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2565 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2566 mac_iocb_ptr->tid = tx_ring_desc->index;
2567 /* We use the upper 32-bits to store the tx queue for this IO.
2568 * When we get the completion we can use it to establish the context.
2569 */
2570 mac_iocb_ptr->txq_idx = tx_ring_idx;
2571 tx_ring_desc->skb = skb;
2573 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2575 if (vlan_tx_tag_present(skb)) {
2576 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2577 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2578 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2579 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2581 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2583 dev_kfree_skb_any(skb);
2584 return NETDEV_TX_OK;
2585 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2586 ql_hw_csum_setup(skb,
2587 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2589 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2591 netif_err(qdev, tx_queued, qdev->ndev,
2592 "Could not map the segments.\n");
2593 tx_ring->tx_errors++;
2594 return NETDEV_TX_BUSY;
2596 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2597 tx_ring->prod_idx++;
2598 if (tx_ring->prod_idx == tx_ring->wq_len)
2599 tx_ring->prod_idx = 0;
2602 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2603 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2604 "tx queued, slot %d, len %d\n",
2605 tx_ring->prod_idx, skb->len);
2607 atomic_dec(&tx_ring->tx_count);
2608 return NETDEV_TX_OK;
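/* Illustrative sketch, not part of the original driver: the producer
 * index advance-and-wrap used by qlge_send() above; the doorbell is
 * only written after the IOCB is fully built. The helper name is
 * hypothetical.
 */
static inline u32 ql_example_next_prod_idx(u32 prod_idx, u32 wq_len)
{
	return (prod_idx + 1 == wq_len) ? 0 : prod_idx + 1;
}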
2612 static void ql_free_shadow_space(struct ql_adapter *qdev)
2614 if (qdev->rx_ring_shadow_reg_area) {
2615 pci_free_consistent(qdev->pdev,
2617 qdev->rx_ring_shadow_reg_area,
2618 qdev->rx_ring_shadow_reg_dma);
2619 qdev->rx_ring_shadow_reg_area = NULL;
2621 if (qdev->tx_ring_shadow_reg_area) {
2622 pci_free_consistent(qdev->pdev,
2624 qdev->tx_ring_shadow_reg_area,
2625 qdev->tx_ring_shadow_reg_dma);
2626 qdev->tx_ring_shadow_reg_area = NULL;
2630 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2632 qdev->rx_ring_shadow_reg_area =
2633 pci_alloc_consistent(qdev->pdev,
2634 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2635 if (qdev->rx_ring_shadow_reg_area == NULL) {
2636 netif_err(qdev, ifup, qdev->ndev,
2637 "Allocation of RX shadow space failed.\n");
2640 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2641 qdev->tx_ring_shadow_reg_area =
2642 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2643 &qdev->tx_ring_shadow_reg_dma);
2644 if (qdev->tx_ring_shadow_reg_area == NULL) {
2645 netif_err(qdev, ifup, qdev->ndev,
2646 "Allocation of TX shadow space failed.\n");
2647 goto err_wqp_sh_area;
2649 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2650 return 0;
2652 err_wqp_sh_area:
2653 pci_free_consistent(qdev->pdev,
2655 qdev->rx_ring_shadow_reg_area,
2656 qdev->rx_ring_shadow_reg_dma);
2660 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2662 struct tx_ring_desc *tx_ring_desc;
2664 struct ob_mac_iocb_req *mac_iocb_ptr;
2666 mac_iocb_ptr = tx_ring->wq_base;
2667 tx_ring_desc = tx_ring->q;
2668 for (i = 0; i < tx_ring->wq_len; i++) {
2669 tx_ring_desc->index = i;
2670 tx_ring_desc->skb = NULL;
2671 tx_ring_desc->queue_entry = mac_iocb_ptr;
2675 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2676 atomic_set(&tx_ring->queue_stopped, 0);
2679 static void ql_free_tx_resources(struct ql_adapter *qdev,
2680 struct tx_ring *tx_ring)
2682 if (tx_ring->wq_base) {
2683 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2684 tx_ring->wq_base, tx_ring->wq_base_dma);
2685 tx_ring->wq_base = NULL;
2691 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2692 struct tx_ring *tx_ring)
2694 tx_ring->wq_base =
2695 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2696 &tx_ring->wq_base_dma);
2698 if ((tx_ring->wq_base == NULL) ||
2699 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2700 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2703 tx_ring->q =
2704 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2705 if (tx_ring->q == NULL)
2710 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2711 tx_ring->wq_base, tx_ring->wq_base_dma);
2715 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2717 struct bq_desc *lbq_desc;
2719 uint32_t curr_idx, clean_idx;
2721 curr_idx = rx_ring->lbq_curr_idx;
2722 clean_idx = rx_ring->lbq_clean_idx;
2723 while (curr_idx != clean_idx) {
2724 lbq_desc = &rx_ring->lbq[curr_idx];
2726 if (lbq_desc->p.pg_chunk.last_flag) {
2727 pci_unmap_page(qdev->pdev,
2728 lbq_desc->p.pg_chunk.map,
2729 ql_lbq_block_size(qdev),
2730 PCI_DMA_FROMDEVICE);
2731 lbq_desc->p.pg_chunk.last_flag = 0;
2734 put_page(lbq_desc->p.pg_chunk.page);
2735 lbq_desc->p.pg_chunk.page = NULL;
2737 if (++curr_idx == rx_ring->lbq_len)
2743 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2746 struct bq_desc *sbq_desc;
2748 for (i = 0; i < rx_ring->sbq_len; i++) {
2749 sbq_desc = &rx_ring->sbq[i];
2750 if (sbq_desc == NULL) {
2751 netif_err(qdev, ifup, qdev->ndev,
2752 "sbq_desc %d is NULL.\n", i);
2755 if (sbq_desc->p.skb) {
2756 pci_unmap_single(qdev->pdev,
2757 dma_unmap_addr(sbq_desc, mapaddr),
2758 dma_unmap_len(sbq_desc, maplen),
2759 PCI_DMA_FROMDEVICE);
2760 dev_kfree_skb(sbq_desc->p.skb);
2761 sbq_desc->p.skb = NULL;
2766 /* Free all large and small rx buffers associated
2767 * with the completion queues for this device.
2769 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2772 struct rx_ring *rx_ring;
2774 for (i = 0; i < qdev->rx_ring_count; i++) {
2775 rx_ring = &qdev->rx_ring[i];
2777 ql_free_lbq_buffers(qdev, rx_ring);
2779 ql_free_sbq_buffers(qdev, rx_ring);
2783 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2785 struct rx_ring *rx_ring;
2788 for (i = 0; i < qdev->rx_ring_count; i++) {
2789 rx_ring = &qdev->rx_ring[i];
2790 if (rx_ring->type != TX_Q)
2791 ql_update_buffer_queues(qdev, rx_ring);
2795 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2796 struct rx_ring *rx_ring)
2799 struct bq_desc *lbq_desc;
2800 __le64 *bq = rx_ring->lbq_base;
2802 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2803 for (i = 0; i < rx_ring->lbq_len; i++) {
2804 lbq_desc = &rx_ring->lbq[i];
2805 memset(lbq_desc, 0, sizeof(*lbq_desc));
2806 lbq_desc->index = i;
2807 lbq_desc->addr = bq;
2812 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2813 struct rx_ring *rx_ring)
2816 struct bq_desc *sbq_desc;
2817 __le64 *bq = rx_ring->sbq_base;
2819 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2820 for (i = 0; i < rx_ring->sbq_len; i++) {
2821 sbq_desc = &rx_ring->sbq[i];
2822 memset(sbq_desc, 0, sizeof(*sbq_desc));
2823 sbq_desc->index = i;
2824 sbq_desc->addr = bq;
2829 static void ql_free_rx_resources(struct ql_adapter *qdev,
2830 struct rx_ring *rx_ring)
2832 /* Free the small buffer queue. */
2833 if (rx_ring->sbq_base) {
2834 pci_free_consistent(qdev->pdev,
2836 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2837 rx_ring->sbq_base = NULL;
2840 /* Free the small buffer queue control blocks. */
2841 kfree(rx_ring->sbq);
2842 rx_ring->sbq = NULL;
2844 /* Free the large buffer queue. */
2845 if (rx_ring->lbq_base) {
2846 pci_free_consistent(qdev->pdev,
2848 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2849 rx_ring->lbq_base = NULL;
2852 /* Free the large buffer queue control blocks. */
2853 kfree(rx_ring->lbq);
2854 rx_ring->lbq = NULL;
2856 /* Free the rx queue. */
2857 if (rx_ring->cq_base) {
2858 pci_free_consistent(qdev->pdev,
2860 rx_ring->cq_base, rx_ring->cq_base_dma);
2861 rx_ring->cq_base = NULL;
2865 /* Allocate queues and buffers for this completion queue based
2866 * on the values in the parameter structure. */
2867 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2868 struct rx_ring *rx_ring)
2872 * Allocate the completion queue for this rx_ring.
2874 rx_ring->cq_base =
2875 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2876 &rx_ring->cq_base_dma);
2878 if (rx_ring->cq_base == NULL) {
2879 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2883 if (rx_ring->sbq_len) {
2885 * Allocate small buffer queue.
2887 rx_ring->sbq_base =
2888 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2889 &rx_ring->sbq_base_dma);
2891 if (rx_ring->sbq_base == NULL) {
2892 netif_err(qdev, ifup, qdev->ndev,
2893 "Small buffer queue allocation failed.\n");
2898 * Allocate small buffer queue control blocks.
2900 rx_ring->sbq =
2901 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2902 GFP_KERNEL);
2903 if (rx_ring->sbq == NULL) {
2904 netif_err(qdev, ifup, qdev->ndev,
2905 "Small buffer queue control block allocation failed.\n");
2909 ql_init_sbq_ring(qdev, rx_ring);
2912 if (rx_ring->lbq_len) {
2914 * Allocate large buffer queue.
2916 rx_ring->lbq_base =
2917 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2918 &rx_ring->lbq_base_dma);
2920 if (rx_ring->lbq_base == NULL) {
2921 netif_err(qdev, ifup, qdev->ndev,
2922 "Large buffer queue allocation failed.\n");
2926 * Allocate large buffer queue control blocks.
2928 rx_ring->lbq =
2929 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2930 GFP_KERNEL);
2931 if (rx_ring->lbq == NULL) {
2932 netif_err(qdev, ifup, qdev->ndev,
2933 "Large buffer queue control block allocation failed.\n");
2937 ql_init_lbq_ring(qdev, rx_ring);
2943 ql_free_rx_resources(qdev, rx_ring);
2947 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2949 struct tx_ring *tx_ring;
2950 struct tx_ring_desc *tx_ring_desc;
2954 * Loop through all queues and free
2957 for (j = 0; j < qdev->tx_ring_count; j++) {
2958 tx_ring = &qdev->tx_ring[j];
2959 for (i = 0; i < tx_ring->wq_len; i++) {
2960 tx_ring_desc = &tx_ring->q[i];
2961 if (tx_ring_desc && tx_ring_desc->skb) {
2962 netif_err(qdev, ifdown, qdev->ndev,
2963 "Freeing lost SKB %p, from queue %d, index %d.\n",
2964 tx_ring_desc->skb, j,
2965 tx_ring_desc->index);
2966 ql_unmap_send(qdev, tx_ring_desc,
2967 tx_ring_desc->map_cnt);
2968 dev_kfree_skb(tx_ring_desc->skb);
2969 tx_ring_desc->skb = NULL;
2975 static void ql_free_mem_resources(struct ql_adapter *qdev)
2979 for (i = 0; i < qdev->tx_ring_count; i++)
2980 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2981 for (i = 0; i < qdev->rx_ring_count; i++)
2982 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2983 ql_free_shadow_space(qdev);
2986 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2990 /* Allocate space for our shadow registers and such. */
2991 if (ql_alloc_shadow_space(qdev))
2994 for (i = 0; i < qdev->rx_ring_count; i++) {
2995 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2996 netif_err(qdev, ifup, qdev->ndev,
2997 "RX resource allocation failed.\n");
3001 /* Allocate tx queue resources */
3002 for (i = 0; i < qdev->tx_ring_count; i++) {
3003 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3004 netif_err(qdev, ifup, qdev->ndev,
3005 "TX resource allocation failed.\n");
3012 ql_free_mem_resources(qdev);
3016 /* Set up the rx ring control block and pass it to the chip.
3017 * The control block is defined as
3018 * "Completion Queue Initialization Control Block", or cqicb.
3020 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3022 struct cqicb *cqicb = &rx_ring->cqicb;
3023 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3024 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3025 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3026 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3027 void __iomem *doorbell_area =
3028 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3032 __le64 *base_indirect_ptr;
3035 /* Set up the shadow registers for this ring. */
3036 rx_ring->prod_idx_sh_reg = shadow_reg;
3037 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3038 *rx_ring->prod_idx_sh_reg = 0;
3039 shadow_reg += sizeof(u64);
3040 shadow_reg_dma += sizeof(u64);
3041 rx_ring->lbq_base_indirect = shadow_reg;
3042 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3043 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3044 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3045 rx_ring->sbq_base_indirect = shadow_reg;
3046 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3048 /* PCI doorbell mem area + 0x00 for consumer index register */
3049 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3050 rx_ring->cnsmr_idx = 0;
3051 rx_ring->curr_entry = rx_ring->cq_base;
3053 /* PCI doorbell mem area + 0x04 for valid register */
3054 rx_ring->valid_db_reg = doorbell_area + 0x04;
3056 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3057 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3059 /* PCI doorbell mem area + 0x1c */
3060 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3062 memset((void *)cqicb, 0, sizeof(struct cqicb));
3063 cqicb->msix_vect = rx_ring->irq;
3065 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3066 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3068 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3070 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3073 * Set up the control block load flags.
3075 cqicb->flags = FLAGS_LC | /* Load queue base address */
3076 FLAGS_LV | /* Load MSI-X vector */
3077 FLAGS_LI; /* Load irq delay values */
3078 if (rx_ring->lbq_len) {
3079 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3080 tmp = (u64)rx_ring->lbq_base_dma;
3081 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3084 *base_indirect_ptr = cpu_to_le64(tmp);
3085 tmp += DB_PAGE_SIZE;
3086 base_indirect_ptr++;
3088 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3089 cqicb->lbq_addr =
3090 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3091 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3092 (u16) rx_ring->lbq_buf_size;
3093 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3094 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3095 (u16) rx_ring->lbq_len;
3096 cqicb->lbq_len = cpu_to_le16(bq_len);
3097 rx_ring->lbq_prod_idx = 0;
3098 rx_ring->lbq_curr_idx = 0;
3099 rx_ring->lbq_clean_idx = 0;
3100 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3102 if (rx_ring->sbq_len) {
3103 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3104 tmp = (u64)rx_ring->sbq_base_dma;
3105 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3108 *base_indirect_ptr = cpu_to_le64(tmp);
3109 tmp += DB_PAGE_SIZE;
3110 base_indirect_ptr++;
3112 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3113 cqicb->sbq_addr =
3114 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3115 cqicb->sbq_buf_size =
3116 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3117 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3118 (u16) rx_ring->sbq_len;
3119 cqicb->sbq_len = cpu_to_le16(bq_len);
3120 rx_ring->sbq_prod_idx = 0;
3121 rx_ring->sbq_curr_idx = 0;
3122 rx_ring->sbq_clean_idx = 0;
3123 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3125 switch (rx_ring->type) {
3126 case TX_Q:
3127 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3128 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3129 break;
3130 case RX_Q:
3131 /* Inbound completion handling rx_rings run in
3132 * separate NAPI contexts.
3134 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3136 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3137 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3138 break;
3139 default:
3140 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3141 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3143 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144 "Initializing rx work queue.\n");
3145 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3146 CFG_LCQ, rx_ring->cq_id);
3148 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
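/* Illustrative sketch, not part of the original driver: ring lengths
 * are programmed into 16-bit CQICB fields, so the maximum supported
 * length of 65536 is encoded as 0, as done for cq_len, lbq_len and
 * sbq_len above. The helper name is hypothetical.
 */
static inline u16 ql_example_encode_ring_len(u32 len)
{
	return (len == 65536) ? 0 : (u16)len;
}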
3154 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3156 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3157 void __iomem *doorbell_area =
3158 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3159 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3160 (tx_ring->wq_id * sizeof(u64));
3161 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3162 (tx_ring->wq_id * sizeof(u64));
3166 * Assign doorbell registers for this tx_ring.
3168 /* TX PCI doorbell mem area for tx producer index */
3169 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3170 tx_ring->prod_idx = 0;
3171 /* TX PCI doorbell mem area + 0x04 */
3172 tx_ring->valid_db_reg = doorbell_area + 0x04;
3175 * Assign shadow registers for this tx_ring.
3177 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3178 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3180 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3181 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3182 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3183 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3185 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3187 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3189 ql_init_tx_ring(qdev, tx_ring);
3191 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3192 (u16) tx_ring->wq_id);
3194 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3197 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3198 "Successfully loaded WQICB.\n");
3202 static void ql_disable_msix(struct ql_adapter *qdev)
3204 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3205 pci_disable_msix(qdev->pdev);
3206 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3207 kfree(qdev->msi_x_entry);
3208 qdev->msi_x_entry = NULL;
3209 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3210 pci_disable_msi(qdev->pdev);
3211 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3215 /* We start by trying to get the number of vectors
3216 * stored in qdev->intr_count. If we don't get that
3217 * many then we reduce the count and try again.
3219 static void ql_enable_msix(struct ql_adapter *qdev)
3223 /* Get the MSIX vectors. */
3224 if (qlge_irq_type == MSIX_IRQ) {
3225 /* Try to alloc space for the msix struct,
3226 * if it fails then go to MSI/legacy.
3228 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3229 sizeof(struct msix_entry),
3231 if (!qdev->msi_x_entry) {
3232 qlge_irq_type = MSI_IRQ;
3236 for (i = 0; i < qdev->intr_count; i++)
3237 qdev->msi_x_entry[i].entry = i;
3239 /* Loop to get our vectors. We start with
3240 * what we want and settle for what we get.
3243 err = pci_enable_msix(qdev->pdev,
3244 qdev->msi_x_entry, qdev->intr_count);
3246 qdev->intr_count = err;
3250 kfree(qdev->msi_x_entry);
3251 qdev->msi_x_entry = NULL;
3252 netif_warn(qdev, ifup, qdev->ndev,
3253 "MSI-X Enable failed, trying MSI.\n");
3254 qdev->intr_count = 1;
3255 qlge_irq_type = MSI_IRQ;
3256 } else if (err == 0) {
3257 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3258 netif_info(qdev, ifup, qdev->ndev,
3259 "MSI-X Enabled, got %d vectors.\n",
3265 qdev->intr_count = 1;
3266 if (qlge_irq_type == MSI_IRQ) {
3267 if (!pci_enable_msi(qdev->pdev)) {
3268 set_bit(QL_MSI_ENABLED, &qdev->flags);
3269 netif_info(qdev, ifup, qdev->ndev,
3270 "Running with MSI interrupts.\n");
3274 qlge_irq_type = LEG_IRQ;
3275 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3276 "Running with legacy interrupts.\n");
3279 /* Each vector services 1 RSS ring and 1 or more
3280 * TX completion rings. This function loops through
3281 * the TX completion rings and assigns the vector that
3282 * will service it. An example would be if there are
3283 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3284 * This would mean that vector 0 would service RSS ring 0
3285 * and TX completion rings 0,1,2 and 3. Vector 1 would
3286 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3287 */
3288 static void ql_set_tx_vect(struct ql_adapter *qdev)
3291 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3293 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3294 /* Assign irq vectors to TX rx_rings.*/
3295 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3296 i < qdev->rx_ring_count; i++) {
3297 if (j == tx_rings_per_vector) {
3301 qdev->rx_ring[i].irq = vect;
3305 /* For single vector all rings have an irq
3306 * handler on the main queue.
3307 */
3308 for (i = 0; i < qdev->rx_ring_count; i++)
3309 qdev->rx_ring[i].irq = 0;
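/* Illustrative sketch, not part of the original driver: the arithmetic
 * behind the comment above. With 2 vectors and 8 TX completion rings,
 * tx_rings_per_vector is 4, so TX rings 0-3 land on vector 0 and rings
 * 4-7 on vector 1. The helper name is hypothetical and assumes
 * intr_count divides tx_ring_count evenly, as the driver configures.
 */
static inline u32 ql_example_tx_ring_vector(u32 tx_idx, u32 tx_ring_count,
					    u32 intr_count)
{
	return tx_idx / (tx_ring_count / intr_count);
}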
3313 /* Set the interrupt mask for this vector. Each vector
3314 * will service 1 RSS ring and 1 or more TX completion
3315 * rings. This function sets up a bit mask per vector
3316 * that indicates which rings it services.
3318 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3320 int j, vect = ctx->intr;
3321 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3323 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3324 /* Add the RSS ring serviced by this vector
3327 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3328 /* Add the TX ring(s) serviced by this vector
3330 for (j = 0; j < tx_rings_per_vector; j++) {
3332 (1 << qdev->rx_ring[qdev->rss_ring_count +
3333 (vect * tx_rings_per_vector) + j].cq_id);
3336 /* For single vector we just shift each queue's
3337 * enable/disable bit into the mask.
3338 */
3339 for (j = 0; j < qdev->rx_ring_count; j++)
3340 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
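/* Illustrative sketch, not part of the original driver: a vector's
 * irq_mask is simply the OR of the cq_id bits it services. Continuing
 * the 2-vector/8-TX-ring example, vector 0 covers RSS cq 0 plus TX
 * cqs 2-5, giving a mask of 0x3d. The helper name is hypothetical.
 */
static inline u32 ql_example_vector_mask(u32 rss_cq, const u32 *tx_cqs, int ntx)
{
	u32 mask = 1 << rss_cq;
	int i;

	for (i = 0; i < ntx; i++)
		mask |= 1 << tx_cqs[i];
	return mask;
}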
3345 * Here we build the intr_context structures based on
3346 * our rx_ring count and intr vector count.
3347 * The intr_context structure is used to hook each vector
3348 * to possibly different handlers.
3350 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3353 struct intr_context *intr_context = &qdev->intr_context[0];
3355 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3356 /* Each rx_ring has its
3357 * own intr_context since we have separate
3358 * vectors for each queue.
3360 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3361 qdev->rx_ring[i].irq = i;
3362 intr_context->intr = i;
3363 intr_context->qdev = qdev;
3364 /* Set up this vector's bit-mask that indicates
3365 * which queues it services.
3367 ql_set_irq_mask(qdev, intr_context);
3369 * We set up each vector's enable/disable/read bits so
3370 * there's no bit/mask calculations in the critical path.
3372 intr_context->intr_en_mask =
3373 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3374 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3376 intr_context->intr_dis_mask =
3377 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3378 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3380 intr_context->intr_read_mask =
3381 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3382 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3385 /* The first vector/queue handles
3386 * broadcast/multicast, fatal errors,
3387 * and firmware events. This in addition
3388 * to normal inbound NAPI processing.
3390 intr_context->handler = qlge_isr;
3391 sprintf(intr_context->name, "%s-rx-%d",
3392 qdev->ndev->name, i);
3395 * Inbound queues handle unicast frames only.
3397 intr_context->handler = qlge_msix_rx_isr;
3398 sprintf(intr_context->name, "%s-rx-%d",
3399 qdev->ndev->name, i);
3404 * All rx_rings use the same intr_context since
3405 * there is only one vector.
3407 intr_context->intr = 0;
3408 intr_context->qdev = qdev;
3410 * We set up each vector's enable/disable/read bits so
3411 * there's no bit/mask calculations in the critical path.
3413 intr_context->intr_en_mask =
3414 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3415 intr_context->intr_dis_mask =
3416 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417 INTR_EN_TYPE_DISABLE;
3418 intr_context->intr_read_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3421 * Single interrupt means one handler for all rings.
3423 intr_context->handler = qlge_isr;
3424 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3425 /* Set up this vector's bit-mask that indicates
3426 * which queues it services. In this case there is
3427 * a single vector so it will service all RSS and
3428 * TX completion rings.
3430 ql_set_irq_mask(qdev, intr_context);
3432 /* Tell the TX completion rings which MSIx vector
3433 * they will be using.
3435 ql_set_tx_vect(qdev);
3438 static void ql_free_irq(struct ql_adapter *qdev)
3441 struct intr_context *intr_context = &qdev->intr_context[0];
3443 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3444 if (intr_context->hooked) {
3445 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3446 free_irq(qdev->msi_x_entry[i].vector,
3448 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3449 "freeing msix interrupt %d.\n", i);
3451 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3452 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3453 "freeing msi interrupt %d.\n", i);
3457 ql_disable_msix(qdev);
3460 static int ql_request_irq(struct ql_adapter *qdev)
3464 struct pci_dev *pdev = qdev->pdev;
3465 struct intr_context *intr_context = &qdev->intr_context[0];
3467 ql_resolve_queues_to_irqs(qdev);
3469 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3470 atomic_set(&intr_context->irq_cnt, 0);
3471 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3472 status = request_irq(qdev->msi_x_entry[i].vector,
3473 intr_context->handler,
3478 netif_err(qdev, ifup, qdev->ndev,
3479 "Failed request for MSIX interrupt %d.\n",
3483 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3484 "Hooked intr %d, queue type %s, with name %s.\n",
3486 qdev->rx_ring[i].type == DEFAULT_Q ?
3488 qdev->rx_ring[i].type == TX_Q ?
3490 qdev->rx_ring[i].type == RX_Q ?
3492 intr_context->name);
3495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "trying msi or legacy interrupts.\n");
3497 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3498 "%s: irq = %d.\n", __func__, pdev->irq);
3499 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3500 "%s: context->name = %s.\n", __func__,
3501 intr_context->name);
3502 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503 "%s: dev_id = 0x%p.\n", __func__,
3506 request_irq(pdev->irq, qlge_isr,
3507 test_bit(QL_MSI_ENABLED,
3509 flags) ? 0 : IRQF_SHARED,
3510 intr_context->name, &qdev->rx_ring[0]);
3514 netif_err(qdev, ifup, qdev->ndev,
3515 "Hooked intr %d, queue type %s, with name %s.\n",
3517 qdev->rx_ring[0].type == DEFAULT_Q ?
3519 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3520 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3521 intr_context->name);
3523 intr_context->hooked = 1;
3527 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3532 static int ql_start_rss(struct ql_adapter *qdev)
3534 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3535 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3536 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3537 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3538 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3539 0xbe, 0xac, 0x01, 0xfa};
3540 struct ricb *ricb = &qdev->ricb;
3543 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3545 memset((void *)ricb, 0, sizeof(*ricb));
3547 ricb->base_cq = RSS_L4K;
3548 ricb->flags =
3549 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3550 ricb->mask = cpu_to_le16((u16)(0x3ff));
3553 * Fill out the Indirection Table.
3555 for (i = 0; i < 1024; i++)
3556 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3558 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3559 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3561 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3563 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3565 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3568 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3569 "Successfully loaded RICB.\n");
3573 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3577 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3580 /* Clear all the entries in the routing table. */
3581 for (i = 0; i < 16; i++) {
3582 status = ql_set_routing_reg(qdev, i, 0, 0);
3584 netif_err(qdev, ifup, qdev->ndev,
3585 "Failed to init routing register for CAM packets.\n");
3589 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3593 /* Initialize the frame-to-queue routing. */
3594 static int ql_route_initialize(struct ql_adapter *qdev)
3598 /* Clear all the entries in the routing table. */
3599 status = ql_clear_routing_entries(qdev);
3603 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3607 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3608 RT_IDX_IP_CSUM_ERR, 1);
3610 netif_err(qdev, ifup, qdev->ndev,
3611 "Failed to init routing register "
3612 "for IP CSUM error packets.\n");
3615 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3616 RT_IDX_TU_CSUM_ERR, 1);
3618 netif_err(qdev, ifup, qdev->ndev,
3619 "Failed to init routing register "
3620 "for TCP/UDP CSUM error packets.\n");
3623 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3625 netif_err(qdev, ifup, qdev->ndev,
3626 "Failed to init routing register for broadcast packets.\n");
3629 /* If we have more than one inbound queue, then turn on RSS in the
3632 if (qdev->rss_ring_count > 1) {
3633 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3634 RT_IDX_RSS_MATCH, 1);
3636 netif_err(qdev, ifup, qdev->ndev,
3637 "Failed to init routing register for MATCH RSS packets.\n");
3642 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3645 netif_err(qdev, ifup, qdev->ndev,
3646 "Failed to init routing register for CAM packets.\n");
3648 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3652 int ql_cam_route_initialize(struct ql_adapter *qdev)
3656 /* Check if the link is up, and use that to
3657 * determine if we are setting or clearing
3658 * the MAC address in the CAM.
3659 */
3660 set = ql_read32(qdev, STS);
3661 set &= qdev->port_link_up;
3662 status = ql_set_mac_addr(qdev, set);
3664 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3668 status = ql_route_initialize(qdev);
3670 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3675 static int ql_adapter_initialize(struct ql_adapter *qdev)
3681 /*
3682 * Set up the System register to halt on errors.
3683 */
3684 value = SYS_EFE | SYS_FAE;
3686 ql_write32(qdev, SYS, mask | value);
3688 /* Set the default queue, and VLAN behavior. */
3689 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3690 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3691 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3693 /* Set the MPI interrupt to enabled. */
3694 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3696 /* Enable the function, set pagesize, enable error checking. */
3697 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3698 FSC_EC | FSC_VM_PAGE_4K;
3699 value |= SPLT_SETTING;
3701 /* Set/clear header splitting. */
3702 mask = FSC_VM_PAGESIZE_MASK |
3703 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3704 ql_write32(qdev, FSC, mask | value);
3706 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3708 /* Set RX packet routing to use port/pci function on which the
3709 * packet arrived on in addition to usual frame routing.
3710 * This is helpful on bonding where both interfaces can have
3711 * the same MAC address.
3713 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3714 /* Reroute all packets to our Interface.
3715 * They may have been routed to MPI firmware
3716 * due to WOL.
3717 */
3718 value = ql_read32(qdev, MGMT_RCV_CFG);
3719 value &= ~MGMT_RCV_CFG_RM;
3720 mask = 0xffff0000;
3722 /* Sticky reg needs clearing due to WOL. */
3723 ql_write32(qdev, MGMT_RCV_CFG, mask);
3724 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3726 /* Default WOL is enabled on Mezz cards */
3727 if (qdev->pdev->subsystem_device == 0x0068 ||
3728 qdev->pdev->subsystem_device == 0x0180)
3729 qdev->wol = WAKE_MAGIC;
3731 /* Start up the rx queues. */
3732 for (i = 0; i < qdev->rx_ring_count; i++) {
3733 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3735 netif_err(qdev, ifup, qdev->ndev,
3736 "Failed to start rx ring[%d].\n", i);
3741 /* If there is more than one inbound completion queue
3742 * then download a RICB to configure RSS.
3744 if (qdev->rss_ring_count > 1) {
3745 status = ql_start_rss(qdev);
3747 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3752 /* Start up the tx queues. */
3753 for (i = 0; i < qdev->tx_ring_count; i++) {
3754 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3756 netif_err(qdev, ifup, qdev->ndev,
3757 "Failed to start tx ring[%d].\n", i);
3762 /* Initialize the port and set the max framesize. */
3763 status = qdev->nic_ops->port_initialize(qdev);
3765 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3767 /* Set up the MAC address and frame routing filter. */
3768 status = ql_cam_route_initialize(qdev);
3770 netif_err(qdev, ifup, qdev->ndev,
3771 "Failed to init CAM/Routing tables.\n");
3775 /* Start NAPI for the RSS queues. */
3776 for (i = 0; i < qdev->rss_ring_count; i++) {
3777 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3778 "Enabling NAPI for rx_ring[%d].\n", i);
3779 napi_enable(&qdev->rx_ring[i].napi);
3785 /* Issue soft reset to chip. */
3786 static int ql_adapter_reset(struct ql_adapter *qdev)
3790 unsigned long end_jiffies;
3792 /* Clear all the entries in the routing table. */
3793 status = ql_clear_routing_entries(qdev);
3795 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3799 end_jiffies = jiffies +
3800 max((unsigned long)1, usecs_to_jiffies(30));
3802 /* Stop management traffic. */
3803 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3805 /* Wait for the NIC and MGMNT FIFOs to empty. */
3806 ql_wait_fifo_empty(qdev);
3808 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3811 value = ql_read32(qdev, RST_FO);
3812 if ((value & RST_FO_FR) == 0)
3815 } while (time_before(jiffies, end_jiffies));
3817 if (value & RST_FO_FR) {
3818 netif_err(qdev, ifdown, qdev->ndev,
3819 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3820 status = -ETIMEDOUT;
3823 /* Resume management traffic. */
3824 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3828 static void ql_display_dev_info(struct net_device *ndev)
3830 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3832 netif_info(qdev, probe, qdev->ndev,
3833 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3834 "XG Roll = %d, XG Rev = %d.\n",
3835 qdev->func,
3836 qdev->port,
3837 qdev->chip_rev_id & 0x0000000f,
3838 qdev->chip_rev_id >> 4 & 0x0000000f,
3839 qdev->chip_rev_id >> 8 & 0x0000000f,
3840 qdev->chip_rev_id >> 12 & 0x0000000f);
3841 netif_info(qdev, probe, qdev->ndev,
3842 "MAC address %pM\n", ndev->dev_addr);
3845 int ql_wol(struct ql_adapter *qdev)
3848 u32 wol = MB_WOL_DISABLE;
3850 /* The CAM is still intact after a reset, but if we
3851 * are doing WOL, then we may need to program the
3852 * routing regs. We would also need to issue the mailbox
3853 * commands to instruct the MPI what to do per the ethtool
3854 * WOL settings.
3855 */
3857 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3858 WAKE_MCAST | WAKE_BCAST)) {
3859 netif_err(qdev, ifdown, qdev->ndev,
3860 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3865 if (qdev->wol & WAKE_MAGIC) {
3866 status = ql_mb_wol_set_magic(qdev, 1);
3868 netif_err(qdev, ifdown, qdev->ndev,
3869 "Failed to set magic packet on %s.\n",
3873 netif_info(qdev, drv, qdev->ndev,
3874 "Enabled magic packet successfully on %s.\n",
3877 wol |= MB_WOL_MAGIC_PKT;
3881 wol |= MB_WOL_MODE_ON;
3882 status = ql_mb_wol_mode(qdev, wol);
3883 netif_err(qdev, drv, qdev->ndev,
3884 "WOL %s (wol code 0x%x) on %s\n",
3885 (status == 0) ? "Successfully set" : "Failed",
3886 wol, qdev->ndev->name);
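/* Illustrative sketch, not part of the original driver: only
 * magic-packet wake is supported, so ql_wol() above rejects any other
 * WAKE_* bit before programming the firmware. The helper name is
 * hypothetical.
 */
static inline bool ql_example_wol_supported(u32 wol)
{
	return (wol & ~(u32)WAKE_MAGIC) == 0;
}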
3892 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3895 /* Don't kill the reset worker thread if we
3896 * are in the process of recovery.
3897 */
3898 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3899 cancel_delayed_work_sync(&qdev->asic_reset_work);
3900 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3901 cancel_delayed_work_sync(&qdev->mpi_work);
3902 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3903 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3904 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3907 static int ql_adapter_down(struct ql_adapter *qdev)
3913 ql_cancel_all_work_sync(qdev);
3915 for (i = 0; i < qdev->rss_ring_count; i++)
3916 napi_disable(&qdev->rx_ring[i].napi);
3918 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3920 ql_disable_interrupts(qdev);
3922 ql_tx_ring_clean(qdev);
3924 /* Call netif_napi_del() from a common point.
3925 */
3926 for (i = 0; i < qdev->rss_ring_count; i++)
3927 netif_napi_del(&qdev->rx_ring[i].napi);
3929 status = ql_adapter_reset(qdev);
3931 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3933 ql_free_rx_buffers(qdev);
3938 static int ql_adapter_up(struct ql_adapter *qdev)
3942 err = ql_adapter_initialize(qdev);
3944 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3947 set_bit(QL_ADAPTER_UP, &qdev->flags);
3948 ql_alloc_rx_buffers(qdev);
3949 /* If the port is initialized and the
3950 * link is up then turn on the carrier.
3951 */
3952 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3953 (ql_read32(qdev, STS) & qdev->port_link_up))
3955 /* Restore rx mode. */
3956 clear_bit(QL_ALLMULTI, &qdev->flags);
3957 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3958 qlge_set_multicast_list(qdev->ndev);
3960 ql_enable_interrupts(qdev);
3961 ql_enable_all_completion_interrupts(qdev);
3962 netif_tx_start_all_queues(qdev->ndev);
3966 ql_adapter_reset(qdev);
3970 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3972 ql_free_mem_resources(qdev);
3976 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3980 if (ql_alloc_mem_resources(qdev)) {
3981 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3984 status = ql_request_irq(qdev);
3988 static int qlge_close(struct net_device *ndev)
3990 struct ql_adapter *qdev = netdev_priv(ndev);
3992 /* If we hit pci_channel_io_perm_failure
3993 * failure condition, then we already
3994 * brought the adapter down.
3996 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3997 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3998 clear_bit(QL_EEH_FATAL, &qdev->flags);
4002 /*
4003 * Wait for device to recover from a reset.
4004 * (Rarely happens, but possible.)
4005 */
4006 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4008 ql_adapter_down(qdev);
4009 ql_release_adapter_resources(qdev);
4013 static int ql_configure_rings(struct ql_adapter *qdev)
4016 struct rx_ring *rx_ring;
4017 struct tx_ring *tx_ring;
4018 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4019 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4020 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4022 qdev->lbq_buf_order = get_order(lbq_buf_len);
4024 /* In a perfect world we have one RSS ring for each CPU
4025 * and each has its own vector. To do that we ask for
4026 * cpu_cnt vectors. ql_enable_msix() will adjust the
4027 * vector count to what we actually get. We then
4028 * allocate an RSS ring for each.
4029 * Essentially, we are doing min(cpu_count, msix_vector_count).
4031 qdev->intr_count = cpu_cnt;
4032 ql_enable_msix(qdev);
4033 /* Adjust the RSS ring count to the actual vector count. */
4034 qdev->rss_ring_count = qdev->intr_count;
4035 qdev->tx_ring_count = cpu_cnt;
4036 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4038 for (i = 0; i < qdev->tx_ring_count; i++) {
4039 tx_ring = &qdev->tx_ring[i];
4040 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4041 tx_ring->qdev = qdev;
4043 tx_ring->wq_len = qdev->tx_ring_size;
4045 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4048 * The completion queue IDs for the tx rings start
4049 * immediately after the rss rings.
4050 */
4051 tx_ring->cq_id = qdev->rss_ring_count + i;
4054 for (i = 0; i < qdev->rx_ring_count; i++) {
4055 rx_ring = &qdev->rx_ring[i];
4056 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4057 rx_ring->qdev = qdev;
4059 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4060 if (i < qdev->rss_ring_count) {
4062 * Inbound (RSS) queues.
4064 rx_ring->cq_len = qdev->rx_ring_size;
4066 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4067 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4069 rx_ring->lbq_len * sizeof(__le64);
4070 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4071 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4072 "lbq_buf_size %d, order = %d\n",
4073 rx_ring->lbq_buf_size,
4074 qdev->lbq_buf_order);
4075 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4077 rx_ring->sbq_len * sizeof(__le64);
4078 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4079 rx_ring->type = RX_Q;
4082 * Outbound queue handles outbound completions only.
4084 /* outbound cq is same size as tx_ring it services. */
4085 rx_ring->cq_len = qdev->tx_ring_size;
4087 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4088 rx_ring->lbq_len = 0;
4089 rx_ring->lbq_size = 0;
4090 rx_ring->lbq_buf_size = 0;
4091 rx_ring->sbq_len = 0;
4092 rx_ring->sbq_size = 0;
4093 rx_ring->sbq_buf_size = 0;
4094 rx_ring->type = TX_Q;
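/* Illustrative sketch, not part of the original driver: the ring
 * topology derived in ql_configure_rings() above. With 4 usable MSI-X
 * vectors on a 4-CPU system there are 4 RSS rings (cq_ids 0-3), 4 TX
 * work queues, and rx_ring_count = 8, where rx_ring[4..7] are the TX
 * completion queues. The helper name is hypothetical.
 */
static inline u32 ql_example_rx_ring_count(u32 rss_ring_count,
					   u32 tx_ring_count)
{
	return rss_ring_count + tx_ring_count;
}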
4100 static int qlge_open(struct net_device *ndev)
4103 struct ql_adapter *qdev = netdev_priv(ndev);
4105 err = ql_adapter_reset(qdev);
4109 err = ql_configure_rings(qdev);
4113 err = ql_get_adapter_resources(qdev);
4117 err = ql_adapter_up(qdev);
4124 ql_release_adapter_resources(qdev);
4128 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4130 struct rx_ring *rx_ring;
4134 /* Wait for an outstanding reset to complete. */
4135 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4137 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4138 netif_err(qdev, ifup, qdev->ndev,
4139 "Waiting for adapter UP...\n");
4144 netif_err(qdev, ifup, qdev->ndev,
4145 "Timed out waiting for adapter UP\n");
4150 status = ql_adapter_down(qdev);
4154 /* Get the new rx buffer size. */
4155 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4156 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4157 qdev->lbq_buf_order = get_order(lbq_buf_len);
4159 for (i = 0; i < qdev->rss_ring_count; i++) {
4160 rx_ring = &qdev->rx_ring[i];
4161 /* Set the new size. */
4162 rx_ring->lbq_buf_size = lbq_buf_len;
4165 status = ql_adapter_up(qdev);
4171 netif_alert(qdev, ifup, qdev->ndev,
4172 "Driver up/down cycle failed, closing device.\n");
4173 set_bit(QL_ADAPTER_UP, &qdev->flags);
4174 dev_close(qdev->ndev);
4178 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4180 struct ql_adapter *qdev = netdev_priv(ndev);
4183 if (ndev->mtu == 1500 && new_mtu == 9000) {
4184 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4185 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4186 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4187 } else
4188 return -EINVAL;
4190 queue_delayed_work(qdev->workqueue,
4191 &qdev->mpi_port_cfg_work, 3*HZ);
4193 ndev->mtu = new_mtu;
4195 if (!netif_running(qdev->ndev)) {
4199 status = ql_change_rx_buffers(qdev);
4201 netif_err(qdev, ifup, qdev->ndev,
4202 "Changing MTU failed.\n");
4208 static struct net_device_stats *qlge_get_stats(struct net_device
4211 struct ql_adapter *qdev = netdev_priv(ndev);
4212 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4213 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4214 unsigned long pkts, mcast, dropped, errors, bytes;
4218 pkts = mcast = dropped = errors = bytes = 0;
4219 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4220 pkts += rx_ring->rx_packets;
4221 bytes += rx_ring->rx_bytes;
4222 dropped += rx_ring->rx_dropped;
4223 errors += rx_ring->rx_errors;
4224 mcast += rx_ring->rx_multicast;
4226 ndev->stats.rx_packets = pkts;
4227 ndev->stats.rx_bytes = bytes;
4228 ndev->stats.rx_dropped = dropped;
4229 ndev->stats.rx_errors = errors;
4230 ndev->stats.multicast = mcast;
4233 pkts = errors = bytes = 0;
4234 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4235 pkts += tx_ring->tx_packets;
4236 bytes += tx_ring->tx_bytes;
4237 errors += tx_ring->tx_errors;
4239 ndev->stats.tx_packets = pkts;
4240 ndev->stats.tx_bytes = bytes;
4241 ndev->stats.tx_errors = errors;
4242 return &ndev->stats;
4245 void qlge_set_multicast_list(struct net_device *ndev)
4247 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4248 struct netdev_hw_addr *ha;
4251 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4255 * Set or clear promiscuous mode if a
4256 * transition is taking place.
4258 if (ndev->flags & IFF_PROMISC) {
4259 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4260 if (ql_set_routing_reg
4261 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4262 netif_err(qdev, hw, qdev->ndev,
4263 "Failed to set promiscous mode.\n");
4265 set_bit(QL_PROMISCUOUS, &qdev->flags);
4269 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4270 if (ql_set_routing_reg
4271 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4272 netif_err(qdev, hw, qdev->ndev,
4273 "Failed to clear promiscous mode.\n");
4275 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4281 * Set or clear all multicast mode if a
4282 * transition is taking place.
4284 if ((ndev->flags & IFF_ALLMULTI) ||
4285 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4286 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4287 if (ql_set_routing_reg
4288 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4289 netif_err(qdev, hw, qdev->ndev,
4290 "Failed to set all-multi mode.\n");
4292 set_bit(QL_ALLMULTI, &qdev->flags);
4296 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4297 if (ql_set_routing_reg
4298 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4299 netif_err(qdev, hw, qdev->ndev,
4300 "Failed to clear all-multi mode.\n");
4302 clear_bit(QL_ALLMULTI, &qdev->flags);
4307 if (!netdev_mc_empty(ndev)) {
4308 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4312 netdev_for_each_mc_addr(ha, ndev) {
4313 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4314 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4315 netif_err(qdev, hw, qdev->ndev,
4316 "Failed to loadmulticast address.\n");
4317 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4322 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4323 if (ql_set_routing_reg
4324 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4325 netif_err(qdev, hw, qdev->ndev,
4326 "Failed to set multicast match mode.\n");
4328 set_bit(QL_ALLMULTI, &qdev->flags);
4332 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4335 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4337 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4338 struct sockaddr *addr = p;
4341 if (!is_valid_ether_addr(addr->sa_data))
4342 return -EADDRNOTAVAIL;
4343 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4344 /* Update local copy of current mac address. */
4345 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4347 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4350 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4351 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4353 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4354 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4358 static void qlge_tx_timeout(struct net_device *ndev)
4360 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4361 ql_queue_asic_error(qdev);
4364 static void ql_asic_reset_work(struct work_struct *work)
4366 struct ql_adapter *qdev =
4367 container_of(work, struct ql_adapter, asic_reset_work.work);
4370 status = ql_adapter_down(qdev);
4374 status = ql_adapter_up(qdev);
4378 /* Restore rx mode. */
4379 clear_bit(QL_ALLMULTI, &qdev->flags);
4380 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4381 qlge_set_multicast_list(qdev->ndev);
4386 netif_alert(qdev, ifup, qdev->ndev,
4387 "Driver up/down cycle failed, closing device\n");
4389 set_bit(QL_ADAPTER_UP, &qdev->flags);
4390 dev_close(qdev->ndev);
4394 static struct nic_operations qla8012_nic_ops = {
4395 .get_flash = ql_get_8012_flash_params,
4396 .port_initialize = ql_8012_port_initialize,
4399 static struct nic_operations qla8000_nic_ops = {
4400 .get_flash = ql_get_8000_flash_params,
4401 .port_initialize = ql_8000_port_initialize,
4404 /* Find the pcie function number for the other NIC
4405 * on this chip. Since both NIC functions share a
4406 * common firmware we have the lowest enabled function
4407 * do any common work. Examples would be resetting
4408 * after a fatal firmware error, or doing a firmware
4409 * coredump.
4410 */
4411 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4415 u32 nic_func1, nic_func2;
4417 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4422 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4423 MPI_TEST_NIC_FUNC_MASK);
4424 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4425 MPI_TEST_NIC_FUNC_MASK);
4427 if (qdev->func == nic_func1)
4428 qdev->alt_func = nic_func2;
4429 else if (qdev->func == nic_func2)
4430 qdev->alt_func = nic_func1;
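
/*
 * Illustrative sketch, not part of the driver: with qdev->func and
 * qdev->alt_func discovered above, the "lowest enabled function does
 * any common work" convention reduces to a single comparison.  The
 * helper name ql_example_owns_common_work is hypothetical.
 */
#if 0	/* example only, not compiled */
static inline bool ql_example_owns_common_work(struct ql_adapter *qdev)
{
	/* The numerically lower PCIe function owns chip-wide work
	 * such as resets and firmware coredumps. */
	return qdev->func < qdev->alt_func;
}
#endif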
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
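
/*
 * Illustrative sketch, not part of the driver: the DMA-mask
 * negotiation inside ql_init_device() above, shown in isolation.
 * Prefer a 64-bit mask and record the result so the probe path can
 * advertise NETIF_F_HIGHDMA; otherwise fall back to 32 bits.  The
 * helper name ql_example_set_dma_masks is hypothetical.
 */
#if 0	/* example only, not compiled */
static int ql_example_set_dma_masks(struct pci_dev *pdev,
				    struct ql_adapter *qdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA configuration */
	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
#endif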
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
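
/*
 * Illustrative sketch, not part of the driver: the two wrappers above
 * exist so the ethtool self-test can inject a frame through the
 * normal send path and then reap it from an rx ring.  The helper name
 * ql_example_run_loopback and its one-frame polling policy are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int ql_example_run_loopback(struct ql_adapter *qdev,
				   struct sk_buff *skb)
{
	if (ql_lb_send(skb, qdev->ndev) != NETDEV_TX_OK)
		return -EIO;		/* could not queue the test frame */
	/* Reap completions; the test checks qdev->lb_count to see
	 * whether the looped-back frame arrived. */
	ql_clean_lb_rx_ring(&qdev->rx_ring[0], 1);
	return atomic_read(&qdev->lb_count) ? 0 : -EIO;
}
#endif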
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
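
/*
 * Illustrative sketch, not part of the driver: a driver's-eye view of
 * the order in which the PCI core invokes the EEH callbacks that
 * qlge_err_handler (below) registers.  Real recovery is driven by the
 * PCI core; no driver code like this exists.
 */
#if 0	/* example only, not compiled */
static void example_eeh_recovery_flow(struct pci_dev *pdev)
{
	if (qlge_io_error_detected(pdev, pci_channel_io_frozen) ==
	    PCI_ERS_RESULT_NEED_RESET &&
	    qlge_io_slot_reset(pdev) == PCI_ERS_RESULT_RECOVERED)
		qlge_io_resume(pdev);
}
#endif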
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}
module_init(qlge_init_module);
module_exit(qlge_exit);