2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
28 #include <linux/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - Do Not allocate memory. ");
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 /* required last entry */
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
102 /* This hardware semaphore enforces exclusive access to
103 * resources shared between the NIC driver, MPI firmware,
104 * FCoE firmware and the FC driver.
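 * Each resource has its own field in the SEM register. ql_sem_trylock()
 * below writes SEM_SET shifted into that field along with sem_mask and
 * reads the register back: a return of 0 means the bits stuck and we
 * now own the semaphore, while nonzero means another owner holds it.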
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
136 netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
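/* Retry ql_sem_trylock() up to wait_count times, pausing briefly
 * between attempts, until the semaphore is acquired or the retries
 * run out.
 */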
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 unsigned int wait_count = 30;
148 if (!ql_sem_trylock(qdev, sem_mask))
151 } while (--wait_count);
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
161 /* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialization
163 * process, but is also used by kernel thread APIs such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
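 * The register is polled up to UDELAY_COUNT times, pausing UDELAY_DELAY
 * microseconds between reads; seeing err_bit set logs an access error,
 * while seeing the requested bit set ends the wait successfully.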
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169 int count = UDELAY_COUNT;
172 temp = ql_read32(qdev, reg);
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
180 } else if (temp & bit)
182 udelay(UDELAY_DELAY);
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 int count = UDELAY_COUNT;
199 temp = ql_read32(qdev, CFG);
204 udelay(UDELAY_DELAY);
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
237 status = ql_wait_cfg(qdev, bit);
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
252 * Wait for the bit to clear after signaling hw.
254 status = ql_wait_cfg(qdev, bit);
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
329 /* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
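 * A CAM MAC entry is programmed as three writes through the
 * MAC_ADDR_IDX/MAC_ADDR_DATA register pair: the low four bytes of the
 * address, the high two bytes, and finally a cam_output word that
 * selects how matching frames are routed (NIC core, function and
 * completion queue).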
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
374 u32 upper = (addr[0] << 8) | addr[1];
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
386 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
393 ql_write32(qdev, MAC_ADDR_DATA, lower);
395 ql_wait_reg_rdy(qdev,
396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 ql_write32(qdev, MAC_ADDR_DATA, upper);
404 ql_wait_reg_rdy(qdev,
405 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
408 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
409 (index << MAC_ADDR_IDX_SHIFT) | /* index */
411 /* This field should also include the queue id
412 and possibly the function id. Right now we hardcode
413 the route field to NIC core.
415 cam_output = (CAM_OUT_ROUTE_NIC |
417 func << CAM_OUT_FUNC_SHIFT) |
418 (0 << CAM_OUT_CQ_ID_SHIFT));
419 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420 cam_output |= CAM_OUT_RV;
421 /* route to NIC core */
422 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
425 case MAC_ADDR_TYPE_VLAN:
427 u32 enable_bit = *((u32 *) &addr[0]);
428 /* For VLAN, the addr actually holds a bit that
429 * either enables or disables the vlan id we are
430 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about.
433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
437 enable_bit ? "to" : "from");
440 ql_wait_reg_rdy(qdev,
441 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
444 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445 (index << MAC_ADDR_IDX_SHIFT) | /* index */
447 enable_bit); /* enable/disable */
450 case MAC_ADDR_TYPE_MULTI_FLTR:
452 netif_crit(qdev, ifup, qdev->ndev,
453 "Address type %d not yet supported.\n", type);
460 /* Set or clear MAC address in hardware. We sometimes
461 * have to clear it to prevent wrong frame routing
462 * especially in a bonding environment.
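 * The helper takes the SEM_MAC_ADDR_MASK hardware semaphore, programs
 * either the current MAC address (set != 0) or an all-zero address
 * into this function's CAM slot via ql_set_mac_addr_reg(), and then
 * releases the semaphore.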
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
467 char zero_mac_addr[ETH_ALEN];
471 addr = &qdev->current_mac_addr[0];
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Set Mac addr %pM\n", addr);
475 memset(zero_mac_addr, 0, ETH_ALEN);
476 addr = &zero_mac_addr[0];
477 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 "Clearing MAC address\n");
480 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
483 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
487 netif_err(qdev, ifup, qdev->ndev,
488 "Failed to init mac address.\n");
492 void ql_link_on(struct ql_adapter *qdev)
494 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495 netif_carrier_on(qdev->ndev);
496 ql_set_mac_addr(qdev, 1);
499 void ql_link_off(struct ql_adapter *qdev)
501 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502 netif_carrier_off(qdev->ndev);
503 ql_set_mac_addr(qdev, 0);
506 /* Get a specific frame routing value from the CAM.
507 * Used for debug and reg dump.
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
513 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
517 ql_write32(qdev, RT_IDX,
518 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
522 *value = ql_read32(qdev, RT_DATA);
527 /* The NIC function for this chip has 16 routing indexes. Each one can be used
528 * to route different frame types to various inbound queues. We send broadcast/
529 * multicast/error frames to the default queue for slow handling,
530 * and CAM hit/RSS frames to the fast handling queues.
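 * Each slot value written to RT_IDX is composed as
 *
 *     dest (RT_IDX_DST_DFLT_Q, RT_IDX_DST_CAM_Q or RT_IDX_DST_RSS)
 *         | RT_IDX_TYPE_NICQ
 *         | (slot << RT_IDX_IDX_SHIFT)
 *
 * with RT_IDX_E OR'd in when the entry is being enabled; RT_DATA then
 * receives the frame-type mask the slot should match (or 0 to clear it).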
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
535 int status = -EINVAL; /* Return error if no mask match. */
538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
563 value = RT_IDX_DST_CAM_Q | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 case RT_IDX_VALID: /* Promiscuous Mode frames. */
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
577 value = RT_IDX_DST_DFLT_Q | /* dest */
578 RT_IDX_TYPE_NICQ | /* type */
579 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
584 value = RT_IDX_DST_DFLT_Q | /* dest */
585 RT_IDX_TYPE_NICQ | /* type */
586 (RT_IDX_IP_CSUM_ERR_SLOT <<
587 RT_IDX_IDX_SHIFT); /* index */
590 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
592 value = RT_IDX_DST_DFLT_Q | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 RT_IDX_IDX_SHIFT); /* index */
598 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
600 value = RT_IDX_DST_DFLT_Q | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
607 value = RT_IDX_DST_DFLT_Q | /* dest */
608 RT_IDX_TYPE_NICQ | /* type */
609 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
612 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
614 value = RT_IDX_DST_DFLT_Q | /* dest */
615 RT_IDX_TYPE_NICQ | /* type */
616 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
619 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
621 value = RT_IDX_DST_RSS | /* dest */
622 RT_IDX_TYPE_NICQ | /* type */
623 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
626 case 0: /* Clear the E-bit on an entry. */
628 value = RT_IDX_DST_DFLT_Q | /* dest */
629 RT_IDX_TYPE_NICQ | /* type */
630 (index << RT_IDX_IDX_SHIFT);/* index */
634 netif_err(qdev, ifup, qdev->ndev,
635 "Mask type %d not yet supported.\n", mask);
641 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
644 value |= (enable ? RT_IDX_E : 0);
645 ql_write32(qdev, RT_IDX, value);
646 ql_write32(qdev, RT_DATA, enable ? mask : 0);
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
659 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663 * Otherwise, we may have multiple outstanding workers and don't want to
664 * enable until the last one finishes. In this case, the irq_cnt gets
665 * incremented every time we queue a worker and decremented every time
666 * a worker finishes. Once it hits zero we enable the interrupt.
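 * ql_enable_all_completion_interrupts() below relies on this: it
 * pre-charges irq_cnt to 1 where needed so that the
 * atomic_dec_and_test() in the enable path actually reaches zero and
 * re-enables the interrupt.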
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
671 unsigned long hw_flags = 0;
672 struct intr_context *ctx = qdev->intr_context + intr;
674 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 /* Always enable if we're MSIX multi interrupts and
676 * it's not the default (zeroth) interrupt.
678 ql_write32(qdev, INTR_EN,
680 var = ql_read32(qdev, STS);
684 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 ql_write32(qdev, INTR_EN,
688 var = ql_read32(qdev, STS);
690 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
697 struct intr_context *ctx;
699 /* HW disables for us if we're MSIX multi interrupts and
700 * it's not the default (zeroth) interrupt.
702 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
705 ctx = qdev->intr_context + intr;
706 spin_lock(&qdev->hw_lock);
707 if (!atomic_read(&ctx->irq_cnt)) {
708 ql_write32(qdev, INTR_EN,
710 var = ql_read32(qdev, STS);
712 atomic_inc(&ctx->irq_cnt);
713 spin_unlock(&qdev->hw_lock);
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
720 for (i = 0; i < qdev->intr_count; i++) {
721 /* The enable call does an atomic_dec_and_test
722 * and enables only if the result is zero.
723 * So we precharge it here.
725 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
727 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728 ql_enable_completion_interrupt(qdev, i);
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
737 __le16 *flash = (__le16 *)&qdev->flash;
739 status = strncmp((char *)&qdev->flash, str, 4);
741 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
745 for (i = 0; i < size; i++)
746 csum += le16_to_cpu(*flash++);
749 netif_err(qdev, ifup, qdev->ndev,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
758 /* wait for reg to come ready */
759 status = ql_wait_reg_rdy(qdev,
760 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
763 /* set up for reg read */
764 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 /* wait for reg to come ready */
766 status = ql_wait_reg_rdy(qdev,
767 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
770 /* This data is stored on flash as an array of
771 * __le32. Since ql_read32() returns CPU-endian values,
772 * we need to swap them back.
774 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
783 __le32 *p = (__le32 *)&qdev->flash;
787 /* Get flash offset for function and adjust
791 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
793 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
795 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
798 size = sizeof(struct flash_params_8000) / sizeof(u32);
799 for (i = 0; i < size; i++, p++) {
800 status = ql_read_flash_word(qdev, i+offset, p);
802 netif_err(qdev, ifup, qdev->ndev,
803 "Error reading flash.\n");
808 status = ql_validate_flash(qdev,
809 sizeof(struct flash_params_8000) / sizeof(u16),
812 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
817 /* Extract either manufacturer or BOFM modified
820 if (qdev->flash.flash_params_8000.data_type1 == 2)
822 qdev->flash.flash_params_8000.mac_addr1,
823 qdev->ndev->addr_len);
826 qdev->flash.flash_params_8000.mac_addr,
827 qdev->ndev->addr_len);
829 if (!is_valid_ether_addr(mac_addr)) {
830 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
835 memcpy(qdev->ndev->dev_addr,
837 qdev->ndev->addr_len);
840 ql_sem_unlock(qdev, SEM_FLASH_MASK);
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
848 __le32 *p = (__le32 *)&qdev->flash;
850 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
852 /* Second function's parameters follow the first
858 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
861 for (i = 0; i < size; i++, p++) {
862 status = ql_read_flash_word(qdev, i+offset, p);
864 netif_err(qdev, ifup, qdev->ndev,
865 "Error reading flash.\n");
871 status = ql_validate_flash(qdev,
872 sizeof(struct flash_params_8012) / sizeof(u16),
875 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
880 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
885 memcpy(qdev->ndev->dev_addr,
886 qdev->flash.flash_params_8012.mac_addr,
887 qdev->ndev->addr_len);
890 ql_sem_unlock(qdev, SEM_FLASH_MASK);
894 /* xgmac registers are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
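 * Write sequence: wait for XGMAC_ADDR_RDY, place the value in
 * XGMAC_DATA, then write the register number to XGMAC_ADDR to trigger
 * the access. Read sequence: wait for ready, write the register number
 * OR'd with XGMAC_ADDR_R, wait for ready again, then pick the result up
 * from XGMAC_DATA.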
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
906 /* write the data to the data reg */
907 ql_write32(qdev, XGMAC_DATA, data);
908 /* trigger the write */
909 ql_write32(qdev, XGMAC_ADDR, reg);
913 /* xgmac registers are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
920 /* wait for reg to come ready */
921 status = ql_wait_reg_rdy(qdev,
922 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
925 /* set up for reg read */
926 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 /* wait for reg to come ready */
928 status = ql_wait_reg_rdy(qdev,
929 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
933 *data = ql_read32(qdev, XGMAC_DATA);
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
945 status = ql_read_xgmac_reg(qdev, reg, &lo);
949 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
953 *data = (u64) lo | ((u64) hi << 32);
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
963 * Get MPI firmware version for driver banner
966 status = ql_mb_about_fw(qdev);
969 status = ql_mb_get_fw_state(qdev);
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
978 /* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
989 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
993 netif_info(qdev, link, qdev->ndev,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
997 netif_crit(qdev, link, qdev->ndev,
998 "Port initialize timed out.\n");
1003 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore.\n");
1004 /* Set the core reset. */
1005 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1008 data |= GLOBAL_CFG_RESET;
1009 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1015 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1016 data |= GLOBAL_CFG_TX_STAT_EN;
1017 data |= GLOBAL_CFG_RX_STAT_EN;
1018 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1022 /* Enable transmitter, and clear its reset. */
1023 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1026 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1027 data |= TX_CFG_EN; /* Enable the transmitter. */
1028 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1032 /* Enable receiver and clear its reset. */
1033 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1036 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1037 data |= RX_CFG_EN; /* Enable the receiver. */
1038 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1042 /* Turn on jumbo. */
1044 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1048 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1055 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1061 return PAGE_SIZE << qdev->lbq_buf_order;
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1067 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068 rx_ring->lbq_curr_idx++;
1069 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070 rx_ring->lbq_curr_idx = 0;
1071 rx_ring->lbq_free_cnt++;
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 struct rx_ring *rx_ring)
1078 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1080 pci_dma_sync_single_for_cpu(qdev->pdev,
1081 dma_unmap_addr(lbq_desc, mapaddr),
1082 rx_ring->lbq_buf_size,
1083 PCI_DMA_FROMDEVICE);
1085 /* If it's the last chunk of our master page then
1088 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 == ql_lbq_block_size(qdev))
1090 pci_unmap_page(qdev->pdev,
1091 lbq_desc->p.pg_chunk.map,
1092 ql_lbq_block_size(qdev),
1093 PCI_DMA_FROMDEVICE);
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1100 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101 rx_ring->sbq_curr_idx++;
1102 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103 rx_ring->sbq_curr_idx = 0;
1104 rx_ring->sbq_free_cnt++;
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1111 rx_ring->cnsmr_idx++;
1112 rx_ring->curr_entry++;
1113 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114 rx_ring->cnsmr_idx = 0;
1115 rx_ring->curr_entry = rx_ring->cq_base;
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1121 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 struct bq_desc *lbq_desc)
1127 if (!rx_ring->pg_chunk.page) {
1129 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1131 qdev->lbq_buf_order);
1132 if (unlikely(!rx_ring->pg_chunk.page)) {
1133 netif_err(qdev, drv, qdev->ndev,
1134 "page allocation failed.\n");
1137 rx_ring->pg_chunk.offset = 0;
1138 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 0, ql_lbq_block_size(qdev),
1140 PCI_DMA_FROMDEVICE);
1141 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 __free_pages(rx_ring->pg_chunk.page,
1143 qdev->lbq_buf_order);
1144 netif_err(qdev, drv, qdev->ndev,
1145 "PCI mapping failed.\n");
1148 rx_ring->pg_chunk.map = map;
1149 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1155 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1157 /* Adjust the master page chunk for next
1160 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 rx_ring->pg_chunk.page = NULL;
1163 lbq_desc->p.pg_chunk.last_flag = 1;
1165 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 get_page(rx_ring->pg_chunk.page);
1167 lbq_desc->p.pg_chunk.last_flag = 0;
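/* Large receive buffers are carved out of a single compound page of
 * order lbq_buf_order: ql_get_next_chunk() above hands out
 * lbq_buf_size-sized slices of that page, taking an extra page
 * reference for every slice except the last so the page is freed only
 * after all of its chunks have been consumed and unmapped.
 */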
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1174 u32 clean_idx = rx_ring->lbq_clean_idx;
1175 u32 start_idx = clean_idx;
1176 struct bq_desc *lbq_desc;
1180 while (rx_ring->lbq_free_cnt > 32) {
1181 for (i = 0; i < 16; i++) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n",
1185 lbq_desc = &rx_ring->lbq[clean_idx];
1186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n");
1192 map = lbq_desc->p.pg_chunk.map +
1193 lbq_desc->p.pg_chunk.offset;
1194 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195 dma_unmap_len_set(lbq_desc, maplen,
1196 rx_ring->lbq_buf_size);
1197 *lbq_desc->addr = cpu_to_le64(map);
1199 pci_dma_sync_single_for_device(qdev->pdev, map,
1200 rx_ring->lbq_buf_size,
1201 PCI_DMA_FROMDEVICE);
1203 if (clean_idx == rx_ring->lbq_len)
1207 rx_ring->lbq_clean_idx = clean_idx;
1208 rx_ring->lbq_prod_idx += 16;
1209 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 rx_ring->lbq_prod_idx = 0;
1211 rx_ring->lbq_free_cnt -= 16;
1214 if (start_idx != clean_idx) {
1215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring->lbq_prod_idx);
1218 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 rx_ring->lbq_prod_idx_db_reg);
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1226 u32 clean_idx = rx_ring->sbq_clean_idx;
1227 u32 start_idx = clean_idx;
1228 struct bq_desc *sbq_desc;
1232 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx];
1235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n",
1238 if (sbq_desc->p.skb == NULL) {
1239 netif_printk(qdev, rx_status, KERN_DEBUG,
1241 "sbq: getting new skb for index %d.\n",
1244 netdev_alloc_skb(qdev->ndev,
1246 if (sbq_desc->p.skb == NULL) {
1247 netif_err(qdev, probe, qdev->ndev,
1248 "Couldn't get an skb.\n");
1249 rx_ring->sbq_clean_idx = clean_idx;
1252 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253 map = pci_map_single(qdev->pdev,
1254 sbq_desc->p.skb->data,
1255 rx_ring->sbq_buf_size,
1256 PCI_DMA_FROMDEVICE);
1257 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258 netif_err(qdev, ifup, qdev->ndev,
1259 "PCI mapping failed.\n");
1260 rx_ring->sbq_clean_idx = clean_idx;
1261 dev_kfree_skb_any(sbq_desc->p.skb);
1262 sbq_desc->p.skb = NULL;
1265 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 dma_unmap_len_set(sbq_desc, maplen,
1267 rx_ring->sbq_buf_size);
1268 *sbq_desc->addr = cpu_to_le64(map);
1272 if (clean_idx == rx_ring->sbq_len)
1275 rx_ring->sbq_clean_idx = clean_idx;
1276 rx_ring->sbq_prod_idx += 16;
1277 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 rx_ring->sbq_prod_idx = 0;
1279 rx_ring->sbq_free_cnt -= 16;
1282 if (start_idx != clean_idx) {
1283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring->sbq_prod_idx);
1286 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 rx_ring->sbq_prod_idx_db_reg);
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1298 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
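 * tx_ring_desc->map[] mirrors the layout built by ql_map_send() below:
 * slot 0 is the skb head and slot 7 holds the external OAL sglist when
 * more than seven mappings were needed; both are unmapped with
 * pci_unmap_single(), while every other slot is a page fragment and is
 * unmapped with pci_unmap_page().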
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If it's the zeroth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there are more than 6 frags,
1317 netif_printk(qdev, tx_done, KERN_DEBUG,
1319 "unmapping OAL area.\n");
1321 pci_unmap_single(qdev->pdev,
1322 dma_unmap_addr(&tx_ring_desc->map[i],
1324 dma_unmap_len(&tx_ring_desc->map[i],
1328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
1330 pci_unmap_page(qdev->pdev,
1331 dma_unmap_addr(&tx_ring_desc->map[i],
1333 dma_unmap_len(&tx_ring_desc->map[i],
1334 maplen), PCI_DMA_TODEVICE);
1340 /* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1347 int len = skb_headlen(skb);
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
1358 * Map the skb buffer first.
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
1367 return NETDEV_TX_BUSY;
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
1372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1417 tbd->addr = cpu_to_le64(map);
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
1426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1434 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1437 err = dma_mapping_error(&qdev->pdev->dev, map);
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449 skb_frag_size(frag));
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be unmapped.
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct napi_struct *napi = &rx_ring->napi;
1480 napi->dev = qdev->ndev;
1482 skb = napi_get_frags(napi);
1484 netif_err(qdev, drv, qdev->ndev,
1485 "Couldn't get an skb, exiting.\n");
1486 rx_ring->rx_dropped++;
1487 put_page(lbq_desc->p.pg_chunk.page);
1490 prefetch(lbq_desc->p.pg_chunk.va);
1491 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 lbq_desc->p.pg_chunk.page,
1493 lbq_desc->p.pg_chunk.offset,
1497 skb->data_len += length;
1498 skb->truesize += length;
1499 skb_shinfo(skb)->nr_frags++;
1501 rx_ring->rx_packets++;
1502 rx_ring->rx_bytes += length;
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 skb_record_rx_queue(skb, rx_ring->cq_id);
1505 if (vlan_id != 0xffff)
1506 __vlan_hwaccel_put_tag(skb, vlan_id);
1507 napi_gro_frags(napi);
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1523 skb = netdev_alloc_skb(ndev, length);
1525 netif_err(qdev, drv, qdev->ndev,
1526 "Couldn't get an skb, need to unwind!.\n");
1527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1532 addr = lbq_desc->p.pg_chunk.va;
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538 netif_info(qdev, drv, qdev->ndev,
1539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540 rx_ring->rx_errors++;
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1547 if (skb->len > ndev->mtu + ETH_HLEN) {
1548 netif_err(qdev, drv, qdev->ndev,
1549 "Segment too small, dropping.\n");
1550 rx_ring->rx_dropped++;
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
1567 skb_checksum_none_assert(skb);
1569 if ((ndev->features & NETIF_F_RXCSUM) &&
1570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
1575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data;
1580 if (!(iph->frag_off &
1581 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 netif_printk(qdev, rx_status, KERN_DEBUG,
1585 "TCP checksum done!\n");
1590 skb_record_rx_queue(skb, rx_ring->cq_id);
1591 if (vlan_id != 0xffff)
1592 __vlan_hwaccel_put_tag(skb, vlan_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 napi_gro_receive(napi, skb);
1596 netif_receive_skb(skb);
1599 dev_kfree_skb_any(skb);
1600 put_page(lbq_desc->p.pg_chunk.page);
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 struct rx_ring *rx_ring,
1606 struct ib_mac_iocb_rsp *ib_mac_rsp,
1610 struct net_device *ndev = qdev->ndev;
1611 struct sk_buff *skb = NULL;
1612 struct sk_buff *new_skb = NULL;
1613 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1615 skb = sbq_desc->p.skb;
1616 /* Allocate new_skb and copy */
1617 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 if (new_skb == NULL) {
1619 netif_err(qdev, probe, qdev->ndev,
1620 "No skb available, drop the packet.\n");
1621 rx_ring->rx_dropped++;
1624 skb_reserve(new_skb, NET_IP_ALIGN);
1626 pci_dma_sync_single_for_cpu(qdev->pdev,
1627 dma_unmap_addr(sbq_desc, mapaddr),
1628 dma_unmap_len(sbq_desc, maplen),
1629 PCI_DMA_FROMDEVICE);
1631 memcpy(skb_put(new_skb, length), skb->data, length);
1633 pci_dma_sync_single_for_device(qdev->pdev,
1634 dma_unmap_addr(sbq_desc, mapaddr),
1635 dma_unmap_len(sbq_desc, maplen),
1636 PCI_DMA_FROMDEVICE);
1639 /* Frame error, so drop the packet. */
1640 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1641 netif_info(qdev, drv, qdev->ndev,
1642 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1643 dev_kfree_skb_any(skb);
1644 rx_ring->rx_errors++;
1648 /* loopback self test for ethtool */
1649 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1650 ql_check_lb_frame(qdev, skb);
1651 dev_kfree_skb_any(skb);
1655 /* The max framesize filter on this chip is set higher than
1656 * MTU since FCoE uses 2k frames.
1658 if (skb->len > ndev->mtu + ETH_HLEN) {
1659 dev_kfree_skb_any(skb);
1660 rx_ring->rx_dropped++;
1664 prefetch(skb->data);
1666 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1667 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1669 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1671 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1672 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1673 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1674 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1676 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1677 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1678 "Promiscuous Packet.\n");
1680 rx_ring->rx_packets++;
1681 rx_ring->rx_bytes += skb->len;
1682 skb->protocol = eth_type_trans(skb, ndev);
1683 skb_checksum_none_assert(skb);
1685 /* If rx checksum is on, and there are no
1686 * csum or frame errors.
1688 if ((ndev->features & NETIF_F_RXCSUM) &&
1689 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1691 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1692 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1693 "TCP checksum done!\n");
1694 skb->ip_summed = CHECKSUM_UNNECESSARY;
1695 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1696 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1697 /* Unfragmented ipv4 UDP frame. */
1698 struct iphdr *iph = (struct iphdr *) skb->data;
1699 if (!(iph->frag_off &
1700 ntohs(IP_MF|IP_OFFSET))) {
1701 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702 netif_printk(qdev, rx_status, KERN_DEBUG,
1704 "TCP checksum done!\n");
1709 skb_record_rx_queue(skb, rx_ring->cq_id);
1710 if (vlan_id != 0xffff)
1711 __vlan_hwaccel_put_tag(skb, vlan_id);
1712 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1713 napi_gro_receive(&rx_ring->napi, skb);
1715 netif_receive_skb(skb);
1718 static void ql_realign_skb(struct sk_buff *skb, int len)
1720 void *temp_addr = skb->data;
1722 /* Undo the skb_reserve(skb,32) we did before
1723 * giving to hardware, and realign data on
1724 * a 2-byte boundary.
1726 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1727 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1728 skb_copy_to_linear_data(skb, temp_addr,
1733 * This function builds an skb for the given inbound
1734 * completion. It will be rewritten for readability in the near
1735 * future, but for now it works well.
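 * Depending on the IOCB flags the frame may arrive as: a split header
 * in a small buffer with the data in a small buffer, a single large
 * buffer, or a chain of large buffers described by an sglist held in a
 * small buffer; unsplit frames land entirely in one small or one large
 * buffer. Each branch below either reuses, copies, or page-chains the
 * buffers into a single skb.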
1737 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1738 struct rx_ring *rx_ring,
1739 struct ib_mac_iocb_rsp *ib_mac_rsp)
1741 struct bq_desc *lbq_desc;
1742 struct bq_desc *sbq_desc;
1743 struct sk_buff *skb = NULL;
1744 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1745 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1748 * Handle the header buffer if present.
1750 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1751 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1752 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1753 "Header of %d bytes in small buffer.\n", hdr_len);
1755 * Headers fit nicely into a small buffer.
1757 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758 pci_unmap_single(qdev->pdev,
1759 dma_unmap_addr(sbq_desc, mapaddr),
1760 dma_unmap_len(sbq_desc, maplen),
1761 PCI_DMA_FROMDEVICE);
1762 skb = sbq_desc->p.skb;
1763 ql_realign_skb(skb, hdr_len);
1764 skb_put(skb, hdr_len);
1765 sbq_desc->p.skb = NULL;
1769 * Handle the data buffer(s).
1771 if (unlikely(!length)) { /* Is there data too? */
1772 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1773 "No Data buffer in this packet.\n");
1777 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1778 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1779 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780 "Headers in small, data of %d bytes in small, combine them.\n",
1783 * Data is less than small buffer size so it's
1784 * stuffed in a small buffer.
1785 * For this case we append the data
1786 * from the "data" small buffer to the "header" small
1789 sbq_desc = ql_get_curr_sbuf(rx_ring);
1790 pci_dma_sync_single_for_cpu(qdev->pdev,
1792 (sbq_desc, mapaddr),
1795 PCI_DMA_FROMDEVICE);
1796 memcpy(skb_put(skb, length),
1797 sbq_desc->p.skb->data, length);
1798 pci_dma_sync_single_for_device(qdev->pdev,
1805 PCI_DMA_FROMDEVICE);
1807 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1808 "%d bytes in a single small buffer.\n",
1810 sbq_desc = ql_get_curr_sbuf(rx_ring);
1811 skb = sbq_desc->p.skb;
1812 ql_realign_skb(skb, length);
1813 skb_put(skb, length);
1814 pci_unmap_single(qdev->pdev,
1815 dma_unmap_addr(sbq_desc,
1817 dma_unmap_len(sbq_desc,
1819 PCI_DMA_FROMDEVICE);
1820 sbq_desc->p.skb = NULL;
1822 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1823 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1824 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1825 "Header in small, %d bytes in large. Chain large to small!\n",
1828 * The data is in a single large buffer. We
1829 * chain it to the header buffer's skb and let
1832 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1833 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1834 "Chaining page at offset = %d, for %d bytes to skb.\n",
1835 lbq_desc->p.pg_chunk.offset, length);
1836 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1837 lbq_desc->p.pg_chunk.offset,
1840 skb->data_len += length;
1841 skb->truesize += length;
1844 * The headers and data are in a single large buffer. We
1845 * copy it to a new skb and let it go. This can happen with
1846 * jumbo mtu on a non-TCP/UDP frame.
1848 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1849 skb = netdev_alloc_skb(qdev->ndev, length);
1851 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1852 "No skb available, drop the packet.\n");
1855 pci_unmap_page(qdev->pdev,
1856 dma_unmap_addr(lbq_desc,
1858 dma_unmap_len(lbq_desc, maplen),
1859 PCI_DMA_FROMDEVICE);
1860 skb_reserve(skb, NET_IP_ALIGN);
1861 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1862 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1864 skb_fill_page_desc(skb, 0,
1865 lbq_desc->p.pg_chunk.page,
1866 lbq_desc->p.pg_chunk.offset,
1869 skb->data_len += length;
1870 skb->truesize += length;
1872 __pskb_pull_tail(skb,
1873 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1874 VLAN_ETH_HLEN : ETH_HLEN);
1878 * The data is in a chain of large buffers
1879 * pointed to by a small buffer. We loop
1880 * through and chain them to our small header buffer.
1882 * frags: There are 18 max frags and our small
1883 * buffer will hold 32 of them. The thing is,
1884 * we'll use 3 max for our 9000 byte jumbo
1885 * frames. If the MTU goes up we could
1886 * eventually be in trouble.
1889 sbq_desc = ql_get_curr_sbuf(rx_ring);
1890 pci_unmap_single(qdev->pdev,
1891 dma_unmap_addr(sbq_desc, mapaddr),
1892 dma_unmap_len(sbq_desc, maplen),
1893 PCI_DMA_FROMDEVICE);
1894 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1896 * This is a non-TCP/UDP IP frame, so
1897 * the headers aren't split into a small
1898 * buffer. We have to use the small buffer
1899 * that contains our sg list as our skb to
1900 * send upstairs. Copy the sg list here to
1901 * a local buffer and use it to find the
1904 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905 "%d bytes of headers & data in chain of large.\n",
1907 skb = sbq_desc->p.skb;
1908 sbq_desc->p.skb = NULL;
1909 skb_reserve(skb, NET_IP_ALIGN);
1911 while (length > 0) {
1912 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1913 size = (length < rx_ring->lbq_buf_size) ? length :
1914 rx_ring->lbq_buf_size;
1916 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917 "Adding page %d to skb for %d bytes.\n",
1919 skb_fill_page_desc(skb, i,
1920 lbq_desc->p.pg_chunk.page,
1921 lbq_desc->p.pg_chunk.offset,
1924 skb->data_len += size;
1925 skb->truesize += size;
1929 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1930 VLAN_ETH_HLEN : ETH_HLEN);
1935 /* Process an inbound completion from an rx ring. */
1936 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1937 struct rx_ring *rx_ring,
1938 struct ib_mac_iocb_rsp *ib_mac_rsp,
1941 struct net_device *ndev = qdev->ndev;
1942 struct sk_buff *skb = NULL;
1944 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1946 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1947 if (unlikely(!skb)) {
1948 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1949 "No skb available, drop packet.\n");
1950 rx_ring->rx_dropped++;
1954 /* Frame error, so drop the packet. */
1955 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1956 netif_info(qdev, drv, qdev->ndev,
1957 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1958 dev_kfree_skb_any(skb);
1959 rx_ring->rx_errors++;
1963 /* The max framesize filter on this chip is set higher than
1964 * MTU since FCoE uses 2k frames.
1966 if (skb->len > ndev->mtu + ETH_HLEN) {
1967 dev_kfree_skb_any(skb);
1968 rx_ring->rx_dropped++;
1972 /* loopback self test for ethtool */
1973 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1974 ql_check_lb_frame(qdev, skb);
1975 dev_kfree_skb_any(skb);
1979 prefetch(skb->data);
1981 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1982 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1983 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1984 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1985 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1986 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1987 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1988 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1989 rx_ring->rx_multicast++;
1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993 "Promiscuous Packet.\n");
1996 skb->protocol = eth_type_trans(skb, ndev);
1997 skb_checksum_none_assert(skb);
1999 /* If rx checksum is on, and there are no
2000 * csum or frame errors.
2002 if ((ndev->features & NETIF_F_RXCSUM) &&
2003 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2005 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2006 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2007 "TCP checksum done!\n");
2008 skb->ip_summed = CHECKSUM_UNNECESSARY;
2009 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2010 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2011 /* Unfragmented ipv4 UDP frame. */
2012 struct iphdr *iph = (struct iphdr *) skb->data;
2013 if (!(iph->frag_off &
2014 ntohs(IP_MF|IP_OFFSET))) {
2015 skb->ip_summed = CHECKSUM_UNNECESSARY;
2016 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2017 "TCP checksum done!\n");
2022 rx_ring->rx_packets++;
2023 rx_ring->rx_bytes += skb->len;
2024 skb_record_rx_queue(skb, rx_ring->cq_id);
2025 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2026 __vlan_hwaccel_put_tag(skb, vlan_id);
2027 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2028 napi_gro_receive(&rx_ring->napi, skb);
2030 netif_receive_skb(skb);
2033 /* Process an inbound completion from an rx ring. */
2034 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2035 struct rx_ring *rx_ring,
2036 struct ib_mac_iocb_rsp *ib_mac_rsp)
2038 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2039 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2040 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2041 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2043 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2045 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2046 /* The data and headers are split into
2049 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2051 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2052 /* The data fit in a single small buffer.
2053 * Allocate a new skb, copy the data and
2054 * return the buffer to the free pool.
2056 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2058 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2059 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2060 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2061 /* TCP packet in a page chunk that's been checksummed.
2062 * Tack it on to our GRO skb and let it go.
2064 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2066 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2067 /* Non-TCP packet in a page chunk. Allocate an
2068 * skb, tack it on frags, and send it up.
2070 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2073 /* Non-TCP/UDP large frames that span multiple buffers
2074 * can be processed correctly by the split frame logic.
2076 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2080 return (unsigned long)length;
2083 /* Process an outbound completion from an rx ring. */
2084 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2085 struct ob_mac_iocb_rsp *mac_rsp)
2087 struct tx_ring *tx_ring;
2088 struct tx_ring_desc *tx_ring_desc;
2090 QL_DUMP_OB_MAC_RSP(mac_rsp);
2091 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2092 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2093 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2094 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2095 tx_ring->tx_packets++;
2096 dev_kfree_skb(tx_ring_desc->skb);
2097 tx_ring_desc->skb = NULL;
2099 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2102 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2103 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2104 netif_warn(qdev, tx_done, qdev->ndev,
2105 "Total descriptor length did not match transfer length.\n");
2107 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2108 netif_warn(qdev, tx_done, qdev->ndev,
2109 "Frame too short to be valid, not sent.\n");
2111 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2112 netif_warn(qdev, tx_done, qdev->ndev,
2113 "Frame too long, but sent anyway.\n");
2115 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2116 netif_warn(qdev, tx_done, qdev->ndev,
2117 "PCI backplane error. Frame not sent.\n");
2120 atomic_inc(&tx_ring->tx_count);
2123 /* Fire up a handler to reset the MPI processor. */
2124 void ql_queue_fw_error(struct ql_adapter *qdev)
2127 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2130 void ql_queue_asic_error(struct ql_adapter *qdev)
2133 ql_disable_interrupts(qdev);
2134 /* Clear adapter up bit to signal the recovery
2135 * process that it shouldn't kill the reset worker
2138 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2139 /* Set the asic recovery bit to tell the reset process that we are
2140 * in fatal error recovery rather than a normal close
2142 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2143 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2146 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2147 struct ib_ae_iocb_rsp *ib_ae_rsp)
2149 switch (ib_ae_rsp->event) {
2150 case MGMT_ERR_EVENT:
2151 netif_err(qdev, rx_err, qdev->ndev,
2152 "Management Processor Fatal Error.\n");
2153 ql_queue_fw_error(qdev);
2156 case CAM_LOOKUP_ERR_EVENT:
2157 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2158 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2159 ql_queue_asic_error(qdev);
2162 case SOFT_ECC_ERROR_EVENT:
2163 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2164 ql_queue_asic_error(qdev);
2167 case PCI_ERR_ANON_BUF_RD:
2168 netdev_err(qdev->ndev, "PCI error occurred when reading "
2169 "anonymous buffers from rx_ring %d.\n",
2171 ql_queue_asic_error(qdev);
2175 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2177 ql_queue_asic_error(qdev);
2182 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2184 struct ql_adapter *qdev = rx_ring->qdev;
2185 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2186 struct ob_mac_iocb_rsp *net_rsp = NULL;
2189 struct tx_ring *tx_ring;
2190 /* While there are entries in the completion queue. */
2191 while (prod != rx_ring->cnsmr_idx) {
2193 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2194 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2195 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2197 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2199 switch (net_rsp->opcode) {
2201 case OPCODE_OB_MAC_TSO_IOCB:
2202 case OPCODE_OB_MAC_IOCB:
2203 ql_process_mac_tx_intr(qdev, net_rsp);
2206 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2211 ql_update_cq(rx_ring);
2212 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2216 ql_write_cq_idx(rx_ring);
2217 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2218 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2219 if (atomic_read(&tx_ring->queue_stopped) &&
2220 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2222 * The queue got stopped because the tx_ring was full.
2223 * Wake it up, because it's now at least 25% empty.
2225 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2231 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2233 struct ql_adapter *qdev = rx_ring->qdev;
2234 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2235 struct ql_net_rsp_iocb *net_rsp;
2238 /* While there are entries in the completion queue. */
2239 while (prod != rx_ring->cnsmr_idx) {
2241 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"cq_id = %d, prod = %d, cnsmr = %d.\n",
2243 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2245 net_rsp = rx_ring->curr_entry;
2247 switch (net_rsp->opcode) {
2248 case OPCODE_IB_MAC_IOCB:
2249 ql_process_mac_rx_intr(qdev, rx_ring,
2250 (struct ib_mac_iocb_rsp *)
2254 case OPCODE_IB_AE_IOCB:
2255 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2259 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2260 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2265 ql_update_cq(rx_ring);
2266 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2267 if (count == budget)
2270 ql_update_buffer_queues(qdev, rx_ring);
2271 ql_write_cq_idx(rx_ring);
2275 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2277 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2278 struct ql_adapter *qdev = rx_ring->qdev;
2279 struct rx_ring *trx_ring;
2280 int i, work_done = 0;
2281 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2284 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2286 /* Service the TX rings first. They start
2287 * right after the RSS rings. */
2288 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2289 trx_ring = &qdev->rx_ring[i];
2290 /* If this TX completion ring belongs to this vector and
2291 * it's not empty then service it.
2293 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2294 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2295 trx_ring->cnsmr_idx)) {
2296 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2297 "%s: Servicing TX completion ring %d.\n",
2298 __func__, trx_ring->cq_id);
2299 ql_clean_outbound_rx_ring(trx_ring);
2304 * Now service the RSS ring if it's active.
2306 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2307 rx_ring->cnsmr_idx) {
2308 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309 "%s: Servicing RX completion ring %d.\n",
2310 __func__, rx_ring->cq_id);
2311 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
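/* Per the NAPI contract, only when less than the full budget was
* consumed do we complete the poll below and re-enable this vector's
* completion interrupt.
*/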
2314 if (work_done < budget) {
2315 napi_complete(napi);
2316 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2321 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2323 struct ql_adapter *qdev = netdev_priv(ndev);
2325 if (features & NETIF_F_HW_VLAN_RX) {
2326 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2327 "Turning on VLAN in NIC_RCV_CFG.\n");
2328 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2329 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2331 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2332 "Turning off VLAN in NIC_RCV_CFG.\n");
2333 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2337 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
/* Since there is no support for separate rx/tx vlan accel
* enable/disable, make sure the tx flag is always in the same state as rx.
*/
2343 if (features & NETIF_F_HW_VLAN_RX)
2344 features |= NETIF_F_HW_VLAN_TX;
else
features &= ~NETIF_F_HW_VLAN_TX;
2351 static int qlge_set_features(struct net_device *ndev, u32 features)
2353 u32 changed = ndev->features ^ features;
2355 if (changed & NETIF_F_HW_VLAN_RX)
2356 qlge_vlan_mode(ndev, features);
2361 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2363 u32 enable_bit = MAC_ADDR_E;
2365 if (ql_set_mac_addr_reg
2366 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2367 netif_err(qdev, ifup, qdev->ndev,
2368 "Failed to init vlan address.\n");
2372 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2374 struct ql_adapter *qdev = netdev_priv(ndev);
2377 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2381 __qlge_vlan_rx_add_vid(qdev, vid);
2382 set_bit(vid, qdev->active_vlans);
2384 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2387 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2391 if (ql_set_mac_addr_reg
2392 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2393 netif_err(qdev, ifup, qdev->ndev,
2394 "Failed to clear vlan address.\n");
2398 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2400 struct ql_adapter *qdev = netdev_priv(ndev);
2403 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2407 __qlge_vlan_rx_kill_vid(qdev, vid);
2408 clear_bit(vid, qdev->active_vlans);
2410 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2413 static void qlge_restore_vlan(struct ql_adapter *qdev)
2418 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2422 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2423 __qlge_vlan_rx_add_vid(qdev, vid);
2425 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2428 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2429 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2431 struct rx_ring *rx_ring = dev_id;
2432 napi_schedule(&rx_ring->napi);
2436 /* This handles a fatal error, MPI activity, and the default
2437 * rx_ring in an MSI-X multiple vector environment.
* In an MSI/Legacy environment it also processes the rest of
* the rx_rings.
*/
2441 static irqreturn_t qlge_isr(int irq, void *dev_id)
2443 struct rx_ring *rx_ring = dev_id;
2444 struct ql_adapter *qdev = rx_ring->qdev;
2445 struct intr_context *intr_context = &qdev->intr_context[0];
2449 spin_lock(&qdev->hw_lock);
2450 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2451 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2452 "Shared Interrupt, Not ours!\n");
2453 spin_unlock(&qdev->hw_lock);
2456 spin_unlock(&qdev->hw_lock);
2458 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2461 * Check for fatal error.
2464 ql_queue_asic_error(qdev);
2465 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2466 var = ql_read32(qdev, ERR_STS);
2467 netdev_err(qdev->ndev, "Resetting chip. "
2468 "Error Status Register = 0x%x\n", var);
2473 * Check MPI processor activity.
2475 if ((var & STS_PI) &&
2476 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2478 * We've got an async event or mailbox completion.
2479 * Handle it and clear the source of the interrupt.
2481 netif_err(qdev, intr, qdev->ndev,
2482 "Got MPI processor interrupt.\n");
2483 ql_disable_completion_interrupt(qdev, intr_context->intr);
2484 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2485 queue_delayed_work_on(smp_processor_id(),
2486 qdev->workqueue, &qdev->mpi_work, 0);
2491 * Get the bit-mask that shows the active queues for this
2492 * pass. Compare it to the queues that this irq services
2493 * and call napi if there's a match.
2495 var = ql_read32(qdev, ISR1);
2496 if (var & intr_context->irq_mask) {
2497 netif_info(qdev, intr, qdev->ndev,
2498 "Waking handler for rx_ring[0].\n");
2499 ql_disable_completion_interrupt(qdev, intr_context->intr);
2500 napi_schedule(&rx_ring->napi);
2503 ql_enable_completion_interrupt(qdev, intr_context->intr);
2504 return work_done ? IRQ_HANDLED : IRQ_NONE;
2507 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2510 if (skb_is_gso(skb)) {
2512 if (skb_header_cloned(skb)) {
2513 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2518 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2519 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2520 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2521 mac_iocb_ptr->total_hdrs_len =
2522 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2523 mac_iocb_ptr->net_trans_offset =
2524 cpu_to_le16(skb_network_offset(skb) |
2525 skb_transport_offset(skb)
2526 << OB_MAC_TRANSPORT_HDR_SHIFT);
2527 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2528 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
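/* Seed the TCP checksum with the pseudo-header sum (length omitted)
* so the hardware can compute and insert the final checksum for each
* segment it generates.
*/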
2529 if (likely(skb->protocol == htons(ETH_P_IP))) {
2530 struct iphdr *iph = ip_hdr(skb);
2532 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2537 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2538 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2539 tcp_hdr(skb)->check =
2540 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2541 &ipv6_hdr(skb)->daddr,
2549 static void ql_hw_csum_setup(struct sk_buff *skb,
2550 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2553 struct iphdr *iph = ip_hdr(skb);
2555 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2556 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2557 mac_iocb_ptr->net_trans_offset =
2558 cpu_to_le16(skb_network_offset(skb) |
2559 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2561 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
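/* Derive the TCP/UDP length from the IP total length minus the IP
* header, then seed the checksum field with the pseudo-header sum at
* the end of this routine; the chip adds in the L4 header and payload.
*/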
2562 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2563 if (likely(iph->protocol == IPPROTO_TCP)) {
2564 check = &(tcp_hdr(skb)->check);
2565 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2566 mac_iocb_ptr->total_hdrs_len =
2567 cpu_to_le16(skb_transport_offset(skb) +
2568 (tcp_hdr(skb)->doff << 2));
2570 check = &(udp_hdr(skb)->check);
2571 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2572 mac_iocb_ptr->total_hdrs_len =
2573 cpu_to_le16(skb_transport_offset(skb) +
2574 sizeof(struct udphdr));
2576 *check = ~csum_tcpudp_magic(iph->saddr,
2577 iph->daddr, len, iph->protocol, 0);
2580 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2582 struct tx_ring_desc *tx_ring_desc;
2583 struct ob_mac_iocb_req *mac_iocb_ptr;
2584 struct ql_adapter *qdev = netdev_priv(ndev);
2586 struct tx_ring *tx_ring;
2587 u32 tx_ring_idx = (u32) skb->queue_mapping;
2589 tx_ring = &qdev->tx_ring[tx_ring_idx];
2591 if (skb_padto(skb, ETH_ZLEN))
2592 return NETDEV_TX_OK;
2594 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595 netif_info(qdev, tx_queued, qdev->ndev,
"%s: shutting down tx queue %d due to lack of resources.\n",
2597 __func__, tx_ring_idx);
2598 netif_stop_subqueue(ndev, tx_ring->wq_id);
2599 atomic_inc(&tx_ring->queue_stopped);
2600 tx_ring->tx_errors++;
2601 return NETDEV_TX_BUSY;
2603 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2604 mac_iocb_ptr = tx_ring_desc->queue_entry;
2605 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2607 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2608 mac_iocb_ptr->tid = tx_ring_desc->index;
2609 /* We use the upper 32-bits to store the tx queue for this IO.
2610 * When we get the completion we can use it to establish the context.
2612 mac_iocb_ptr->txq_idx = tx_ring_idx;
2613 tx_ring_desc->skb = skb;
2615 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2617 if (vlan_tx_tag_present(skb)) {
2618 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2619 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2620 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2621 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2623 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2625 dev_kfree_skb_any(skb);
2626 return NETDEV_TX_OK;
2627 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2628 ql_hw_csum_setup(skb,
2629 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2631 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2633 netif_err(qdev, tx_queued, qdev->ndev,
2634 "Could not map the segments.\n");
2635 tx_ring->tx_errors++;
2636 return NETDEV_TX_BUSY;
2638 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2639 tx_ring->prod_idx++;
2640 if (tx_ring->prod_idx == tx_ring->wq_len)
2641 tx_ring->prod_idx = 0;
2644 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2645 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2646 "tx queued, slot %d, len %d\n",
2647 tx_ring->prod_idx, skb->len);
2649 atomic_dec(&tx_ring->tx_count);
2650 return NETDEV_TX_OK;
2654 static void ql_free_shadow_space(struct ql_adapter *qdev)
2656 if (qdev->rx_ring_shadow_reg_area) {
2657 pci_free_consistent(qdev->pdev,
2659 qdev->rx_ring_shadow_reg_area,
2660 qdev->rx_ring_shadow_reg_dma);
2661 qdev->rx_ring_shadow_reg_area = NULL;
2663 if (qdev->tx_ring_shadow_reg_area) {
2664 pci_free_consistent(qdev->pdev,
2666 qdev->tx_ring_shadow_reg_area,
2667 qdev->tx_ring_shadow_reg_dma);
2668 qdev->tx_ring_shadow_reg_area = NULL;
2672 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2674 qdev->rx_ring_shadow_reg_area =
2675 pci_alloc_consistent(qdev->pdev,
2676 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2677 if (qdev->rx_ring_shadow_reg_area == NULL) {
2678 netif_err(qdev, ifup, qdev->ndev,
2679 "Allocation of RX shadow space failed.\n");
2682 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2683 qdev->tx_ring_shadow_reg_area =
2684 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2685 &qdev->tx_ring_shadow_reg_dma);
2686 if (qdev->tx_ring_shadow_reg_area == NULL) {
2687 netif_err(qdev, ifup, qdev->ndev,
2688 "Allocation of TX shadow space failed.\n");
2689 goto err_wqp_sh_area;
2691 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2695 pci_free_consistent(qdev->pdev,
2697 qdev->rx_ring_shadow_reg_area,
2698 qdev->rx_ring_shadow_reg_dma);
2702 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704 struct tx_ring_desc *tx_ring_desc;
2706 struct ob_mac_iocb_req *mac_iocb_ptr;
2708 mac_iocb_ptr = tx_ring->wq_base;
2709 tx_ring_desc = tx_ring->q;
2710 for (i = 0; i < tx_ring->wq_len; i++) {
2711 tx_ring_desc->index = i;
2712 tx_ring_desc->skb = NULL;
2713 tx_ring_desc->queue_entry = mac_iocb_ptr;
2717 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2718 atomic_set(&tx_ring->queue_stopped, 0);
2721 static void ql_free_tx_resources(struct ql_adapter *qdev,
2722 struct tx_ring *tx_ring)
2724 if (tx_ring->wq_base) {
2725 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2726 tx_ring->wq_base, tx_ring->wq_base_dma);
2727 tx_ring->wq_base = NULL;
2733 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2734 struct tx_ring *tx_ring)
2737 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2738 &tx_ring->wq_base_dma);
2740 if ((tx_ring->wq_base == NULL) ||
2741 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2742 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2746 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2747 if (tx_ring->q == NULL)
2752 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2753 tx_ring->wq_base, tx_ring->wq_base_dma);
2757 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2759 struct bq_desc *lbq_desc;
2761 uint32_t curr_idx, clean_idx;
2763 curr_idx = rx_ring->lbq_curr_idx;
2764 clean_idx = rx_ring->lbq_clean_idx;
2765 while (curr_idx != clean_idx) {
2766 lbq_desc = &rx_ring->lbq[curr_idx];
2768 if (lbq_desc->p.pg_chunk.last_flag) {
2769 pci_unmap_page(qdev->pdev,
2770 lbq_desc->p.pg_chunk.map,
2771 ql_lbq_block_size(qdev),
2772 PCI_DMA_FROMDEVICE);
2773 lbq_desc->p.pg_chunk.last_flag = 0;
2776 put_page(lbq_desc->p.pg_chunk.page);
2777 lbq_desc->p.pg_chunk.page = NULL;
2779 if (++curr_idx == rx_ring->lbq_len)
2785 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2788 struct bq_desc *sbq_desc;
2790 for (i = 0; i < rx_ring->sbq_len; i++) {
2791 sbq_desc = &rx_ring->sbq[i];
2792 if (sbq_desc == NULL) {
2793 netif_err(qdev, ifup, qdev->ndev,
2794 "sbq_desc %d is NULL.\n", i);
2797 if (sbq_desc->p.skb) {
2798 pci_unmap_single(qdev->pdev,
2799 dma_unmap_addr(sbq_desc, mapaddr),
2800 dma_unmap_len(sbq_desc, maplen),
2801 PCI_DMA_FROMDEVICE);
2802 dev_kfree_skb(sbq_desc->p.skb);
2803 sbq_desc->p.skb = NULL;
2808 /* Free all large and small rx buffers associated
2809 * with the completion queues for this device.
2811 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2814 struct rx_ring *rx_ring;
2816 for (i = 0; i < qdev->rx_ring_count; i++) {
2817 rx_ring = &qdev->rx_ring[i];
2819 ql_free_lbq_buffers(qdev, rx_ring);
2821 ql_free_sbq_buffers(qdev, rx_ring);
2825 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2827 struct rx_ring *rx_ring;
2830 for (i = 0; i < qdev->rx_ring_count; i++) {
2831 rx_ring = &qdev->rx_ring[i];
2832 if (rx_ring->type != TX_Q)
2833 ql_update_buffer_queues(qdev, rx_ring);
2837 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2838 struct rx_ring *rx_ring)
2841 struct bq_desc *lbq_desc;
2842 __le64 *bq = rx_ring->lbq_base;
2844 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2845 for (i = 0; i < rx_ring->lbq_len; i++) {
2846 lbq_desc = &rx_ring->lbq[i];
2847 memset(lbq_desc, 0, sizeof(*lbq_desc));
2848 lbq_desc->index = i;
2849 lbq_desc->addr = bq;
2854 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2855 struct rx_ring *rx_ring)
2858 struct bq_desc *sbq_desc;
2859 __le64 *bq = rx_ring->sbq_base;
2861 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2862 for (i = 0; i < rx_ring->sbq_len; i++) {
2863 sbq_desc = &rx_ring->sbq[i];
2864 memset(sbq_desc, 0, sizeof(*sbq_desc));
2865 sbq_desc->index = i;
2866 sbq_desc->addr = bq;
2871 static void ql_free_rx_resources(struct ql_adapter *qdev,
2872 struct rx_ring *rx_ring)
2874 /* Free the small buffer queue. */
2875 if (rx_ring->sbq_base) {
2876 pci_free_consistent(qdev->pdev,
2878 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2879 rx_ring->sbq_base = NULL;
2882 /* Free the small buffer queue control blocks. */
2883 kfree(rx_ring->sbq);
2884 rx_ring->sbq = NULL;
2886 /* Free the large buffer queue. */
2887 if (rx_ring->lbq_base) {
2888 pci_free_consistent(qdev->pdev,
2890 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2891 rx_ring->lbq_base = NULL;
2894 /* Free the large buffer queue control blocks. */
2895 kfree(rx_ring->lbq);
2896 rx_ring->lbq = NULL;
2898 /* Free the rx queue. */
2899 if (rx_ring->cq_base) {
2900 pci_free_consistent(qdev->pdev,
2902 rx_ring->cq_base, rx_ring->cq_base_dma);
2903 rx_ring->cq_base = NULL;
2907 /* Allocate queues and buffers for this completions queue based
2908 * on the values in the parameter structure. */
2909 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2910 struct rx_ring *rx_ring)
2914 * Allocate the completion queue for this rx_ring.
2917 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2918 &rx_ring->cq_base_dma);
2920 if (rx_ring->cq_base == NULL) {
2921 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2925 if (rx_ring->sbq_len) {
2927 * Allocate small buffer queue.
2930 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2931 &rx_ring->sbq_base_dma);
2933 if (rx_ring->sbq_base == NULL) {
2934 netif_err(qdev, ifup, qdev->ndev,
2935 "Small buffer queue allocation failed.\n");
2940 * Allocate small buffer queue control blocks.
2943 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2945 if (rx_ring->sbq == NULL) {
2946 netif_err(qdev, ifup, qdev->ndev,
2947 "Small buffer queue control block allocation failed.\n");
2951 ql_init_sbq_ring(qdev, rx_ring);
2954 if (rx_ring->lbq_len) {
2956 * Allocate large buffer queue.
2959 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2960 &rx_ring->lbq_base_dma);
2962 if (rx_ring->lbq_base == NULL) {
2963 netif_err(qdev, ifup, qdev->ndev,
2964 "Large buffer queue allocation failed.\n");
2968 * Allocate large buffer queue control blocks.
2971 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2973 if (rx_ring->lbq == NULL) {
2974 netif_err(qdev, ifup, qdev->ndev,
2975 "Large buffer queue control block allocation failed.\n");
2979 ql_init_lbq_ring(qdev, rx_ring);
2985 ql_free_rx_resources(qdev, rx_ring);
2989 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2991 struct tx_ring *tx_ring;
2992 struct tx_ring_desc *tx_ring_desc;
2996 * Loop through all queues and free
2999 for (j = 0; j < qdev->tx_ring_count; j++) {
3000 tx_ring = &qdev->tx_ring[j];
3001 for (i = 0; i < tx_ring->wq_len; i++) {
3002 tx_ring_desc = &tx_ring->q[i];
3003 if (tx_ring_desc && tx_ring_desc->skb) {
3004 netif_err(qdev, ifdown, qdev->ndev,
3005 "Freeing lost SKB %p, from queue %d, index %d.\n",
3006 tx_ring_desc->skb, j,
3007 tx_ring_desc->index);
3008 ql_unmap_send(qdev, tx_ring_desc,
3009 tx_ring_desc->map_cnt);
3010 dev_kfree_skb(tx_ring_desc->skb);
3011 tx_ring_desc->skb = NULL;
3017 static void ql_free_mem_resources(struct ql_adapter *qdev)
3021 for (i = 0; i < qdev->tx_ring_count; i++)
3022 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3023 for (i = 0; i < qdev->rx_ring_count; i++)
3024 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3025 ql_free_shadow_space(qdev);
3028 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3032 /* Allocate space for our shadow registers and such. */
3033 if (ql_alloc_shadow_space(qdev))
3036 for (i = 0; i < qdev->rx_ring_count; i++) {
3037 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3038 netif_err(qdev, ifup, qdev->ndev,
3039 "RX resource allocation failed.\n");
3043 /* Allocate tx queue resources */
3044 for (i = 0; i < qdev->tx_ring_count; i++) {
3045 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3046 netif_err(qdev, ifup, qdev->ndev,
3047 "TX resource allocation failed.\n");
3054 ql_free_mem_resources(qdev);
3058 /* Set up the rx ring control block and pass it to the chip.
3059 * The control block is defined as
3060 * "Completion Queue Initialization Control Block", or cqicb.
3062 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3064 struct cqicb *cqicb = &rx_ring->cqicb;
3065 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3066 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3067 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3068 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3069 void __iomem *doorbell_area =
3070 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3074 __le64 *base_indirect_ptr;
3077 /* Set up the shadow registers for this ring. */
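/* The per-ring shadow area is carved out of one coherent page:
* an 8-byte completion producer index that the chip updates, followed
* by the indirection lists of DB_PAGE_SIZE pages backing the large
* and small buffer queues.
*/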
3078 rx_ring->prod_idx_sh_reg = shadow_reg;
3079 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3080 *rx_ring->prod_idx_sh_reg = 0;
3081 shadow_reg += sizeof(u64);
3082 shadow_reg_dma += sizeof(u64);
3083 rx_ring->lbq_base_indirect = shadow_reg;
3084 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3085 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3086 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3087 rx_ring->sbq_base_indirect = shadow_reg;
3088 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3090 /* PCI doorbell mem area + 0x00 for consumer index register */
3091 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3092 rx_ring->cnsmr_idx = 0;
3093 rx_ring->curr_entry = rx_ring->cq_base;
3095 /* PCI doorbell mem area + 0x04 for valid register */
3096 rx_ring->valid_db_reg = doorbell_area + 0x04;
3098 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3099 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3101 /* PCI doorbell mem area + 0x1c */
3102 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3104 memset((void *)cqicb, 0, sizeof(struct cqicb));
3105 cqicb->msix_vect = rx_ring->irq;
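/* Queue lengths are 16-bit fields, so a full 65536-entry queue is
* encoded as zero.
*/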
3107 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3108 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3110 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3112 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3115 * Set up the control block load flags.
3117 cqicb->flags = FLAGS_LC | /* Load queue base address */
3118 FLAGS_LV | /* Load MSI-X vector */
3119 FLAGS_LI; /* Load irq delay values */
3120 if (rx_ring->lbq_len) {
3121 cqicb->flags |= FLAGS_LL; /* Load lbq values */
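/* Build the indirection list for the large buffer queue: one entry
* per DB_PAGE_SIZE chunk of the queue's DMA area.
*/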
3122 tmp = (u64)rx_ring->lbq_base_dma;
3123 base_indirect_ptr = rx_ring->lbq_base_indirect;
3126 *base_indirect_ptr = cpu_to_le64(tmp);
3127 tmp += DB_PAGE_SIZE;
3128 base_indirect_ptr++;
3130 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3132 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3133 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3134 (u16) rx_ring->lbq_buf_size;
3135 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3136 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3137 (u16) rx_ring->lbq_len;
3138 cqicb->lbq_len = cpu_to_le16(bq_len);
3139 rx_ring->lbq_prod_idx = 0;
3140 rx_ring->lbq_curr_idx = 0;
3141 rx_ring->lbq_clean_idx = 0;
3142 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3144 if (rx_ring->sbq_len) {
3145 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3146 tmp = (u64)rx_ring->sbq_base_dma;
3147 base_indirect_ptr = rx_ring->sbq_base_indirect;
3150 *base_indirect_ptr = cpu_to_le64(tmp);
3151 tmp += DB_PAGE_SIZE;
3152 base_indirect_ptr++;
3154 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3156 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3157 cqicb->sbq_buf_size =
3158 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3159 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3160 (u16) rx_ring->sbq_len;
3161 cqicb->sbq_len = cpu_to_le16(bq_len);
3162 rx_ring->sbq_prod_idx = 0;
3163 rx_ring->sbq_curr_idx = 0;
3164 rx_ring->sbq_clean_idx = 0;
3165 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3167 switch (rx_ring->type) {
3169 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3170 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3173 /* Inbound completion handling rx_rings run in
3174 * separate NAPI contexts.
3176 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3178 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3179 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3182 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3183 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3185 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3186 "Initializing rx work queue.\n");
3187 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3188 CFG_LCQ, rx_ring->cq_id);
3190 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3196 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3198 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3199 void __iomem *doorbell_area =
3200 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3201 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3202 (tx_ring->wq_id * sizeof(u64));
3203 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3204 (tx_ring->wq_id * sizeof(u64));
3208 * Assign doorbell registers for this tx_ring.
3210 /* TX PCI doorbell mem area for tx producer index */
3211 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3212 tx_ring->prod_idx = 0;
3213 /* TX PCI doorbell mem area + 0x04 */
3214 tx_ring->valid_db_reg = doorbell_area + 0x04;
3217 * Assign shadow registers for this tx_ring.
3219 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3220 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3222 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3223 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3224 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3225 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3227 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3229 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
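/* Point the chip at the shadow location where it can post the work
* queue consumer index, so TX progress can be checked from memory
* rather than with a register read.
*/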
3231 ql_init_tx_ring(qdev, tx_ring);
3233 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3234 (u16) tx_ring->wq_id);
3236 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3239 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3240 "Successfully loaded WQICB.\n");
3244 static void ql_disable_msix(struct ql_adapter *qdev)
3246 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3247 pci_disable_msix(qdev->pdev);
3248 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3249 kfree(qdev->msi_x_entry);
3250 qdev->msi_x_entry = NULL;
3251 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3252 pci_disable_msi(qdev->pdev);
3253 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3257 /* We start by trying to get the number of vectors
3258 * stored in qdev->intr_count. If we don't get that
3259 * many then we reduce the count and try again.
3261 static void ql_enable_msix(struct ql_adapter *qdev)
3265 /* Get the MSIX vectors. */
3266 if (qlge_irq_type == MSIX_IRQ) {
3267 /* Try to alloc space for the msix struct,
3268 * if it fails then go to MSI/legacy.
3270 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3271 sizeof(struct msix_entry),
3273 if (!qdev->msi_x_entry) {
3274 qlge_irq_type = MSI_IRQ;
3278 for (i = 0; i < qdev->intr_count; i++)
3279 qdev->msi_x_entry[i].entry = i;
3281 /* Loop to get our vectors. We start with
3282 * what we want and settle for what we get.
3285 err = pci_enable_msix(qdev->pdev,
3286 qdev->msi_x_entry, qdev->intr_count);
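/* A positive return from pci_enable_msix() is the number of vectors
* actually available; retry the request with that smaller count.
*/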
3288 qdev->intr_count = err;
3292 kfree(qdev->msi_x_entry);
3293 qdev->msi_x_entry = NULL;
3294 netif_warn(qdev, ifup, qdev->ndev,
3295 "MSI-X Enable failed, trying MSI.\n");
3296 qdev->intr_count = 1;
3297 qlge_irq_type = MSI_IRQ;
3298 } else if (err == 0) {
3299 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3300 netif_info(qdev, ifup, qdev->ndev,
3301 "MSI-X Enabled, got %d vectors.\n",
3307 qdev->intr_count = 1;
3308 if (qlge_irq_type == MSI_IRQ) {
3309 if (!pci_enable_msi(qdev->pdev)) {
3310 set_bit(QL_MSI_ENABLED, &qdev->flags);
3311 netif_info(qdev, ifup, qdev->ndev,
3312 "Running with MSI interrupts.\n");
3316 qlge_irq_type = LEG_IRQ;
3317 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3318 "Running with legacy interrupts.\n");
/* Each vector services 1 RSS ring and 1 or more
3322 * TX completion rings. This function loops through
3323 * the TX completion rings and assigns the vector that
3324 * will service it. An example would be if there are
3325 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3326 * This would mean that vector 0 would service RSS ring 0
3327 * and TX completion rings 0,1,2 and 3. Vector 1 would
3328 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3330 static void ql_set_tx_vect(struct ql_adapter *qdev)
3333 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3335 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3336 /* Assign irq vectors to TX rx_rings.*/
3337 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3338 i < qdev->rx_ring_count; i++) {
3339 if (j == tx_rings_per_vector) {
3343 qdev->rx_ring[i].irq = vect;
3347 /* For single vector all rings have an irq
3350 for (i = 0; i < qdev->rx_ring_count; i++)
3351 qdev->rx_ring[i].irq = 0;
3355 /* Set the interrupt mask for this vector. Each vector
3356 * will service 1 RSS ring and 1 or more TX completion
3357 * rings. This function sets up a bit mask per vector
3358 * that indicates which rings it services.
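* For example, with 2 vectors and 8 TX completion rings, vector 1's
* mask would cover its own RSS ring plus TX completion rings 4-7.
*/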
3360 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3362 int j, vect = ctx->intr;
3363 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3365 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3366 /* Add the RSS ring serviced by this vector
3369 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3370 /* Add the TX ring(s) serviced by this vector
3372 for (j = 0; j < tx_rings_per_vector; j++) {
3374 (1 << qdev->rx_ring[qdev->rss_ring_count +
3375 (vect * tx_rings_per_vector) + j].cq_id);
3378 /* For single vector we just shift each queue's
3381 for (j = 0; j < qdev->rx_ring_count; j++)
3382 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3387 * Here we build the intr_context structures based on
3388 * our rx_ring count and intr vector count.
3389 * The intr_context structure is used to hook each vector
3390 * to possibly different handlers.
3392 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3395 struct intr_context *intr_context = &qdev->intr_context[0];
3397 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Each rx_ring has its
* own intr_context since we have separate
* vectors for each queue.
*/
3402 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3403 qdev->rx_ring[i].irq = i;
3404 intr_context->intr = i;
3405 intr_context->qdev = qdev;
3406 /* Set up this vector's bit-mask that indicates
3407 * which queues it services.
3409 ql_set_irq_mask(qdev, intr_context);
3411 * We set up each vectors enable/disable/read bits so
3412 * there's no bit/mask calculations in the critical path.
3414 intr_context->intr_en_mask =
3415 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3416 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3418 intr_context->intr_dis_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3422 intr_context->intr_read_mask =
3423 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3424 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3427 /* The first vector/queue handles
3428 * broadcast/multicast, fatal errors,
3429 * and firmware events. This in addition
3430 * to normal inbound NAPI processing.
3432 intr_context->handler = qlge_isr;
3433 sprintf(intr_context->name, "%s-rx-%d",
3434 qdev->ndev->name, i);
3437 * Inbound queues handle unicast frames only.
3439 intr_context->handler = qlge_msix_rx_isr;
3440 sprintf(intr_context->name, "%s-rx-%d",
3441 qdev->ndev->name, i);
3446 * All rx_rings use the same intr_context since
3447 * there is only one vector.
3449 intr_context->intr = 0;
3450 intr_context->qdev = qdev;
3452 * We set up each vectors enable/disable/read bits so
3453 * there's no bit/mask calculations in the critical path.
3455 intr_context->intr_en_mask =
3456 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3457 intr_context->intr_dis_mask =
3458 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3459 INTR_EN_TYPE_DISABLE;
3460 intr_context->intr_read_mask =
3461 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3463 * Single interrupt means one handler for all rings.
3465 intr_context->handler = qlge_isr;
3466 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3467 /* Set up this vector's bit-mask that indicates
3468 * which queues it services. In this case there is
3469 * a single vector so it will service all RSS and
3470 * TX completion rings.
3472 ql_set_irq_mask(qdev, intr_context);
3474 /* Tell the TX completion rings which MSIx vector
3475 * they will be using.
3477 ql_set_tx_vect(qdev);
3480 static void ql_free_irq(struct ql_adapter *qdev)
3483 struct intr_context *intr_context = &qdev->intr_context[0];
3485 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3486 if (intr_context->hooked) {
3487 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3488 free_irq(qdev->msi_x_entry[i].vector,
3490 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3491 "freeing msix interrupt %d.\n", i);
3493 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3494 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3495 "freeing msi interrupt %d.\n", i);
3499 ql_disable_msix(qdev);
3502 static int ql_request_irq(struct ql_adapter *qdev)
3506 struct pci_dev *pdev = qdev->pdev;
3507 struct intr_context *intr_context = &qdev->intr_context[0];
3509 ql_resolve_queues_to_irqs(qdev);
3511 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3512 atomic_set(&intr_context->irq_cnt, 0);
3513 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3514 status = request_irq(qdev->msi_x_entry[i].vector,
3515 intr_context->handler,
3520 netif_err(qdev, ifup, qdev->ndev,
3521 "Failed request for MSIX interrupt %d.\n",
3525 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3526 "Hooked intr %d, queue type %s, with name %s.\n",
3528 qdev->rx_ring[i].type == DEFAULT_Q ?
3530 qdev->rx_ring[i].type == TX_Q ?
3532 qdev->rx_ring[i].type == RX_Q ?
3534 intr_context->name);
3537 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3538 "trying msi or legacy interrupts.\n");
3539 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3540 "%s: irq = %d.\n", __func__, pdev->irq);
3541 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3542 "%s: context->name = %s.\n", __func__,
3543 intr_context->name);
3544 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3545 "%s: dev_id = 0x%p.\n", __func__,
3548 request_irq(pdev->irq, qlge_isr,
3549 test_bit(QL_MSI_ENABLED,
3551 flags) ? 0 : IRQF_SHARED,
3552 intr_context->name, &qdev->rx_ring[0]);
3556 netif_err(qdev, ifup, qdev->ndev,
3557 "Hooked intr %d, queue type %s, with name %s.\n",
3559 qdev->rx_ring[0].type == DEFAULT_Q ?
3561 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3562 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3563 intr_context->name);
3565 intr_context->hooked = 1;
netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3574 static int ql_start_rss(struct ql_adapter *qdev)
3576 static const u8 init_hash_seed[] = {
3577 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3578 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3579 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3580 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3581 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3583 struct ricb *ricb = &qdev->ricb;
3586 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3588 memset((void *)ricb, 0, sizeof(*ricb));
3590 ricb->base_cq = RSS_L4K;
3592 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3593 ricb->mask = cpu_to_le16((u16)(0x3ff));
/*
* Fill out the Indirection Table.
*/
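/* Entry i maps to RSS ring (i & (rss_ring_count - 1)); with a
* power-of-two ring count this spreads the 1024 hash buckets evenly
* across the inbound queues.
*/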
3598 for (i = 0; i < 1024; i++)
3599 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3601 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3602 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3604 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3606 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3608 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3611 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3612 "Successfully loaded RICB.\n");
3616 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3620 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3623 /* Clear all the entries in the routing table. */
3624 for (i = 0; i < 16; i++) {
3625 status = ql_set_routing_reg(qdev, i, 0, 0);
3627 netif_err(qdev, ifup, qdev->ndev,
3628 "Failed to init routing register for CAM packets.\n");
3632 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3636 /* Initialize the frame-to-queue routing. */
3637 static int ql_route_initialize(struct ql_adapter *qdev)
3641 /* Clear all the entries in the routing table. */
3642 status = ql_clear_routing_entries(qdev);
3646 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3650 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3651 RT_IDX_IP_CSUM_ERR, 1);
3653 netif_err(qdev, ifup, qdev->ndev,
3654 "Failed to init routing register "
3655 "for IP CSUM error packets.\n");
3658 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3659 RT_IDX_TU_CSUM_ERR, 1);
3661 netif_err(qdev, ifup, qdev->ndev,
3662 "Failed to init routing register "
3663 "for TCP/UDP CSUM error packets.\n");
3666 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3668 netif_err(qdev, ifup, qdev->ndev,
3669 "Failed to init routing register for broadcast packets.\n");
3672 /* If we have more than one inbound queue, then turn on RSS in the
3675 if (qdev->rss_ring_count > 1) {
3676 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3677 RT_IDX_RSS_MATCH, 1);
3679 netif_err(qdev, ifup, qdev->ndev,
3680 "Failed to init routing register for MATCH RSS packets.\n");
3685 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3688 netif_err(qdev, ifup, qdev->ndev,
3689 "Failed to init routing register for CAM packets.\n");
3691 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3695 int ql_cam_route_initialize(struct ql_adapter *qdev)
/* Check if the link is up and use that to
* determine whether we are setting or clearing
* the MAC address in the CAM.
*/
3703 set = ql_read32(qdev, STS);
3704 set &= qdev->port_link_up;
3705 status = ql_set_mac_addr(qdev, set);
3707 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3711 status = ql_route_initialize(qdev);
3713 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3718 static int ql_adapter_initialize(struct ql_adapter *qdev)
3725 * Set up the System register to halt on errors.
3727 value = SYS_EFE | SYS_FAE;
3729 ql_write32(qdev, SYS, mask | value);
3731 /* Set the default queue, and VLAN behavior. */
3732 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3733 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3734 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3736 /* Set the MPI interrupt to enabled. */
3737 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3739 /* Enable the function, set pagesize, enable error checking. */
3740 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3741 FSC_EC | FSC_VM_PAGE_4K;
3742 value |= SPLT_SETTING;
3744 /* Set/clear header splitting. */
3745 mask = FSC_VM_PAGESIZE_MASK |
3746 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3747 ql_write32(qdev, FSC, mask | value);
3749 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3751 /* Set RX packet routing to use port/pci function on which the
* packet arrived, in addition to the usual frame routing.
* This is helpful for bonding where both interfaces can have
* the same MAC address.
*/
3756 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3757 /* Reroute all packets to our Interface.
3758 * They may have been routed to MPI firmware
3761 value = ql_read32(qdev, MGMT_RCV_CFG);
3762 value &= ~MGMT_RCV_CFG_RM;
3765 /* Sticky reg needs clearing due to WOL. */
3766 ql_write32(qdev, MGMT_RCV_CFG, mask);
3767 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
/* Default WOL is enabled on Mezz cards */
3770 if (qdev->pdev->subsystem_device == 0x0068 ||
3771 qdev->pdev->subsystem_device == 0x0180)
3772 qdev->wol = WAKE_MAGIC;
3774 /* Start up the rx queues. */
3775 for (i = 0; i < qdev->rx_ring_count; i++) {
3776 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3778 netif_err(qdev, ifup, qdev->ndev,
3779 "Failed to start rx ring[%d].\n", i);
3784 /* If there is more than one inbound completion queue
3785 * then download a RICB to configure RSS.
3787 if (qdev->rss_ring_count > 1) {
3788 status = ql_start_rss(qdev);
3790 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3795 /* Start up the tx queues. */
3796 for (i = 0; i < qdev->tx_ring_count; i++) {
3797 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3799 netif_err(qdev, ifup, qdev->ndev,
3800 "Failed to start tx ring[%d].\n", i);
3805 /* Initialize the port and set the max framesize. */
3806 status = qdev->nic_ops->port_initialize(qdev);
3808 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3810 /* Set up the MAC address and frame routing filter. */
3811 status = ql_cam_route_initialize(qdev);
3813 netif_err(qdev, ifup, qdev->ndev,
3814 "Failed to init CAM/Routing tables.\n");
3818 /* Start NAPI for the RSS queues. */
3819 for (i = 0; i < qdev->rss_ring_count; i++) {
3820 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3821 "Enabling NAPI for rx_ring[%d].\n", i);
3822 napi_enable(&qdev->rx_ring[i].napi);
3828 /* Issue soft reset to chip. */
3829 static int ql_adapter_reset(struct ql_adapter *qdev)
3833 unsigned long end_jiffies;
3835 /* Clear all the entries in the routing table. */
3836 status = ql_clear_routing_entries(qdev);
3838 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3842 end_jiffies = jiffies +
3843 max((unsigned long)1, usecs_to_jiffies(30));
/* If the recovery bit is set, skip the mailbox command and
* clear the bit; otherwise we are in the normal reset process.
*/
3848 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3849 /* Stop management traffic. */
3850 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3852 /* Wait for the NIC and MGMNT FIFOs to empty. */
3853 ql_wait_fifo_empty(qdev);
3855 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3857 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
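/* Writing the enable mask in the upper 16 bits along with RST_FO_FR
* kicks off the function reset; the loop below polls for the chip to
* clear the bit.
*/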
3860 value = ql_read32(qdev, RST_FO);
3861 if ((value & RST_FO_FR) == 0)
3864 } while (time_before(jiffies, end_jiffies));
3866 if (value & RST_FO_FR) {
3867 netif_err(qdev, ifdown, qdev->ndev,
3868 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3869 status = -ETIMEDOUT;
3872 /* Resume management traffic. */
3873 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3877 static void ql_display_dev_info(struct net_device *ndev)
3879 struct ql_adapter *qdev = netdev_priv(ndev);
3881 netif_info(qdev, probe, qdev->ndev,
3882 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3883 "XG Roll = %d, XG Rev = %d.\n",
3886 qdev->chip_rev_id & 0x0000000f,
3887 qdev->chip_rev_id >> 4 & 0x0000000f,
3888 qdev->chip_rev_id >> 8 & 0x0000000f,
3889 qdev->chip_rev_id >> 12 & 0x0000000f);
3890 netif_info(qdev, probe, qdev->ndev,
3891 "MAC address %pM\n", ndev->dev_addr);
3894 static int ql_wol(struct ql_adapter *qdev)
3897 u32 wol = MB_WOL_DISABLE;
3899 /* The CAM is still intact after a reset, but if we
3900 * are doing WOL, then we may need to program the
3901 * routing regs. We would also need to issue the mailbox
* commands to instruct the MPI what to do per the ethtool
* settings.
*/
3906 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3907 WAKE_MCAST | WAKE_BCAST)) {
3908 netif_err(qdev, ifdown, qdev->ndev,
"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3914 if (qdev->wol & WAKE_MAGIC) {
3915 status = ql_mb_wol_set_magic(qdev, 1);
3917 netif_err(qdev, ifdown, qdev->ndev,
3918 "Failed to set magic packet on %s.\n",
3922 netif_info(qdev, drv, qdev->ndev,
3923 "Enabled magic packet successfully on %s.\n",
3926 wol |= MB_WOL_MAGIC_PKT;
3930 wol |= MB_WOL_MODE_ON;
3931 status = ql_mb_wol_mode(qdev, wol);
3932 netif_err(qdev, drv, qdev->ndev,
3933 "WOL %s (wol code 0x%x) on %s\n",
3934 (status == 0) ? "Successfully set" : "Failed",
3935 wol, qdev->ndev->name);
3941 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3944 /* Don't kill the reset worker thread if we
3945 * are in the process of recovery.
3947 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3948 cancel_delayed_work_sync(&qdev->asic_reset_work);
3949 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3950 cancel_delayed_work_sync(&qdev->mpi_work);
3951 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3952 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3953 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3956 static int ql_adapter_down(struct ql_adapter *qdev)
3962 ql_cancel_all_work_sync(qdev);
3964 for (i = 0; i < qdev->rss_ring_count; i++)
3965 napi_disable(&qdev->rx_ring[i].napi);
3967 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3969 ql_disable_interrupts(qdev);
3971 ql_tx_ring_clean(qdev);
3973 /* Call netif_napi_del() from common point.
3975 for (i = 0; i < qdev->rss_ring_count; i++)
3976 netif_napi_del(&qdev->rx_ring[i].napi);
3978 status = ql_adapter_reset(qdev);
3980 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3982 ql_free_rx_buffers(qdev);
3987 static int ql_adapter_up(struct ql_adapter *qdev)
3991 err = ql_adapter_initialize(qdev);
3993 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3996 set_bit(QL_ADAPTER_UP, &qdev->flags);
3997 ql_alloc_rx_buffers(qdev);
3998 /* If the port is initialized and the
* link is up, then turn on the carrier.
*/
4001 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4002 (ql_read32(qdev, STS) & qdev->port_link_up))
4004 /* Restore rx mode. */
4005 clear_bit(QL_ALLMULTI, &qdev->flags);
4006 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4007 qlge_set_multicast_list(qdev->ndev);
4009 /* Restore vlan setting. */
4010 qlge_restore_vlan(qdev);
4012 ql_enable_interrupts(qdev);
4013 ql_enable_all_completion_interrupts(qdev);
4014 netif_tx_start_all_queues(qdev->ndev);
4018 ql_adapter_reset(qdev);
4022 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4024 ql_free_mem_resources(qdev);
4028 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4032 if (ql_alloc_mem_resources(qdev)) {
4033 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4036 status = ql_request_irq(qdev);
4040 static int qlge_close(struct net_device *ndev)
4042 struct ql_adapter *qdev = netdev_priv(ndev);
/* If we hit the pci_channel_io_perm_failure
* condition, then we have already
* brought the adapter down.
*/
4048 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4049 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4050 clear_bit(QL_EEH_FATAL, &qdev->flags);
4055 * Wait for device to recover from a reset.
4056 * (Rarely happens, but possible.)
4058 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4060 ql_adapter_down(qdev);
4061 ql_release_adapter_resources(qdev);
4065 static int ql_configure_rings(struct ql_adapter *qdev)
4068 struct rx_ring *rx_ring;
4069 struct tx_ring *tx_ring;
4070 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4071 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4072 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4074 qdev->lbq_buf_order = get_order(lbq_buf_len);
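/* get_order() converts the buffer length into a page allocation
* order, so jumbo-MTU large buffers that exceed a page are backed by
* multi-page chunks.
*/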
4076 /* In a perfect world we have one RSS ring for each CPU
* and each has its own vector. To do that we ask for
4078 * cpu_cnt vectors. ql_enable_msix() will adjust the
4079 * vector count to what we actually get. We then
4080 * allocate an RSS ring for each.
4081 * Essentially, we are doing min(cpu_count, msix_vector_count).
4083 qdev->intr_count = cpu_cnt;
4084 ql_enable_msix(qdev);
4085 /* Adjust the RSS ring count to the actual vector count. */
4086 qdev->rss_ring_count = qdev->intr_count;
4087 qdev->tx_ring_count = cpu_cnt;
4088 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
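/* For example, with 8 online CPUs and 8 MSI-X vectors granted, this
* gives 8 RSS rings, 8 TX work queues and 16 completion (rx_ring)
* structures in total.
*/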
4090 for (i = 0; i < qdev->tx_ring_count; i++) {
4091 tx_ring = &qdev->tx_ring[i];
4092 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4093 tx_ring->qdev = qdev;
4095 tx_ring->wq_len = qdev->tx_ring_size;
4097 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
/*
* The completion queue ID for the tx rings starts
* immediately after the rss rings.
*/
4103 tx_ring->cq_id = qdev->rss_ring_count + i;
4106 for (i = 0; i < qdev->rx_ring_count; i++) {
4107 rx_ring = &qdev->rx_ring[i];
4108 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4109 rx_ring->qdev = qdev;
4111 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4112 if (i < qdev->rss_ring_count) {
4114 * Inbound (RSS) queues.
4116 rx_ring->cq_len = qdev->rx_ring_size;
4118 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4119 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4121 rx_ring->lbq_len * sizeof(__le64);
4122 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4123 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4124 "lbq_buf_size %d, order = %d\n",
4125 rx_ring->lbq_buf_size,
4126 qdev->lbq_buf_order);
4127 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4129 rx_ring->sbq_len * sizeof(__le64);
4130 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4131 rx_ring->type = RX_Q;
4134 * Outbound queue handles outbound completions only.
4136 /* outbound cq is same size as tx_ring it services. */
4137 rx_ring->cq_len = qdev->tx_ring_size;
4139 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4140 rx_ring->lbq_len = 0;
4141 rx_ring->lbq_size = 0;
4142 rx_ring->lbq_buf_size = 0;
4143 rx_ring->sbq_len = 0;
4144 rx_ring->sbq_size = 0;
4145 rx_ring->sbq_buf_size = 0;
4146 rx_ring->type = TX_Q;
4152 static int qlge_open(struct net_device *ndev)
4155 struct ql_adapter *qdev = netdev_priv(ndev);
4157 err = ql_adapter_reset(qdev);
4161 err = ql_configure_rings(qdev);
4165 err = ql_get_adapter_resources(qdev);
4169 err = ql_adapter_up(qdev);
4176 ql_release_adapter_resources(qdev);
4180 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4182 struct rx_ring *rx_ring;
4186 /* Wait for an outstanding reset to complete. */
4187 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4189 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4190 netif_err(qdev, ifup, qdev->ndev,
4191 "Waiting for adapter UP...\n");
4196 netif_err(qdev, ifup, qdev->ndev,
4197 "Timed out waiting for adapter UP\n");
4202 status = ql_adapter_down(qdev);
4206 /* Get the new rx buffer size. */
4207 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4208 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4209 qdev->lbq_buf_order = get_order(lbq_buf_len);
4211 for (i = 0; i < qdev->rss_ring_count; i++) {
4212 rx_ring = &qdev->rx_ring[i];
4213 /* Set the new size. */
4214 rx_ring->lbq_buf_size = lbq_buf_len;
4217 status = ql_adapter_up(qdev);
4223 netif_alert(qdev, ifup, qdev->ndev,
4224 "Driver up/down cycle failed, closing device.\n");
4225 set_bit(QL_ADAPTER_UP, &qdev->flags);
4226 dev_close(qdev->ndev);
4230 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4232 struct ql_adapter *qdev = netdev_priv(ndev);
4235 if (ndev->mtu == 1500 && new_mtu == 9000) {
4236 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4237 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
} else
return -EINVAL;
4242 queue_delayed_work(qdev->workqueue,
4243 &qdev->mpi_port_cfg_work, 3*HZ);
4245 ndev->mtu = new_mtu;
4247 if (!netif_running(qdev->ndev)) {
4251 status = ql_change_rx_buffers(qdev);
4253 netif_err(qdev, ifup, qdev->ndev,
4254 "Changing MTU failed.\n");
4260 static struct net_device_stats *qlge_get_stats(struct net_device
4263 struct ql_adapter *qdev = netdev_priv(ndev);
4264 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4265 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4266 unsigned long pkts, mcast, dropped, errors, bytes;
4270 pkts = mcast = dropped = errors = bytes = 0;
4271 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4272 pkts += rx_ring->rx_packets;
4273 bytes += rx_ring->rx_bytes;
4274 dropped += rx_ring->rx_dropped;
4275 errors += rx_ring->rx_errors;
4276 mcast += rx_ring->rx_multicast;
4278 ndev->stats.rx_packets = pkts;
4279 ndev->stats.rx_bytes = bytes;
4280 ndev->stats.rx_dropped = dropped;
4281 ndev->stats.rx_errors = errors;
4282 ndev->stats.multicast = mcast;
4285 pkts = errors = bytes = 0;
4286 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4287 pkts += tx_ring->tx_packets;
4288 bytes += tx_ring->tx_bytes;
4289 errors += tx_ring->tx_errors;
4291 ndev->stats.tx_packets = pkts;
4292 ndev->stats.tx_bytes = bytes;
4293 ndev->stats.tx_errors = errors;
4294 return &ndev->stats;
4297 static void qlge_set_multicast_list(struct net_device *ndev)
4299 struct ql_adapter *qdev = netdev_priv(ndev);
4300 struct netdev_hw_addr *ha;
4303 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4307 * Set or clear promiscuous mode if a
4308 * transition is taking place.
4310 if (ndev->flags & IFF_PROMISC) {
4311 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4312 if (ql_set_routing_reg
4313 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4314 netif_err(qdev, hw, qdev->ndev,
4315 "Failed to set promiscuous mode.\n");
4317 set_bit(QL_PROMISCUOUS, &qdev->flags);
4321 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4322 if (ql_set_routing_reg
4323 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4324 netif_err(qdev, hw, qdev->ndev,
4325 "Failed to clear promiscuous mode.\n");
4327 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4333 * Set or clear all multicast mode if a
4334 * transition is taking place.
4336 if ((ndev->flags & IFF_ALLMULTI) ||
4337 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4338 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4339 if (ql_set_routing_reg
4340 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4341 netif_err(qdev, hw, qdev->ndev,
4342 "Failed to set all-multi mode.\n");
4344 set_bit(QL_ALLMULTI, &qdev->flags);
4348 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4349 if (ql_set_routing_reg
4350 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4351 netif_err(qdev, hw, qdev->ndev,
4352 "Failed to clear all-multi mode.\n");
4354 clear_bit(QL_ALLMULTI, &qdev->flags);
4359 if (!netdev_mc_empty(ndev)) {
4360 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4364 netdev_for_each_mc_addr(ha, ndev) {
4365 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4366 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4367 netif_err(qdev, hw, qdev->ndev,
"Failed to load multicast address.\n");
4369 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4375 if (ql_set_routing_reg
4376 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4377 netif_err(qdev, hw, qdev->ndev,
4378 "Failed to set multicast match mode.\n");
4380 set_bit(QL_ALLMULTI, &qdev->flags);
4384 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4387 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4389 struct ql_adapter *qdev = netdev_priv(ndev);
4390 struct sockaddr *addr = p;
4393 if (!is_valid_ether_addr(addr->sa_data))
4394 return -EADDRNOTAVAIL;
4395 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4396 /* Update local copy of current mac address. */
4397 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4399 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4402 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4403 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4405 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4406 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4410 static void qlge_tx_timeout(struct net_device *ndev)
4412 struct ql_adapter *qdev = netdev_priv(ndev);
4413 ql_queue_asic_error(qdev);
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

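/* Per-chip operations: the 8012 and 8000 parts differ in how flash
 * parameters are read and how the port is initialized, so each gets
 * its own ops table, selected in ql_get_board_info().
 */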
static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

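/* Undo everything ql_init_device() set up: workqueue, register and
 * doorbell mappings, the optional coredump buffer and the PCI regions.
 */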
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		return err;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

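/* net_device callbacks, wired up in qlge_probe() via ndev->netdev_ops. */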
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

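/* Periodic (5 second) timer that polls the status register purely to
 * give the EEH machinery a chance to notice a dead PCI bus; it does no
 * data-path housekeeping of its own.
 */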
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

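/* PCI probe: allocate the multi-queue net_device, initialize the
 * adapter, register with the network stack and start the EEH timer.
 */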
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

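/* Exported for the loopback (lb) self-test in the ethtool support code,
 * which reuses the normal send and receive-clean paths.
 */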
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

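/* PCI error recovery (EEH/AER) entry points, hooked up below through
 * qlge_driver.err_handler.
 */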
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

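/* Shutdown reuses the suspend path to quiesce the adapter and arm
 * Wake-on-LAN before power-off.
 */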
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);