/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *             Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto unmap;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
unmap:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
329 /* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
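			/* The 48-bit multicast address is programmed through
			 * two MAC_ADDR_DATA writes: the 32-bit "lower" word
			 * (bytes 2-5) goes out first, then the 16-bit "upper"
			 * word (bytes 0-1) computed above.
			 */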
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
374 u32 upper = (addr[0] << 8) | addr[1];
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
386 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
393 ql_write32(qdev, MAC_ADDR_DATA, lower);
395 ql_wait_reg_rdy(qdev,
396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 ql_write32(qdev, MAC_ADDR_DATA, upper);
404 ql_wait_reg_rdy(qdev,
405 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
408 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
409 (index << MAC_ADDR_IDX_SHIFT) | /* index */
411 /* This field should also include the queue id
412 and possibly the function id. Right now we hardcode
413 the route field to NIC core.
415 cam_output = (CAM_OUT_ROUTE_NIC |
417 func << CAM_OUT_FUNC_SHIFT) |
418 (0 << CAM_OUT_CQ_ID_SHIFT));
419 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420 cam_output |= CAM_OUT_RV;
421 /* route to NIC core */
422 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
425 case MAC_ADDR_TYPE_VLAN:
427 u32 enable_bit = *((u32 *) &addr[0]);
428 /* For VLAN, the addr actually holds a bit that
429 * either enables or disables the vlan id we are
430 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about.
433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
437 enable_bit ? "to" : "from");
440 ql_wait_reg_rdy(qdev,
441 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
444 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445 (index << MAC_ADDR_IDX_SHIFT) | /* index */
447 enable_bit); /* enable/disable */
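			/* No MAC_ADDR_DATA write is needed for VLAN entries;
			 * the enable/disable state travels in the index write
			 * itself via enable_bit (MAC_ADDR_E set or cleared).
			 */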
450 case MAC_ADDR_TYPE_MULTI_FLTR:
452 netif_crit(qdev, ifup, qdev->ndev,
453 "Address type %d not yet supported.\n", type);
460 /* Set or clear MAC address in hardware. We sometimes
461 * have to clear it to prevent wrong frame routing
462 * especially in a bonding environment.
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
467 char zero_mac_addr[ETH_ALEN];
471 addr = &qdev->current_mac_addr[0];
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Set Mac addr %pM\n", addr);
475 memset(zero_mac_addr, 0, ETH_ALEN);
476 addr = &zero_mac_addr[0];
477 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 "Clearing MAC address\n");
480 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
483 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
487 netif_err(qdev, ifup, qdev->ndev,
488 "Failed to init mac address.\n");
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
506 /* Get a specific frame routing value from the CAM.
507 * Used for debug and reg dump.
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
513 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
517 ql_write32(qdev, RT_IDX,
518 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
522 *value = ql_read32(qdev, RT_DATA);
527 /* The NIC function for this chip has 16 routing indexes. Each one can be used
528 * to route different frame types to various inbound queues. We send broadcast/
529 * multicast/error frames to the default queue for slow handling,
530 * and CAM hit/RSS frames to the fast handling queues.
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
535 int status = -EINVAL; /* Return error if no mask match. */
538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
563 value = RT_IDX_DST_CAM_Q | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 case RT_IDX_VALID: /* Promiscuous Mode frames. */
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
577 value = RT_IDX_DST_DFLT_Q | /* dest */
578 RT_IDX_TYPE_NICQ | /* type */
579 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
584 value = RT_IDX_DST_DFLT_Q | /* dest */
585 RT_IDX_TYPE_NICQ | /* type */
586 (RT_IDX_IP_CSUM_ERR_SLOT <<
587 RT_IDX_IDX_SHIFT); /* index */
590 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
592 value = RT_IDX_DST_DFLT_Q | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 RT_IDX_IDX_SHIFT); /* index */
598 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
600 value = RT_IDX_DST_DFLT_Q | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
607 value = RT_IDX_DST_DFLT_Q | /* dest */
608 RT_IDX_TYPE_NICQ | /* type */
609 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
612 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
614 value = RT_IDX_DST_DFLT_Q | /* dest */
615 RT_IDX_TYPE_NICQ | /* type */
616 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
619 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
621 value = RT_IDX_DST_RSS | /* dest */
622 RT_IDX_TYPE_NICQ | /* type */
623 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
626 case 0: /* Clear the E-bit on an entry. */
628 value = RT_IDX_DST_DFLT_Q | /* dest */
629 RT_IDX_TYPE_NICQ | /* type */
630 (index << RT_IDX_IDX_SHIFT);/* index */
634 netif_err(qdev, ifup, qdev->ndev,
635 "Mask type %d not yet supported.\n", mask);
641 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
644 value |= (enable ? RT_IDX_E : 0);
645 ql_write32(qdev, RT_IDX, value);
646 ql_write32(qdev, RT_DATA, enable ? mask : 0);
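	/* RT_IDX carries the slot index plus the E (enable) bit;
	 * RT_DATA holds the frame-type mask the slot should match.
	 * A zero mask is written when the entry is being disabled.
	 */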
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663 * Otherwise, we may have multiple outstanding workers and don't want to
664 * enable until the last one finishes. In this case, the irq_cnt gets
665 * incremented every time we queue a worker and decremented every time
666 * a worker finishes. Once it hits zero we enable the interrupt.
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
671 unsigned long hw_flags = 0;
672 struct intr_context *ctx = qdev->intr_context + intr;
674 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 /* Always enable if we're MSIX multi interrupts and
676 * it's not the default (zeroeth) interrupt.
678 ql_write32(qdev, INTR_EN,
680 var = ql_read32(qdev, STS);
684 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 ql_write32(qdev, INTR_EN,
688 var = ql_read32(qdev, STS);
690 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
697 struct intr_context *ctx;
699 /* HW disables for us if we're MSIX multi interrupts and
700 * it's not the default (zeroeth) interrupt.
702 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
705 ctx = qdev->intr_context + intr;
706 spin_lock(&qdev->hw_lock);
707 if (!atomic_read(&ctx->irq_cnt)) {
708 ql_write32(qdev, INTR_EN,
710 var = ql_read32(qdev, STS);
712 atomic_inc(&ctx->irq_cnt);
713 spin_unlock(&qdev->hw_lock);
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
720 for (i = 0; i < qdev->intr_count; i++) {
721 /* The enable call does a atomic_dec_and_test
722 * and enables only if the result is zero.
723 * So we precharge it here.
725 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
727 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728 ql_enable_completion_interrupt(qdev, i);
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
737 __le16 *flash = (__le16 *)&qdev->flash;
739 status = strncmp((char *)&qdev->flash, str, 4);
741 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
745 for (i = 0; i < size; i++)
746 csum += le16_to_cpu(*flash++);
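	/* The flash image is expected to checksum to zero when summed
	 * as 16-bit little-endian words; any nonzero result means the
	 * image is corrupt.
	 */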
749 netif_err(qdev, ifup, qdev->ndev,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
758 /* wait for reg to come ready */
759 status = ql_wait_reg_rdy(qdev,
760 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
763 /* set up for reg read */
764 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 /* wait for reg to come ready */
766 status = ql_wait_reg_rdy(qdev,
767 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
770 /* This data is stored on flash as an array of
771 * __le32. Since ql_read32() returns cpu endian
772 * we need to swap it back.
774 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
783 __le32 *p = (__le32 *)&qdev->flash;
787 /* Get flash offset for function and adjust
791 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
793 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
795 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
798 size = sizeof(struct flash_params_8000) / sizeof(u32);
799 for (i = 0; i < size; i++, p++) {
800 status = ql_read_flash_word(qdev, i+offset, p);
802 netif_err(qdev, ifup, qdev->ndev,
803 "Error reading flash.\n");
808 status = ql_validate_flash(qdev,
809 sizeof(struct flash_params_8000) / sizeof(u16),
812 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
817 /* Extract either manufacturer or BOFM modified
820 if (qdev->flash.flash_params_8000.data_type1 == 2)
822 qdev->flash.flash_params_8000.mac_addr1,
823 qdev->ndev->addr_len);
826 qdev->flash.flash_params_8000.mac_addr,
827 qdev->ndev->addr_len);
829 if (!is_valid_ether_addr(mac_addr)) {
830 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
835 memcpy(qdev->ndev->dev_addr,
837 qdev->ndev->addr_len);
840 ql_sem_unlock(qdev, SEM_FLASH_MASK);
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
848 __le32 *p = (__le32 *)&qdev->flash;
850 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
852 /* Second function's parameters follow the first
858 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
861 for (i = 0; i < size; i++, p++) {
862 status = ql_read_flash_word(qdev, i+offset, p);
864 netif_err(qdev, ifup, qdev->ndev,
865 "Error reading flash.\n");
871 status = ql_validate_flash(qdev,
872 sizeof(struct flash_params_8012) / sizeof(u16),
875 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
880 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
885 memcpy(qdev->ndev->dev_addr,
886 qdev->flash.flash_params_8012.mac_addr,
887 qdev->ndev->addr_len);
890 ql_sem_unlock(qdev, SEM_FLASH_MASK);
894 /* xgmac register are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
906 /* write the data to the data reg */
907 ql_write32(qdev, XGMAC_DATA, data);
908 /* trigger the write */
909 ql_write32(qdev, XGMAC_ADDR, reg);
913 /* xgmac register are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
920 /* wait for reg to come ready */
921 status = ql_wait_reg_rdy(qdev,
922 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
925 /* set up for reg read */
926 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 /* wait for reg to come ready */
928 status = ql_wait_reg_rdy(qdev,
929 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
933 *data = ql_read32(qdev, XGMAC_DATA);
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
945 status = ql_read_xgmac_reg(qdev, reg, &lo);
949 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
953 *data = (u64) lo | ((u64) hi << 32);
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
963 * Get MPI firmware version for driver banner
966 status = ql_mb_about_fw(qdev);
969 status = ql_mb_get_fw_state(qdev);
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
978 /* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
989 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
993 netif_info(qdev, link, qdev->ndev,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
997 netif_crit(qdev, link, qdev->ndev,
998 "Port initialize timed out.\n");
1003 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1004 /* Set the core reset. */
1005 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1008 data |= GLOBAL_CFG_RESET;
1009 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1015 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1016 data |= GLOBAL_CFG_TX_STAT_EN;
1017 data |= GLOBAL_CFG_RX_STAT_EN;
1018 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1022 /* Enable transmitter, and clear it's reset. */
1023 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1026 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1027 data |= TX_CFG_EN; /* Enable the transmitter. */
1028 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1032 /* Enable receiver and clear it's reset. */
1033 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1036 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1037 data |= RX_CFG_EN; /* Enable the receiver. */
1038 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1042 /* Turn on jumbo. */
1044 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1048 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1055 ql_sem_unlock(qdev, qdev->xg_sem_mask);
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 struct rx_ring *rx_ring)
1078 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1080 pci_dma_sync_single_for_cpu(qdev->pdev,
1081 dma_unmap_addr(lbq_desc, mapaddr),
1082 rx_ring->lbq_buf_size,
1083 PCI_DMA_FROMDEVICE);
1085 /* If it's the last chunk of our master page then
1088 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 == ql_lbq_block_size(qdev))
1090 pci_unmap_page(qdev->pdev,
1091 lbq_desc->p.pg_chunk.map,
1092 ql_lbq_block_size(qdev),
1093 PCI_DMA_FROMDEVICE);
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 struct bq_desc *lbq_desc)
1127 if (!rx_ring->pg_chunk.page) {
1129 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1131 qdev->lbq_buf_order);
1132 if (unlikely(!rx_ring->pg_chunk.page)) {
1133 netif_err(qdev, drv, qdev->ndev,
1134 "page allocation failed.\n");
1137 rx_ring->pg_chunk.offset = 0;
1138 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 0, ql_lbq_block_size(qdev),
1140 PCI_DMA_FROMDEVICE);
1141 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 __free_pages(rx_ring->pg_chunk.page,
1143 qdev->lbq_buf_order);
1144 netif_err(qdev, drv, qdev->ndev,
1145 "PCI mapping failed.\n");
1148 rx_ring->pg_chunk.map = map;
1149 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1155 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1157 /* Adjust the master page chunk for next
1160 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 rx_ring->pg_chunk.page = NULL;
1163 lbq_desc->p.pg_chunk.last_flag = 1;
1165 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 get_page(rx_ring->pg_chunk.page);
1167 lbq_desc->p.pg_chunk.last_flag = 0;
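		/* Each chunk handed to the hardware holds its own page
		 * reference (get_page() above), so the master page is only
		 * freed once every outstanding chunk has been consumed.
		 */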
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1174 u32 clean_idx = rx_ring->lbq_clean_idx;
1175 u32 start_idx = clean_idx;
1176 struct bq_desc *lbq_desc;
1180 while (rx_ring->lbq_free_cnt > 32) {
1181 for (i = 0; i < 16; i++) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n",
1185 lbq_desc = &rx_ring->lbq[clean_idx];
1186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n");
1192 map = lbq_desc->p.pg_chunk.map +
1193 lbq_desc->p.pg_chunk.offset;
1194 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195 dma_unmap_len_set(lbq_desc, maplen,
1196 rx_ring->lbq_buf_size);
1197 *lbq_desc->addr = cpu_to_le64(map);
1199 pci_dma_sync_single_for_device(qdev->pdev, map,
1200 rx_ring->lbq_buf_size,
1201 PCI_DMA_FROMDEVICE);
1203 if (clean_idx == rx_ring->lbq_len)
1207 rx_ring->lbq_clean_idx = clean_idx;
1208 rx_ring->lbq_prod_idx += 16;
1209 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 rx_ring->lbq_prod_idx = 0;
1211 rx_ring->lbq_free_cnt -= 16;
1214 if (start_idx != clean_idx) {
1215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring->lbq_prod_idx);
1218 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 rx_ring->lbq_prod_idx_db_reg);
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1226 u32 clean_idx = rx_ring->sbq_clean_idx;
1227 u32 start_idx = clean_idx;
1228 struct bq_desc *sbq_desc;
1232 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx];
1235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n",
1238 if (sbq_desc->p.skb == NULL) {
1239 netif_printk(qdev, rx_status, KERN_DEBUG,
1241 "sbq: getting new skb for index %d.\n",
1244 netdev_alloc_skb(qdev->ndev,
1246 if (sbq_desc->p.skb == NULL) {
1247 netif_err(qdev, probe, qdev->ndev,
1248 "Couldn't get an skb.\n");
1249 rx_ring->sbq_clean_idx = clean_idx;
1252 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
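				/* QLGE_SB_PAD keeps the buffer padded for the
				 * hardware; ql_realign_skb() later shifts the
				 * data back so the IP header ends up on a
				 * 2-byte (NET_IP_ALIGN) boundary for the stack.
				 */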
1253 map = pci_map_single(qdev->pdev,
1254 sbq_desc->p.skb->data,
1255 rx_ring->sbq_buf_size,
1256 PCI_DMA_FROMDEVICE);
1257 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258 netif_err(qdev, ifup, qdev->ndev,
1259 "PCI mapping failed.\n");
1260 rx_ring->sbq_clean_idx = clean_idx;
1261 dev_kfree_skb_any(sbq_desc->p.skb);
1262 sbq_desc->p.skb = NULL;
1265 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 dma_unmap_len_set(sbq_desc, maplen,
1267 rx_ring->sbq_buf_size);
1268 *sbq_desc->addr = cpu_to_le64(map);
1272 if (clean_idx == rx_ring->sbq_len)
1275 rx_ring->sbq_clean_idx = clean_idx;
1276 rx_ring->sbq_prod_idx += 16;
1277 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 rx_ring->sbq_prod_idx = 0;
1279 rx_ring->sbq_free_cnt -= 16;
1282 if (start_idx != clean_idx) {
1283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring->sbq_prod_idx);
1286 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 rx_ring->sbq_prod_idx_db_reg);
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1298 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If its the zeroeth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there is more than 6 frags,
1317 netif_printk(qdev, tx_done, KERN_DEBUG,
1319 "unmapping OAL area.\n");
1321 pci_unmap_single(qdev->pdev,
1322 dma_unmap_addr(&tx_ring_desc->map[i],
1324 dma_unmap_len(&tx_ring_desc->map[i],
1328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
1330 pci_unmap_page(qdev->pdev,
1331 dma_unmap_addr(&tx_ring_desc->map[i],
1333 dma_unmap_len(&tx_ring_desc->map[i],
1334 maplen), PCI_DMA_TODEVICE);
1340 /* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1347 int len = skb_headlen(skb);
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
1358 * Map the skb buffer first.
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
1367 return NETDEV_TX_BUSY;
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
1372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1417 tbd->addr = cpu_to_le64(map);
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
1426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1434 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1437 err = dma_mapping_error(&qdev->pdev->dev, map);
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449 skb_frag_size(frag));
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be umapped.
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct napi_struct *napi = &rx_ring->napi;
1480 napi->dev = qdev->ndev;
1482 skb = napi_get_frags(napi);
1484 netif_err(qdev, drv, qdev->ndev,
1485 "Couldn't get an skb, exiting.\n");
1486 rx_ring->rx_dropped++;
1487 put_page(lbq_desc->p.pg_chunk.page);
1490 prefetch(lbq_desc->p.pg_chunk.va);
1491 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 lbq_desc->p.pg_chunk.page,
1493 lbq_desc->p.pg_chunk.offset,
1497 skb->data_len += length;
1498 skb->truesize += length;
1499 skb_shinfo(skb)->nr_frags++;
1501 rx_ring->rx_packets++;
1502 rx_ring->rx_bytes += length;
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 skb_record_rx_queue(skb, rx_ring->cq_id);
1505 if (vlan_id != 0xffff)
1506 __vlan_hwaccel_put_tag(skb, vlan_id);
1507 napi_gro_frags(napi);
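	/* napi_gro_frags() takes ownership of the skb obtained from
	 * napi_get_frags(), so there is nothing to free here.
	 */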
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1523 skb = netdev_alloc_skb(ndev, length);
1525 netif_err(qdev, drv, qdev->ndev,
1526 "Couldn't get an skb, need to unwind!.\n");
1527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1532 addr = lbq_desc->p.pg_chunk.va;
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538 netif_info(qdev, drv, qdev->ndev,
1539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540 rx_ring->rx_errors++;
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1547 if (skb->len > ndev->mtu + ETH_HLEN) {
1548 netif_err(qdev, drv, qdev->ndev,
1549 "Segment too small, dropping.\n");
1550 rx_ring->rx_dropped++;
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
1567 skb_checksum_none_assert(skb);
1569 if ((ndev->features & NETIF_F_RXCSUM) &&
1570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
1575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data;
1580 if (!(iph->frag_off &
1581 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 netif_printk(qdev, rx_status, KERN_DEBUG,
1585 "TCP checksum done!\n");
1590 skb_record_rx_queue(skb, rx_ring->cq_id);
1591 if (vlan_id != 0xffff)
1592 __vlan_hwaccel_put_tag(skb, vlan_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 napi_gro_receive(napi, skb);
1596 netif_receive_skb(skb);
1599 dev_kfree_skb_any(skb);
1600 put_page(lbq_desc->p.pg_chunk.page);
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 struct rx_ring *rx_ring,
1606 struct ib_mac_iocb_rsp *ib_mac_rsp,
1610 struct net_device *ndev = qdev->ndev;
1611 struct sk_buff *skb = NULL;
1612 struct sk_buff *new_skb = NULL;
1613 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1615 skb = sbq_desc->p.skb;
1616 /* Allocate new_skb and copy */
1617 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 if (new_skb == NULL) {
1619 netif_err(qdev, probe, qdev->ndev,
1620 "No skb available, drop the packet.\n");
1621 rx_ring->rx_dropped++;
1624 skb_reserve(new_skb, NET_IP_ALIGN);
1625 memcpy(skb_put(new_skb, length), skb->data, length);
1628 /* Frame error, so drop the packet. */
1629 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1630 netif_info(qdev, drv, qdev->ndev,
1631 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_errors++;
1637 /* loopback self test for ethtool */
1638 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639 ql_check_lb_frame(qdev, skb);
1640 dev_kfree_skb_any(skb);
1644 /* The max framesize filter on this chip is set higher than
1645 * MTU since FCoE uses 2k frames.
1647 if (skb->len > ndev->mtu + ETH_HLEN) {
1648 dev_kfree_skb_any(skb);
1649 rx_ring->rx_dropped++;
1653 prefetch(skb->data);
1655 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 "Promiscuous Packet.\n");
1669 rx_ring->rx_packets++;
1670 rx_ring->rx_bytes += skb->len;
1671 skb->protocol = eth_type_trans(skb, ndev);
1672 skb_checksum_none_assert(skb);
1674 /* If rx checksum is on, and there are no
1675 * csum or frame errors.
1677 if ((ndev->features & NETIF_F_RXCSUM) &&
1678 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1680 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682 "TCP checksum done!\n");
1683 skb->ip_summed = CHECKSUM_UNNECESSARY;
1684 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686 /* Unfragmented ipv4 UDP frame. */
1687 struct iphdr *iph = (struct iphdr *) skb->data;
1688 if (!(iph->frag_off &
1689 ntohs(IP_MF|IP_OFFSET))) {
1690 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 netif_printk(qdev, rx_status, KERN_DEBUG,
1693 "TCP checksum done!\n");
1698 skb_record_rx_queue(skb, rx_ring->cq_id);
1699 if (vlan_id != 0xffff)
1700 __vlan_hwaccel_put_tag(skb, vlan_id);
1701 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 napi_gro_receive(&rx_ring->napi, skb);
1704 netif_receive_skb(skb);
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727 struct rx_ring *rx_ring,
1728 struct ib_mac_iocb_rsp *ib_mac_rsp)
1730 struct bq_desc *lbq_desc;
1731 struct bq_desc *sbq_desc;
1732 struct sk_buff *skb = NULL;
1733 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1737 * Handle the header buffer if present.
1739 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 "Header of %d bytes in small buffer.\n", hdr_len);
1744 * Headers fit nicely into a small buffer.
1746 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 pci_unmap_single(qdev->pdev,
1748 dma_unmap_addr(sbq_desc, mapaddr),
1749 dma_unmap_len(sbq_desc, maplen),
1750 PCI_DMA_FROMDEVICE);
1751 skb = sbq_desc->p.skb;
1752 ql_realign_skb(skb, hdr_len);
1753 skb_put(skb, hdr_len);
1754 sbq_desc->p.skb = NULL;
1758 * Handle the data buffer(s).
1760 if (unlikely(!length)) { /* Is there data too? */
1761 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762 "No Data buffer in this packet.\n");
1766 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "Headers in small, data of %d bytes in small, combine them.\n",
1772 * Data is less than small buffer size so it's
1773 * stuffed in a small buffer.
1774 * For this case we append the data
1775 * from the "data" small buffer to the "header" small
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 pci_dma_sync_single_for_cpu(qdev->pdev,
1781 (sbq_desc, mapaddr),
1784 PCI_DMA_FROMDEVICE);
1785 memcpy(skb_put(skb, length),
1786 sbq_desc->p.skb->data, length);
1787 pci_dma_sync_single_for_device(qdev->pdev,
1794 PCI_DMA_FROMDEVICE);
1796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "%d bytes in a single small buffer.\n",
1799 sbq_desc = ql_get_curr_sbuf(rx_ring);
1800 skb = sbq_desc->p.skb;
1801 ql_realign_skb(skb, length);
1802 skb_put(skb, length);
1803 pci_unmap_single(qdev->pdev,
1804 dma_unmap_addr(sbq_desc,
1806 dma_unmap_len(sbq_desc,
1808 PCI_DMA_FROMDEVICE);
1809 sbq_desc->p.skb = NULL;
1811 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 "Header in small, %d bytes in large. Chain large to small!\n",
1817 * The data is in a single large buffer. We
1818 * chain it to the header buffer's skb and let
1821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823 "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 lbq_desc->p.pg_chunk.offset, length);
1825 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826 lbq_desc->p.pg_chunk.offset,
1829 skb->data_len += length;
1830 skb->truesize += length;
1833 * The headers and data are in a single large buffer. We
1834 * copy it to a new skb and let it go. This can happen with
1835 * jumbo mtu on a non-TCP/UDP frame.
1837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838 skb = netdev_alloc_skb(qdev->ndev, length);
1840 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841 "No skb available, drop the packet.\n");
1844 pci_unmap_page(qdev->pdev,
1845 dma_unmap_addr(lbq_desc,
1847 dma_unmap_len(lbq_desc, maplen),
1848 PCI_DMA_FROMDEVICE);
1849 skb_reserve(skb, NET_IP_ALIGN);
1850 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1853 skb_fill_page_desc(skb, 0,
1854 lbq_desc->p.pg_chunk.page,
1855 lbq_desc->p.pg_chunk.offset,
1858 skb->data_len += length;
1859 skb->truesize += length;
1861 __pskb_pull_tail(skb,
1862 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863 VLAN_ETH_HLEN : ETH_HLEN);
1867 * The data is in a chain of large buffers
1868 * pointed to by a small buffer. We loop
1869 * thru and chain them to the our small header
1871 * frags: There are 18 max frags and our small
1872 * buffer will hold 32 of them. The thing is,
1873 * we'll use 3 max for our 9000 byte jumbo
1874 * frames. If the MTU goes up we could
1875 * eventually be in trouble.
1878 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879 pci_unmap_single(qdev->pdev,
1880 dma_unmap_addr(sbq_desc, mapaddr),
1881 dma_unmap_len(sbq_desc, maplen),
1882 PCI_DMA_FROMDEVICE);
1883 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1885 * This is an non TCP/UDP IP frame, so
1886 * the headers aren't split into a small
1887 * buffer. We have to use the small buffer
1888 * that contains our sg list as our skb to
1889 * send upstairs. Copy the sg list here to
1890 * a local buffer and use it to find the
1893 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894 "%d bytes of headers & data in chain of large.\n",
1896 skb = sbq_desc->p.skb;
1897 sbq_desc->p.skb = NULL;
1898 skb_reserve(skb, NET_IP_ALIGN);
1900 while (length > 0) {
1901 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902 size = (length < rx_ring->lbq_buf_size) ? length :
1903 rx_ring->lbq_buf_size;
1905 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 "Adding page %d to skb for %d bytes.\n",
1908 skb_fill_page_desc(skb, i,
1909 lbq_desc->p.pg_chunk.page,
1910 lbq_desc->p.pg_chunk.offset,
1913 skb->data_len += size;
1914 skb->truesize += size;
1918 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919 VLAN_ETH_HLEN : ETH_HLEN);
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926 struct rx_ring *rx_ring,
1927 struct ib_mac_iocb_rsp *ib_mac_rsp,
1930 struct net_device *ndev = qdev->ndev;
1931 struct sk_buff *skb = NULL;
1933 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1935 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936 if (unlikely(!skb)) {
1937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "No skb available, drop packet.\n");
1939 rx_ring->rx_dropped++;
1943 /* Frame error, so drop the packet. */
1944 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945 netif_info(qdev, drv, qdev->ndev,
1946 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1947 dev_kfree_skb_any(skb);
1948 rx_ring->rx_errors++;
1952 /* The max framesize filter on this chip is set higher than
1953 * MTU since FCoE uses 2k frames.
1955 if (skb->len > ndev->mtu + ETH_HLEN) {
1956 dev_kfree_skb_any(skb);
1957 rx_ring->rx_dropped++;
1961 /* loopback self test for ethtool */
1962 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963 ql_check_lb_frame(qdev, skb);
1964 dev_kfree_skb_any(skb);
1968 prefetch(skb->data);
1970 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1971 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1978 rx_ring->rx_multicast++;
1980 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1981 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982 "Promiscuous Packet.\n");
1985 skb->protocol = eth_type_trans(skb, ndev);
1986 skb_checksum_none_assert(skb);
1988 /* If rx checksum is on, and there are no
1989 * csum or frame errors.
1991 if ((ndev->features & NETIF_F_RXCSUM) &&
1992 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1994 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1995 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996 "TCP checksum done!\n");
1997 skb->ip_summed = CHECKSUM_UNNECESSARY;
1998 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000 /* Unfragmented ipv4 UDP frame. */
2001 struct iphdr *iph = (struct iphdr *) skb->data;
2002 if (!(iph->frag_off &
2003 ntohs(IP_MF|IP_OFFSET))) {
2004 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006 "TCP checksum done!\n");
2011 rx_ring->rx_packets++;
2012 rx_ring->rx_bytes += skb->len;
2013 skb_record_rx_queue(skb, rx_ring->cq_id);
2014 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015 __vlan_hwaccel_put_tag(skb, vlan_id);
2016 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017 napi_gro_receive(&rx_ring->napi, skb);
2019 netif_receive_skb(skb);
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024 struct rx_ring *rx_ring,
2025 struct ib_mac_iocb_rsp *ib_mac_rsp)
2027 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2032 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2034 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035 /* The data and headers are split into
2038 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2040 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041 /* The data fit in a single small buffer.
2042 * Allocate a new skb, copy the data and
2043 * return the buffer to the free pool.
2045 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2047 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050 /* TCP packet in a page chunk that's been checksummed.
2051 * Tack it on to our GRO skb and let it go.
2053 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2055 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056 /* Non-TCP packet in a page chunk. Allocate an
2057 * skb, tack it on frags, and send it up.
2059 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2062 /* Non-TCP/UDP large frames that span multiple buffers
2063 * can be processed corrrectly by the split frame logic.
2065 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2069 return (unsigned long)length;
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 struct ob_mac_iocb_rsp *mac_rsp)
2076 struct tx_ring *tx_ring;
2077 struct tx_ring_desc *tx_ring_desc;
2079 QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 tx_ring->tx_packets++;
2085 dev_kfree_skb(tx_ring_desc->skb);
2086 tx_ring_desc->skb = NULL;
2088 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2091 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093 netif_warn(qdev, tx_done, qdev->ndev,
2094 "Total descriptor length did not match transfer length.\n");
2096 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097 netif_warn(qdev, tx_done, qdev->ndev,
2098 "Frame too short to be valid, not sent.\n");
2100 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101 netif_warn(qdev, tx_done, qdev->ndev,
2102 "Frame too long, but sent anyway.\n");
2104 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105 netif_warn(qdev, tx_done, qdev->ndev,
2106 "PCI backplane error. Frame not sent.\n");
2109 atomic_inc(&tx_ring->tx_count);
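	/* Each completion returns one slot to the TX ring; the queue is
	 * woken again from ql_clean_outbound_rx_ring() once enough slots
	 * are free.
	 */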
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
2135 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136 struct ib_ae_iocb_rsp *ib_ae_rsp)
2138 switch (ib_ae_rsp->event) {
2139 case MGMT_ERR_EVENT:
2140 netif_err(qdev, rx_err, qdev->ndev,
2141 "Management Processor Fatal Error.\n");
2142 ql_queue_fw_error(qdev);
2145 case CAM_LOOKUP_ERR_EVENT:
2146 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2148 ql_queue_asic_error(qdev);
2151 case SOFT_ECC_ERROR_EVENT:
2152 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2153 ql_queue_asic_error(qdev);
2156 case PCI_ERR_ANON_BUF_RD:
2157 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158 "anonymous buffers from rx_ring %d.\n",
2160 ql_queue_asic_error(qdev);
2164 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2166 ql_queue_asic_error(qdev);
2171 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2173 struct ql_adapter *qdev = rx_ring->qdev;
2174 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2175 struct ob_mac_iocb_rsp *net_rsp = NULL;
2178 struct tx_ring *tx_ring;
2179 /* While there are entries in the completion queue. */
2180 while (prod != rx_ring->cnsmr_idx) {
2182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2184 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2186 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2188 switch (net_rsp->opcode) {
2190 case OPCODE_OB_MAC_TSO_IOCB:
2191 case OPCODE_OB_MAC_IOCB:
2192 ql_process_mac_tx_intr(qdev, net_rsp);
2195 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2200 ql_update_cq(rx_ring);
2201 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2205 ql_write_cq_idx(rx_ring);
2206 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2207 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2208 if (atomic_read(&tx_ring->queue_stopped) &&
2209 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2211 * The queue got stopped because the tx_ring was full.
2212 * Wake it up, because it's now at least 25% empty.
2214 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2220 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2222 struct ql_adapter *qdev = rx_ring->qdev;
2223 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 struct ql_net_rsp_iocb *net_rsp;
2227 /* While there are entries in the completion queue. */
2228 while (prod != rx_ring->cnsmr_idx) {
2230 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2232 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2234 net_rsp = rx_ring->curr_entry;
2236 switch (net_rsp->opcode) {
2237 case OPCODE_IB_MAC_IOCB:
2238 ql_process_mac_rx_intr(qdev, rx_ring,
(struct ib_mac_iocb_rsp *)net_rsp);
break;
2243 case OPCODE_IB_AE_IOCB:
ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)net_rsp);
break;
default:
2248 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2254 ql_update_cq(rx_ring);
2255 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
if (count == budget)
break;
}
2259 ql_update_buffer_queues(qdev, rx_ring);
2260 ql_write_cq_idx(rx_ring);
2264 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2266 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267 struct ql_adapter *qdev = rx_ring->qdev;
2268 struct rx_ring *trx_ring;
2269 int i, work_done = 0;
2270 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2272 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2275 /* Service the TX rings first. They start
2276 * right after the RSS rings. */
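/* Ring layout, as implied by the loop bounds and irq_mask test below:
 * rx_ring[0..rss_ring_count-1] are the RSS (inbound) completion rings,
 * and rx_ring[rss_ring_count..rx_ring_count-1] are the outbound (TX
 * completion) rings; each vector services its own RSS ring plus any TX
 * completion ring whose cq_id bit is set in ctx->irq_mask.
 */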
2277 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278 trx_ring = &qdev->rx_ring[i];
2279 /* If this TX completion ring belongs to this vector and
 * it's not empty then service it.
 */
2282 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284 trx_ring->cnsmr_idx)) {
2285 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286 "%s: Servicing TX completion ring %d.\n",
2287 __func__, trx_ring->cq_id);
2288 ql_clean_outbound_rx_ring(trx_ring);
/* Now service the RSS ring if it's active. */
2295 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296 rx_ring->cnsmr_idx) {
2297 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298 "%s: Servicing RX completion ring %d.\n",
2299 __func__, rx_ring->cq_id);
2300 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2303 if (work_done < budget) {
2304 napi_complete(napi);
ql_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
}
2310 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2312 struct ql_adapter *qdev = netdev_priv(ndev);
2314 if (features & NETIF_F_HW_VLAN_RX) {
2315 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316 "Turning on VLAN in NIC_RCV_CFG.\n");
2317 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2320 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321 "Turning off VLAN in NIC_RCV_CFG.\n");
2322 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2326 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
/*
 * Since there is no support for separate rx/tx vlan accel
 * enable/disable, make sure the tx flag is always in the same state as rx.
 */
2332 if (features & NETIF_F_HW_VLAN_RX)
2333 features |= NETIF_F_HW_VLAN_TX;
2335 features &= ~NETIF_F_HW_VLAN_TX;
2340 static int qlge_set_features(struct net_device *ndev, u32 features)
2342 u32 changed = ndev->features ^ features;
2344 if (changed & NETIF_F_HW_VLAN_RX)
2345 qlge_vlan_mode(ndev, features);
2350 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2352 u32 enable_bit = MAC_ADDR_E;
2354 if (ql_set_mac_addr_reg
2355 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2356 netif_err(qdev, ifup, qdev->ndev,
2357 "Failed to init vlan address.\n");
2361 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2363 struct ql_adapter *qdev = netdev_priv(ndev);
2366 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2370 __qlge_vlan_rx_add_vid(qdev, vid);
2371 set_bit(vid, qdev->active_vlans);
2373 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2376 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2380 if (ql_set_mac_addr_reg
2381 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2382 netif_err(qdev, ifup, qdev->ndev,
2383 "Failed to clear vlan address.\n");
2387 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2389 struct ql_adapter *qdev = netdev_priv(ndev);
2392 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2396 __qlge_vlan_rx_kill_vid(qdev, vid);
2397 clear_bit(vid, qdev->active_vlans);
2399 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2402 static void qlge_restore_vlan(struct ql_adapter *qdev)
2407 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2411 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2412 __qlge_vlan_rx_add_vid(qdev, vid);
2414 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2417 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2418 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2420 struct rx_ring *rx_ring = dev_id;
napi_schedule(&rx_ring->napi);
return IRQ_HANDLED;
}
2425 /* This handles a fatal error, MPI activity, and the default
2426 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx rings.
 */
2430 static irqreturn_t qlge_isr(int irq, void *dev_id)
2432 struct rx_ring *rx_ring = dev_id;
2433 struct ql_adapter *qdev = rx_ring->qdev;
2434 struct intr_context *intr_context = &qdev->intr_context[0];
2438 spin_lock(&qdev->hw_lock);
2439 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2440 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2441 "Shared Interrupt, Not ours!\n");
2442 spin_unlock(&qdev->hw_lock);
2445 spin_unlock(&qdev->hw_lock);
2447 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
/* Check for fatal error. */
2453 ql_queue_asic_error(qdev);
2454 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2455 var = ql_read32(qdev, ERR_STS);
2456 netdev_err(qdev->ndev, "Resetting chip. "
2457 "Error Status Register = 0x%x\n", var);
/* Check MPI processor activity. */
2464 if ((var & STS_PI) &&
2465 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
/*
 * We've got an async event or mailbox completion.
 * Handle it and clear the source of the interrupt.
 */
2470 netif_err(qdev, intr, qdev->ndev,
2471 "Got MPI processor interrupt.\n");
2472 ql_disable_completion_interrupt(qdev, intr_context->intr);
2473 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2474 queue_delayed_work_on(smp_processor_id(),
2475 qdev->workqueue, &qdev->mpi_work, 0);
/*
 * Get the bit-mask that shows the active queues for this
 * pass. Compare it to the queues that this irq services
 * and call napi if there's a match.
 */
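/* Illustrative example (hypothetical register value): if ISR1 reads
 * 0x00000001 and this vector's irq_mask has bit 0 set (irq_mask is
 * built as 1 << cq_id elsewhere in the poll path), then completion
 * queue 0 has work pending and its NAPI handler is scheduled below.
 */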
2484 var = ql_read32(qdev, ISR1);
2485 if (var & intr_context->irq_mask) {
2486 netif_info(qdev, intr, qdev->ndev,
2487 "Waking handler for rx_ring[0].\n");
2488 ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
}
2492 ql_enable_completion_interrupt(qdev, intr_context->intr);
2493 return work_done ? IRQ_HANDLED : IRQ_NONE;
2496 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2499 if (skb_is_gso(skb)) {
2501 if (skb_header_cloned(skb)) {
2502 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2507 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2508 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2509 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2510 mac_iocb_ptr->total_hdrs_len =
2511 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2512 mac_iocb_ptr->net_trans_offset =
2513 cpu_to_le16(skb_network_offset(skb) |
2514 skb_transport_offset(skb)
2515 << OB_MAC_TRANSPORT_HDR_SHIFT);
2516 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2517 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2518 if (likely(skb->protocol == htons(ETH_P_IP))) {
2519 struct iphdr *iph = ip_hdr(skb);
2521 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2522 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2526 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2527 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2528 tcp_hdr(skb)->check =
2529 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2530 &ipv6_hdr(skb)->daddr,
2538 static void ql_hw_csum_setup(struct sk_buff *skb,
2539 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2542 struct iphdr *iph = ip_hdr(skb);
2544 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2545 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2546 mac_iocb_ptr->net_trans_offset =
2547 cpu_to_le16(skb_network_offset(skb) |
2548 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2550 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2551 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2552 if (likely(iph->protocol == IPPROTO_TCP)) {
2553 check = &(tcp_hdr(skb)->check);
2554 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2555 mac_iocb_ptr->total_hdrs_len =
2556 cpu_to_le16(skb_transport_offset(skb) +
2557 (tcp_hdr(skb)->doff << 2));
2559 check = &(udp_hdr(skb)->check);
2560 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2561 mac_iocb_ptr->total_hdrs_len =
2562 cpu_to_le16(skb_transport_offset(skb) +
2563 sizeof(struct udphdr));
2565 *check = ~csum_tcpudp_magic(iph->saddr,
2566 iph->daddr, len, iph->protocol, 0);
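/* What the seeding above accomplishes (general CHECKSUM_PARTIAL
 * convention; the exact hardware contract is an assumption): the
 * TCP/UDP checksum field is preloaded with the inverted pseudo-header
 * sum, e.g. for TCP
 *
 *	*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 *				    len, IPPROTO_TCP, 0);
 *
 * so the chip only has to fold in the one's-complement sum of the
 * segment it actually transmits.
 */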
2569 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2571 struct tx_ring_desc *tx_ring_desc;
2572 struct ob_mac_iocb_req *mac_iocb_ptr;
2573 struct ql_adapter *qdev = netdev_priv(ndev);
2575 struct tx_ring *tx_ring;
2576 u32 tx_ring_idx = (u32) skb->queue_mapping;
2578 tx_ring = &qdev->tx_ring[tx_ring_idx];
2580 if (skb_padto(skb, ETH_ZLEN))
2581 return NETDEV_TX_OK;
2583 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2584 netif_info(qdev, tx_queued, qdev->ndev,
2585 "%s: shutting down tx queue %d du to lack of resources.\n",
2586 __func__, tx_ring_idx);
2587 netif_stop_subqueue(ndev, tx_ring->wq_id);
2588 atomic_inc(&tx_ring->queue_stopped);
2589 tx_ring->tx_errors++;
2590 return NETDEV_TX_BUSY;
2592 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2593 mac_iocb_ptr = tx_ring_desc->queue_entry;
2594 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2596 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2597 mac_iocb_ptr->tid = tx_ring_desc->index;
2598 /* We use the upper 32-bits to store the tx queue for this IO.
 * When we get the completion we can use it to establish the context.
 */
2601 mac_iocb_ptr->txq_idx = tx_ring_idx;
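/* Cross-reference (for clarity): ql_clean_outbound_rx_ring() reads
 * this value back from the completion as net_rsp->txq_idx to locate
 * the owning tx_ring, which is how a TX completion is matched to its
 * queue.
 */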
2602 tx_ring_desc->skb = skb;
2604 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2606 if (vlan_tx_tag_present(skb)) {
2607 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2608 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2609 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2610 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (tso < 0) {
2614 dev_kfree_skb_any(skb);
2615 return NETDEV_TX_OK;
2616 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2617 ql_hw_csum_setup(skb,
2618 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
2622 netif_err(qdev, tx_queued, qdev->ndev,
2623 "Could not map the segments.\n");
2624 tx_ring->tx_errors++;
2625 return NETDEV_TX_BUSY;
2627 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2628 tx_ring->prod_idx++;
2629 if (tx_ring->prod_idx == tx_ring->wq_len)
2630 tx_ring->prod_idx = 0;
2633 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2634 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2635 "tx queued, slot %d, len %d\n",
2636 tx_ring->prod_idx, skb->len);
2638 atomic_dec(&tx_ring->tx_count);
2639 return NETDEV_TX_OK;
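/* Descriptor accounting in this path (summary of the surrounding
 * code): tx_count is decremented here for every frame posted and
 * incremented again by ql_process_mac_tx_intr() when the completion
 * IOCB arrives; the subqueue is stopped above once fewer than two free
 * descriptors remain, and ql_clean_outbound_rx_ring() re-wakes it once
 * the ring is at least 25% free.
 */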
2643 static void ql_free_shadow_space(struct ql_adapter *qdev)
2645 if (qdev->rx_ring_shadow_reg_area) {
2646 pci_free_consistent(qdev->pdev,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
2650 qdev->rx_ring_shadow_reg_area = NULL;
2652 if (qdev->tx_ring_shadow_reg_area) {
2653 pci_free_consistent(qdev->pdev,
2655 qdev->tx_ring_shadow_reg_area,
2656 qdev->tx_ring_shadow_reg_dma);
2657 qdev->tx_ring_shadow_reg_area = NULL;
2661 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2663 qdev->rx_ring_shadow_reg_area =
2664 pci_alloc_consistent(qdev->pdev,
2665 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2666 if (qdev->rx_ring_shadow_reg_area == NULL) {
2667 netif_err(qdev, ifup, qdev->ndev,
2668 "Allocation of RX shadow space failed.\n");
2671 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2672 qdev->tx_ring_shadow_reg_area =
2673 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2674 &qdev->tx_ring_shadow_reg_dma);
2675 if (qdev->tx_ring_shadow_reg_area == NULL) {
2676 netif_err(qdev, ifup, qdev->ndev,
2677 "Allocation of TX shadow space failed.\n");
2678 goto err_wqp_sh_area;
2680 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2684 pci_free_consistent(qdev->pdev,
2686 qdev->rx_ring_shadow_reg_area,
2687 qdev->rx_ring_shadow_reg_dma);
2691 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2693 struct tx_ring_desc *tx_ring_desc;
2695 struct ob_mac_iocb_req *mac_iocb_ptr;
2697 mac_iocb_ptr = tx_ring->wq_base;
2698 tx_ring_desc = tx_ring->q;
2699 for (i = 0; i < tx_ring->wq_len; i++) {
2700 tx_ring_desc->index = i;
2701 tx_ring_desc->skb = NULL;
2702 tx_ring_desc->queue_entry = mac_iocb_ptr;
2706 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2707 atomic_set(&tx_ring->queue_stopped, 0);
2710 static void ql_free_tx_resources(struct ql_adapter *qdev,
2711 struct tx_ring *tx_ring)
2713 if (tx_ring->wq_base) {
2714 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2715 tx_ring->wq_base, tx_ring->wq_base_dma);
2716 tx_ring->wq_base = NULL;
2722 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2723 struct tx_ring *tx_ring)
2726 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2727 &tx_ring->wq_base_dma);
2729 if ((tx_ring->wq_base == NULL) ||
2730 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2731 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2735 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2736 if (tx_ring->q == NULL)
2741 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2742 tx_ring->wq_base, tx_ring->wq_base_dma);
2746 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2748 struct bq_desc *lbq_desc;
2750 uint32_t curr_idx, clean_idx;
2752 curr_idx = rx_ring->lbq_curr_idx;
2753 clean_idx = rx_ring->lbq_clean_idx;
2754 while (curr_idx != clean_idx) {
2755 lbq_desc = &rx_ring->lbq[curr_idx];
2757 if (lbq_desc->p.pg_chunk.last_flag) {
2758 pci_unmap_page(qdev->pdev,
2759 lbq_desc->p.pg_chunk.map,
2760 ql_lbq_block_size(qdev),
2761 PCI_DMA_FROMDEVICE);
2762 lbq_desc->p.pg_chunk.last_flag = 0;
2765 put_page(lbq_desc->p.pg_chunk.page);
2766 lbq_desc->p.pg_chunk.page = NULL;
2768 if (++curr_idx == rx_ring->lbq_len)
2774 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2777 struct bq_desc *sbq_desc;
2779 for (i = 0; i < rx_ring->sbq_len; i++) {
2780 sbq_desc = &rx_ring->sbq[i];
2781 if (sbq_desc == NULL) {
2782 netif_err(qdev, ifup, qdev->ndev,
2783 "sbq_desc %d is NULL.\n", i);
2786 if (sbq_desc->p.skb) {
2787 pci_unmap_single(qdev->pdev,
2788 dma_unmap_addr(sbq_desc, mapaddr),
2789 dma_unmap_len(sbq_desc, maplen),
2790 PCI_DMA_FROMDEVICE);
2791 dev_kfree_skb(sbq_desc->p.skb);
2792 sbq_desc->p.skb = NULL;
2797 /* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
2800 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2803 struct rx_ring *rx_ring;
2805 for (i = 0; i < qdev->rx_ring_count; i++) {
2806 rx_ring = &qdev->rx_ring[i];
2808 ql_free_lbq_buffers(qdev, rx_ring);
2810 ql_free_sbq_buffers(qdev, rx_ring);
2814 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2816 struct rx_ring *rx_ring;
2819 for (i = 0; i < qdev->rx_ring_count; i++) {
2820 rx_ring = &qdev->rx_ring[i];
2821 if (rx_ring->type != TX_Q)
2822 ql_update_buffer_queues(qdev, rx_ring);
2826 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2827 struct rx_ring *rx_ring)
2830 struct bq_desc *lbq_desc;
2831 __le64 *bq = rx_ring->lbq_base;
2833 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2834 for (i = 0; i < rx_ring->lbq_len; i++) {
2835 lbq_desc = &rx_ring->lbq[i];
2836 memset(lbq_desc, 0, sizeof(*lbq_desc));
2837 lbq_desc->index = i;
2838 lbq_desc->addr = bq;
2843 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2844 struct rx_ring *rx_ring)
2847 struct bq_desc *sbq_desc;
2848 __le64 *bq = rx_ring->sbq_base;
2850 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2851 for (i = 0; i < rx_ring->sbq_len; i++) {
2852 sbq_desc = &rx_ring->sbq[i];
2853 memset(sbq_desc, 0, sizeof(*sbq_desc));
2854 sbq_desc->index = i;
2855 sbq_desc->addr = bq;
2860 static void ql_free_rx_resources(struct ql_adapter *qdev,
2861 struct rx_ring *rx_ring)
2863 /* Free the small buffer queue. */
2864 if (rx_ring->sbq_base) {
2865 pci_free_consistent(qdev->pdev,
2867 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2868 rx_ring->sbq_base = NULL;
2871 /* Free the small buffer queue control blocks. */
2872 kfree(rx_ring->sbq);
2873 rx_ring->sbq = NULL;
2875 /* Free the large buffer queue. */
2876 if (rx_ring->lbq_base) {
2877 pci_free_consistent(qdev->pdev,
2879 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2880 rx_ring->lbq_base = NULL;
2883 /* Free the large buffer queue control blocks. */
2884 kfree(rx_ring->lbq);
2885 rx_ring->lbq = NULL;
2887 /* Free the rx queue. */
2888 if (rx_ring->cq_base) {
2889 pci_free_consistent(qdev->pdev,
2891 rx_ring->cq_base, rx_ring->cq_base_dma);
2892 rx_ring->cq_base = NULL;
/* Allocate queues and buffers for this completion queue based
2897 * on the values in the parameter structure. */
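/* In outline (mirroring the code below): allocate the DMA-coherent
 * completion queue first, then, only for rings that use them, the
 * small and large buffer queues plus their kmalloc'd bq_desc control
 * blocks; any failure unwinds through ql_free_rx_resources().
 */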
2898 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2899 struct rx_ring *rx_ring)
/* Allocate the completion queue for this rx_ring. */
2906 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2907 &rx_ring->cq_base_dma);
2909 if (rx_ring->cq_base == NULL) {
2910 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2914 if (rx_ring->sbq_len) {
/* Allocate small buffer queue. */
2919 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2920 &rx_ring->sbq_base_dma);
2922 if (rx_ring->sbq_base == NULL) {
2923 netif_err(qdev, ifup, qdev->ndev,
2924 "Small buffer queue allocation failed.\n");
/* Allocate small buffer queue control blocks. */
2932 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2934 if (rx_ring->sbq == NULL) {
2935 netif_err(qdev, ifup, qdev->ndev,
2936 "Small buffer queue control block allocation failed.\n");
2940 ql_init_sbq_ring(qdev, rx_ring);
2943 if (rx_ring->lbq_len) {
/* Allocate large buffer queue. */
2948 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2949 &rx_ring->lbq_base_dma);
2951 if (rx_ring->lbq_base == NULL) {
2952 netif_err(qdev, ifup, qdev->ndev,
2953 "Large buffer queue allocation failed.\n");
/* Allocate large buffer queue control blocks. */
2960 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2962 if (rx_ring->lbq == NULL) {
2963 netif_err(qdev, ifup, qdev->ndev,
2964 "Large buffer queue control block allocation failed.\n");
2968 ql_init_lbq_ring(qdev, rx_ring);
2974 ql_free_rx_resources(qdev, rx_ring);
2978 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2980 struct tx_ring *tx_ring;
2981 struct tx_ring_desc *tx_ring_desc;
/*
 * Loop through all queues and free
 * any skbs still held by the tx rings.
 */
2988 for (j = 0; j < qdev->tx_ring_count; j++) {
2989 tx_ring = &qdev->tx_ring[j];
2990 for (i = 0; i < tx_ring->wq_len; i++) {
2991 tx_ring_desc = &tx_ring->q[i];
2992 if (tx_ring_desc && tx_ring_desc->skb) {
2993 netif_err(qdev, ifdown, qdev->ndev,
2994 "Freeing lost SKB %p, from queue %d, index %d.\n",
2995 tx_ring_desc->skb, j,
2996 tx_ring_desc->index);
2997 ql_unmap_send(qdev, tx_ring_desc,
2998 tx_ring_desc->map_cnt);
2999 dev_kfree_skb(tx_ring_desc->skb);
3000 tx_ring_desc->skb = NULL;
3006 static void ql_free_mem_resources(struct ql_adapter *qdev)
3010 for (i = 0; i < qdev->tx_ring_count; i++)
3011 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3012 for (i = 0; i < qdev->rx_ring_count; i++)
3013 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3014 ql_free_shadow_space(qdev);
3017 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3021 /* Allocate space for our shadow registers and such. */
3022 if (ql_alloc_shadow_space(qdev))
3025 for (i = 0; i < qdev->rx_ring_count; i++) {
3026 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3027 netif_err(qdev, ifup, qdev->ndev,
3028 "RX resource allocation failed.\n");
3032 /* Allocate tx queue resources */
3033 for (i = 0; i < qdev->tx_ring_count; i++) {
3034 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3035 netif_err(qdev, ifup, qdev->ndev,
3036 "TX resource allocation failed.\n");
3043 ql_free_mem_resources(qdev);
3047 /* Set up the rx ring control block and pass it to the chip.
3048 * The control block is defined as
3049 * "Completion Queue Initialization Control Block", or cqicb.
3051 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3053 struct cqicb *cqicb = &rx_ring->cqicb;
3054 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3055 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3056 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3057 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3058 void __iomem *doorbell_area =
3059 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3063 __le64 *base_indirect_ptr;
3066 /* Set up the shadow registers for this ring. */
3067 rx_ring->prod_idx_sh_reg = shadow_reg;
3068 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3069 *rx_ring->prod_idx_sh_reg = 0;
3070 shadow_reg += sizeof(u64);
3071 shadow_reg_dma += sizeof(u64);
3072 rx_ring->lbq_base_indirect = shadow_reg;
3073 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3074 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3075 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3076 rx_ring->sbq_base_indirect = shadow_reg;
3077 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
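/* Per-CQ shadow area layout, as carved out by the pointer arithmetic
 * above (byte offsets within this ring's RX_RING_SHADOW_SPACE slice):
 *
 *	+0	producer index (one u64, updated by the adapter)
 *	+8	lbq base-address indirection list,
 *		sizeof(u64) * MAX_DB_PAGES_PER_BQ(lbq_len) bytes
 *	then	sbq base-address indirection list
 */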
3079 /* PCI doorbell mem area + 0x00 for consumer index register */
3080 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3081 rx_ring->cnsmr_idx = 0;
3082 rx_ring->curr_entry = rx_ring->cq_base;
3084 /* PCI doorbell mem area + 0x04 for valid register */
3085 rx_ring->valid_db_reg = doorbell_area + 0x04;
3087 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3088 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3090 /* PCI doorbell mem area + 0x1c */
3091 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
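/* Doorbell layout for this completion queue, per the offsets used
 * above: the ring's doorbell page sits at DB_PAGE_SIZE * (128 + cq_id)
 * within the doorbell area, with
 *
 *	+0x00	consumer index register
 *	+0x04	valid register
 *	+0x18	large buffer queue producer index
 *	+0x1c	small buffer queue producer index
 */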
3093 memset((void *)cqicb, 0, sizeof(struct cqicb));
3094 cqicb->msix_vect = rx_ring->irq;
3096 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3097 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3099 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3101 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
/* Set up the control block load flags. */
3106 cqicb->flags = FLAGS_LC | /* Load queue base address */
3107 FLAGS_LV | /* Load MSI-X vector */
3108 FLAGS_LI; /* Load irq delay values */
3109 if (rx_ring->lbq_len) {
3110 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3111 tmp = (u64)rx_ring->lbq_base_dma;
3112 base_indirect_ptr = rx_ring->lbq_base_indirect;
3115 *base_indirect_ptr = cpu_to_le64(tmp);
3116 tmp += DB_PAGE_SIZE;
3117 base_indirect_ptr++;
3119 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3121 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3122 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3123 (u16) rx_ring->lbq_buf_size;
3124 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3125 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3126 (u16) rx_ring->lbq_len;
3127 cqicb->lbq_len = cpu_to_le16(bq_len);
3128 rx_ring->lbq_prod_idx = 0;
3129 rx_ring->lbq_curr_idx = 0;
3130 rx_ring->lbq_clean_idx = 0;
3131 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3133 if (rx_ring->sbq_len) {
3134 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3135 tmp = (u64)rx_ring->sbq_base_dma;
3136 base_indirect_ptr = rx_ring->sbq_base_indirect;