2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
28 #include <linux/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
82 "Default is OFF - Do Not allocate memory. ");
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 /* required last entry */
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
102 /* This hardware semaphore provides exclusive access to
103 * resources shared between the NIC driver, MPI firmware,
104 * FCOE firmware and the FC driver.
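 * For example (illustrative): to request the flash lock,
 * ql_sem_trylock() writes (SEM_SET << SEM_FLASH_SHIFT) | SEM_FLASH_MASK
 * to the SEM register and then reads SEM back; if the set bit did not
 * stick, another function already owns that sub-semaphore.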
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
136 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 unsigned int wait_count = 30;
148 if (!ql_sem_trylock(qdev, sem_mask))
151 } while (--wait_count);
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
161 /* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialization
163 * process, but is also used by kernel thread APIs such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
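 * The wait is a bounded poll: up to UDELAY_COUNT reads of the register
 * with udelay(UDELAY_DELAY) between them, so the worst-case wait is
 * roughly UDELAY_COUNT * UDELAY_DELAY microseconds (both constants are
 * defined elsewhere in the driver).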
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169 int count = UDELAY_COUNT;
172 temp = ql_read32(qdev, reg);
174 /* check for errors */
175 if (temp & err_bit) {
176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
180 } else if (temp & bit)
182 udelay(UDELAY_DELAY);
185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
190 /* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 int count = UDELAY_COUNT;
199 temp = ql_read32(qdev, CFG);
204 udelay(UDELAY_DELAY);
211 /* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
237 status = ql_wait_cfg(qdev, bit);
239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
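/* Note: the upper 16 bits written to CFG appear to act as a write-enable
 * mask for the corresponding lower bits (the same pattern is used for the
 * INTR_EN and STS registers elsewhere in this file), so only the requested
 * bits are modified.
 */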
252 * Wait for the bit to clear after signaling hw.
254 status = ql_wait_cfg(qdev, bit);
256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257 pci_unmap_single(qdev->pdev, map, size, direction);
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
273 ql_wait_reg_rdy(qdev,
274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 ql_wait_reg_rdy(qdev,
282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 ql_wait_reg_rdy(qdev,
288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 ql_wait_reg_rdy(qdev,
296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 ql_wait_reg_rdy(qdev,
303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
329 /* Set up a MAC, multicast or VLAN address for
330 * inbound frame matching.
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
339 case MAC_ADDR_TYPE_MULTI_MAC:
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
371 case MAC_ADDR_TYPE_CAM_MAC:
374 u32 upper = (addr[0] << 8) | addr[1];
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
386 ql_wait_reg_rdy(qdev,
387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
393 ql_write32(qdev, MAC_ADDR_DATA, lower);
395 ql_wait_reg_rdy(qdev,
396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 ql_write32(qdev, MAC_ADDR_DATA, upper);
404 ql_wait_reg_rdy(qdev,
405 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
408 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
409 (index << MAC_ADDR_IDX_SHIFT) | /* index */
411 /* This field should also include the queue id
412 and possibly the function id. Right now we hardcode
413 the route field to NIC core.
415 cam_output = (CAM_OUT_ROUTE_NIC |
417 func << CAM_OUT_FUNC_SHIFT) |
418 (0 << CAM_OUT_CQ_ID_SHIFT));
419 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420 cam_output |= CAM_OUT_RV;
421 /* route to NIC core */
422 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
425 case MAC_ADDR_TYPE_VLAN:
427 u32 enable_bit = *((u32 *) &addr[0]);
428 /* For VLAN, the addr actually holds a bit that
429 * either enables or disables the vlan id we are
430 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about.
433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
437 enable_bit ? "to" : "from");
440 ql_wait_reg_rdy(qdev,
441 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
444 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445 (index << MAC_ADDR_IDX_SHIFT) | /* index */
447 enable_bit); /* enable/disable */
450 case MAC_ADDR_TYPE_MULTI_FLTR:
452 netif_crit(qdev, ifup, qdev->ndev,
453 "Address type %d not yet supported.\n", type);
460 /* Set or clear MAC address in hardware. We sometimes
461 * have to clear it to prevent wrong frame routing
462 * especially in a bonding environment.
464 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
467 char zero_mac_addr[ETH_ALEN];
471 addr = &qdev->current_mac_addr[0];
472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Set Mac addr %pM\n", addr);
475 memset(zero_mac_addr, 0, ETH_ALEN);
476 addr = &zero_mac_addr[0];
477 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 "Clearing MAC address\n");
480 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
483 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
487 netif_err(qdev, ifup, qdev->ndev,
488 "Failed to init mac address.\n");
492 void ql_link_on(struct ql_adapter *qdev)
494 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495 netif_carrier_on(qdev->ndev);
496 ql_set_mac_addr(qdev, 1);
499 void ql_link_off(struct ql_adapter *qdev)
501 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502 netif_carrier_off(qdev->ndev);
503 ql_set_mac_addr(qdev, 0);
506 /* Get a specific frame routing value from the CAM.
507 * Used for debug and reg dump.
509 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
513 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
517 ql_write32(qdev, RT_IDX,
518 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
522 *value = ql_read32(qdev, RT_DATA);
527 /* The NIC function for this chip has 16 routing indexes. Each one can be used
528 * to route different frame types to various inbound queues. We send broadcast/
529 * multicast/error frames to the default queue for slow handling,
530 * and CAM hit/RSS frames to the fast handling queues.
532 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
535 int status = -EINVAL; /* Return error if no mask match. */
538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
563 value = RT_IDX_DST_CAM_Q | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 case RT_IDX_VALID: /* Promiscuous Mode frames. */
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
577 value = RT_IDX_DST_DFLT_Q | /* dest */
578 RT_IDX_TYPE_NICQ | /* type */
579 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
584 value = RT_IDX_DST_DFLT_Q | /* dest */
585 RT_IDX_TYPE_NICQ | /* type */
586 (RT_IDX_IP_CSUM_ERR_SLOT <<
587 RT_IDX_IDX_SHIFT); /* index */
590 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
592 value = RT_IDX_DST_DFLT_Q | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 RT_IDX_IDX_SHIFT); /* index */
598 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
600 value = RT_IDX_DST_DFLT_Q | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
607 value = RT_IDX_DST_DFLT_Q | /* dest */
608 RT_IDX_TYPE_NICQ | /* type */
609 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
612 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
614 value = RT_IDX_DST_DFLT_Q | /* dest */
615 RT_IDX_TYPE_NICQ | /* type */
616 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
619 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
621 value = RT_IDX_DST_RSS | /* dest */
622 RT_IDX_TYPE_NICQ | /* type */
623 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
626 case 0: /* Clear the E-bit on an entry. */
628 value = RT_IDX_DST_DFLT_Q | /* dest */
629 RT_IDX_TYPE_NICQ | /* type */
630 (index << RT_IDX_IDX_SHIFT);/* index */
634 netif_err(qdev, ifup, qdev->ndev,
635 "Mask type %d not yet supported.\n", mask);
641 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
644 value |= (enable ? RT_IDX_E : 0);
645 ql_write32(qdev, RT_IDX, value);
646 ql_write32(qdev, RT_DATA, enable ? mask : 0);
652 static void ql_enable_interrupts(struct ql_adapter *qdev)
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
657 static void ql_disable_interrupts(struct ql_adapter *qdev)
659 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663 * Otherwise, we may have multiple outstanding workers and don't want to
664 * enable until the last one finishes. In this case, the irq_cnt gets
665 * incremented every time we queue a worker and decremented every time
666 * a worker finishes. Once it hits zero we enable the interrupt.
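 * Concretely, ql_enable_all_completion_interrupts() below precharges
 * irq_cnt to 1 for the non-MSI-X case so that the atomic_dec_and_test()
 * in ql_enable_completion_interrupt() reaches zero exactly once and the
 * interrupt is re-armed a single time.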
668 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
671 unsigned long hw_flags = 0;
672 struct intr_context *ctx = qdev->intr_context + intr;
674 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 /* Always enable if we're MSIX multi interrupts and
676 * it's not the default (zeroeth) interrupt.
678 ql_write32(qdev, INTR_EN,
680 var = ql_read32(qdev, STS);
684 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 ql_write32(qdev, INTR_EN,
688 var = ql_read32(qdev, STS);
690 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
694 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
697 struct intr_context *ctx;
699 /* HW disables for us if we're MSIX multi interrupts and
700 * it's not the default (zeroeth) interrupt.
702 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
705 ctx = qdev->intr_context + intr;
706 spin_lock(&qdev->hw_lock);
707 if (!atomic_read(&ctx->irq_cnt)) {
708 ql_write32(qdev, INTR_EN,
710 var = ql_read32(qdev, STS);
712 atomic_inc(&ctx->irq_cnt);
713 spin_unlock(&qdev->hw_lock);
717 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
720 for (i = 0; i < qdev->intr_count; i++) {
721 /* The enable call does an atomic_dec_and_test
722 * and enables only if the result is zero.
723 * So we precharge it here.
725 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
727 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728 ql_enable_completion_interrupt(qdev, i);
733 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
737 __le16 *flash = (__le16 *)&qdev->flash;
739 status = strncmp((char *)&qdev->flash, str, 4);
741 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
745 for (i = 0; i < size; i++)
746 csum += le16_to_cpu(*flash++);
749 netif_err(qdev, ifup, qdev->ndev,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
755 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
758 /* wait for reg to come ready */
759 status = ql_wait_reg_rdy(qdev,
760 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
763 /* set up for reg read */
764 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 /* wait for reg to come ready */
766 status = ql_wait_reg_rdy(qdev,
767 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
770 /* This data is stored on flash as an array of
771 * __le32. Since ql_read32() returns cpu endian
772 * we need to swap it back.
774 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
779 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
783 __le32 *p = (__le32 *)&qdev->flash;
787 /* Get flash offset for function and adjust
791 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
793 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
795 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
798 size = sizeof(struct flash_params_8000) / sizeof(u32);
799 for (i = 0; i < size; i++, p++) {
800 status = ql_read_flash_word(qdev, i+offset, p);
802 netif_err(qdev, ifup, qdev->ndev,
803 "Error reading flash.\n");
808 status = ql_validate_flash(qdev,
809 sizeof(struct flash_params_8000) / sizeof(u16),
812 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
817 /* Extract either manufacturer or BOFM modified MAC address.
820 if (qdev->flash.flash_params_8000.data_type1 == 2)
822 qdev->flash.flash_params_8000.mac_addr1,
823 qdev->ndev->addr_len);
826 qdev->flash.flash_params_8000.mac_addr,
827 qdev->ndev->addr_len);
829 if (!is_valid_ether_addr(mac_addr)) {
830 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
835 memcpy(qdev->ndev->dev_addr,
837 qdev->ndev->addr_len);
840 ql_sem_unlock(qdev, SEM_FLASH_MASK);
844 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
848 __le32 *p = (__le32 *)&qdev->flash;
850 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
852 /* Second function's parameters follow the first function's.
858 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
861 for (i = 0; i < size; i++, p++) {
862 status = ql_read_flash_word(qdev, i+offset, p);
864 netif_err(qdev, ifup, qdev->ndev,
865 "Error reading flash.\n");
871 status = ql_validate_flash(qdev,
872 sizeof(struct flash_params_8012) / sizeof(u16),
875 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
880 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
885 memcpy(qdev->ndev->dev_addr,
886 qdev->flash.flash_params_8012.mac_addr,
887 qdev->ndev->addr_len);
890 ql_sem_unlock(qdev, SEM_FLASH_MASK);
894 /* xgmac registers are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
898 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
906 /* write the data to the data reg */
907 ql_write32(qdev, XGMAC_DATA, data);
908 /* trigger the write */
909 ql_write32(qdev, XGMAC_ADDR, reg);
913 /* xgmac registers are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
917 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
920 /* wait for reg to come ready */
921 status = ql_wait_reg_rdy(qdev,
922 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
925 /* set up for reg read */
926 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 /* wait for reg to come ready */
928 status = ql_wait_reg_rdy(qdev,
929 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
933 *data = ql_read32(qdev, XGMAC_DATA);
938 /* This is used for reading the 64-bit statistics regs. */
939 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
945 status = ql_read_xgmac_reg(qdev, reg, &lo);
949 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
953 *data = (u64) lo | ((u64) hi << 32);
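/* The 64-bit counter is exposed as two 32-bit registers: the low word at
 * 'reg' and the high word at 'reg + 4', combined here into one u64.
 */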
959 static int ql_8000_port_initialize(struct ql_adapter *qdev)
963 * Get MPI firmware version for driver banner
966 status = ql_mb_about_fw(qdev);
969 status = ql_mb_get_fw_state(qdev);
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
978 /* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a later date.
984 static int ql_8012_port_initialize(struct ql_adapter *qdev)
989 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
993 netif_info(qdev, link, qdev->ndev,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
997 netif_crit(qdev, link, qdev->ndev,
998 "Port initialize timed out.\n");
1003 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1004 /* Set the core reset. */
1005 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1008 data |= GLOBAL_CFG_RESET;
1009 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1015 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1016 data |= GLOBAL_CFG_TX_STAT_EN;
1017 data |= GLOBAL_CFG_RX_STAT_EN;
1018 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1022 /* Enable transmitter, and clear its reset. */
1023 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1026 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1027 data |= TX_CFG_EN; /* Enable the transmitter. */
1028 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1032 /* Enable receiver and clear its reset. */
1033 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1036 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1037 data |= RX_CFG_EN; /* Enable the receiver. */
1038 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1042 /* Turn on jumbo. */
1044 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1048 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1055 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1059 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1061 return PAGE_SIZE << qdev->lbq_buf_order;
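/* Illustrative sizing: with 4 KiB pages and lbq_buf_order == 1 this is an
 * 8 KiB master block, which is later carved into lbq_buf_size chunks
 * (actual values depend on configuration elsewhere in the driver).
 */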
1064 /* Get the next large buffer. */
1065 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1067 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068 rx_ring->lbq_curr_idx++;
1069 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070 rx_ring->lbq_curr_idx = 0;
1071 rx_ring->lbq_free_cnt++;
1075 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 struct rx_ring *rx_ring)
1078 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1080 pci_dma_sync_single_for_cpu(qdev->pdev,
1081 dma_unmap_addr(lbq_desc, mapaddr),
1082 rx_ring->lbq_buf_size,
1083 PCI_DMA_FROMDEVICE);
1085 /* If it's the last chunk of our master page then we unmap it.
1088 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 == ql_lbq_block_size(qdev))
1090 pci_unmap_page(qdev->pdev,
1091 lbq_desc->p.pg_chunk.map,
1092 ql_lbq_block_size(qdev),
1093 PCI_DMA_FROMDEVICE);
1097 /* Get the next small buffer. */
1098 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1100 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101 rx_ring->sbq_curr_idx++;
1102 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103 rx_ring->sbq_curr_idx = 0;
1104 rx_ring->sbq_free_cnt++;
1108 /* Update an rx ring index. */
1109 static void ql_update_cq(struct rx_ring *rx_ring)
1111 rx_ring->cnsmr_idx++;
1112 rx_ring->curr_entry++;
1113 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114 rx_ring->cnsmr_idx = 0;
1115 rx_ring->curr_entry = rx_ring->cq_base;
1119 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1121 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1124 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 struct bq_desc *lbq_desc)
1127 if (!rx_ring->pg_chunk.page) {
1129 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1131 qdev->lbq_buf_order);
1132 if (unlikely(!rx_ring->pg_chunk.page)) {
1133 netif_err(qdev, drv, qdev->ndev,
1134 "page allocation failed.\n");
1137 rx_ring->pg_chunk.offset = 0;
1138 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 0, ql_lbq_block_size(qdev),
1140 PCI_DMA_FROMDEVICE);
1141 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 __free_pages(rx_ring->pg_chunk.page,
1143 qdev->lbq_buf_order);
1144 netif_err(qdev, drv, qdev->ndev,
1145 "PCI mapping failed.\n");
1148 rx_ring->pg_chunk.map = map;
1149 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1155 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1157 /* Adjust the master page chunk for the next buffer get.
1160 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 rx_ring->pg_chunk.page = NULL;
1163 lbq_desc->p.pg_chunk.last_flag = 1;
1165 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 get_page(rx_ring->pg_chunk.page);
1167 lbq_desc->p.pg_chunk.last_flag = 0;
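/* Illustrative accounting: an 8 KiB master block with a 2 KiB lbq_buf_size
 * yields four chunks; each handed-out chunk takes a page reference via
 * get_page() above, and last_flag records that a descriptor holds the
 * final chunk of the block.
 */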
1171 /* Process (refill) a large buffer queue. */
1172 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1174 u32 clean_idx = rx_ring->lbq_clean_idx;
1175 u32 start_idx = clean_idx;
1176 struct bq_desc *lbq_desc;
1180 while (rx_ring->lbq_free_cnt > 32) {
1181 for (i = 0; i < 16; i++) {
1182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n",
1185 lbq_desc = &rx_ring->lbq[clean_idx];
1186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n");
1192 map = lbq_desc->p.pg_chunk.map +
1193 lbq_desc->p.pg_chunk.offset;
1194 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195 dma_unmap_len_set(lbq_desc, maplen,
1196 rx_ring->lbq_buf_size);
1197 *lbq_desc->addr = cpu_to_le64(map);
1199 pci_dma_sync_single_for_device(qdev->pdev, map,
1200 rx_ring->lbq_buf_size,
1201 PCI_DMA_FROMDEVICE);
1203 if (clean_idx == rx_ring->lbq_len)
1207 rx_ring->lbq_clean_idx = clean_idx;
1208 rx_ring->lbq_prod_idx += 16;
1209 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 rx_ring->lbq_prod_idx = 0;
1211 rx_ring->lbq_free_cnt -= 16;
1214 if (start_idx != clean_idx) {
1215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring->lbq_prod_idx);
1218 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 rx_ring->lbq_prod_idx_db_reg);
1223 /* Process (refill) a small buffer queue. */
1224 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1226 u32 clean_idx = rx_ring->sbq_clean_idx;
1227 u32 start_idx = clean_idx;
1228 struct bq_desc *sbq_desc;
1232 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx];
1235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n",
1238 if (sbq_desc->p.skb == NULL) {
1239 netif_printk(qdev, rx_status, KERN_DEBUG,
1241 "sbq: getting new skb for index %d.\n",
1244 netdev_alloc_skb(qdev->ndev,
1246 if (sbq_desc->p.skb == NULL) {
1247 netif_err(qdev, probe, qdev->ndev,
1248 "Couldn't get an skb.\n");
1249 rx_ring->sbq_clean_idx = clean_idx;
1252 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253 map = pci_map_single(qdev->pdev,
1254 sbq_desc->p.skb->data,
1255 rx_ring->sbq_buf_size,
1256 PCI_DMA_FROMDEVICE);
1257 if (pci_dma_mapping_error(qdev->pdev, map)) {
1258 netif_err(qdev, ifup, qdev->ndev,
1259 "PCI mapping failed.\n");
1260 rx_ring->sbq_clean_idx = clean_idx;
1261 dev_kfree_skb_any(sbq_desc->p.skb);
1262 sbq_desc->p.skb = NULL;
1265 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 dma_unmap_len_set(sbq_desc, maplen,
1267 rx_ring->sbq_buf_size);
1268 *sbq_desc->addr = cpu_to_le64(map);
1272 if (clean_idx == rx_ring->sbq_len)
1275 rx_ring->sbq_clean_idx = clean_idx;
1276 rx_ring->sbq_prod_idx += 16;
1277 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 rx_ring->sbq_prod_idx = 0;
1279 rx_ring->sbq_free_cnt -= 16;
1282 if (start_idx != clean_idx) {
1283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring->sbq_prod_idx);
1286 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 rx_ring->sbq_prod_idx_db_reg);
1291 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1298 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1301 static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If it's the zeroeth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there are more than 6 frags, then it's the OAL.
1317 netif_printk(qdev, tx_done, KERN_DEBUG,
1319 "unmapping OAL area.\n");
1321 pci_unmap_single(qdev->pdev,
1322 dma_unmap_addr(&tx_ring_desc->map[i],
1324 dma_unmap_len(&tx_ring_desc->map[i],
1328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
1330 pci_unmap_page(qdev->pdev,
1331 dma_unmap_addr(&tx_ring_desc->map[i],
1333 dma_unmap_len(&tx_ring_desc->map[i],
1334 maplen), PCI_DMA_TODEVICE);
1340 /* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1347 int len = skb_headlen(skb);
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
1358 * Map the skb buffer first.
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
1367 return NETDEV_TX_BUSY;
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
1372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored in the OAL.
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now look like this:
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
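 * So the first seven TBDs cover skb->data plus frags 0-5, the eighth
 * TBD points at the OAL, and every remaining frag (frag 6 onward) lands
 * in consecutive oal->seg[] entries, filled in by the loop below.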
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1417 tbd->addr = cpu_to_le64(map);
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
1426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1434 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1437 err = dma_mapping_error(&qdev->pdev->dev, map);
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(skb_frag_size(frag));
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449 skb_frag_size(frag));
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be unmapped.
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1469 /* Process an inbound completion from an rx ring. */
1470 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct napi_struct *napi = &rx_ring->napi;
1480 napi->dev = qdev->ndev;
1482 skb = napi_get_frags(napi);
1484 netif_err(qdev, drv, qdev->ndev,
1485 "Couldn't get an skb, exiting.\n");
1486 rx_ring->rx_dropped++;
1487 put_page(lbq_desc->p.pg_chunk.page);
1490 prefetch(lbq_desc->p.pg_chunk.va);
1491 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 lbq_desc->p.pg_chunk.page,
1493 lbq_desc->p.pg_chunk.offset,
1497 skb->data_len += length;
1498 skb->truesize += length;
1499 skb_shinfo(skb)->nr_frags++;
1501 rx_ring->rx_packets++;
1502 rx_ring->rx_bytes += length;
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 skb_record_rx_queue(skb, rx_ring->cq_id);
1505 if (vlan_id != 0xffff)
1506 __vlan_hwaccel_put_tag(skb, vlan_id);
1507 napi_gro_frags(napi);
1510 /* Process an inbound completion from an rx ring. */
1511 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1523 skb = netdev_alloc_skb(ndev, length);
1525 netif_err(qdev, drv, qdev->ndev,
1526 "Couldn't get an skb, need to unwind!.\n");
1527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1532 addr = lbq_desc->p.pg_chunk.va;
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538 netif_info(qdev, drv, qdev->ndev,
1539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540 rx_ring->rx_errors++;
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1547 if (skb->len > ndev->mtu + ETH_HLEN) {
1548 netif_err(qdev, drv, qdev->ndev,
1549 "Segment too small, dropping.\n");
1550 rx_ring->rx_dropped++;
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
1567 skb_checksum_none_assert(skb);
1569 if ((ndev->features & NETIF_F_RXCSUM) &&
1570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
1575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data;
1580 if (!(iph->frag_off &
1581 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 netif_printk(qdev, rx_status, KERN_DEBUG,
1585 "TCP checksum done!\n");
1590 skb_record_rx_queue(skb, rx_ring->cq_id);
1591 if (vlan_id != 0xffff)
1592 __vlan_hwaccel_put_tag(skb, vlan_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 napi_gro_receive(napi, skb);
1596 netif_receive_skb(skb);
1599 dev_kfree_skb_any(skb);
1600 put_page(lbq_desc->p.pg_chunk.page);
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 struct rx_ring *rx_ring,
1606 struct ib_mac_iocb_rsp *ib_mac_rsp,
1610 struct net_device *ndev = qdev->ndev;
1611 struct sk_buff *skb = NULL;
1612 struct sk_buff *new_skb = NULL;
1613 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1615 skb = sbq_desc->p.skb;
1616 /* Allocate new_skb and copy */
1617 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 if (new_skb == NULL) {
1619 netif_err(qdev, probe, qdev->ndev,
1620 "No skb available, drop the packet.\n");
1621 rx_ring->rx_dropped++;
1624 skb_reserve(new_skb, NET_IP_ALIGN);
1626 pci_dma_sync_single_for_cpu(qdev->pdev,
1627 dma_unmap_addr(sbq_desc, mapaddr),
1628 dma_unmap_len(sbq_desc, maplen),
1629 PCI_DMA_FROMDEVICE);
1631 memcpy(skb_put(new_skb, length), skb->data, length);
1633 pci_dma_sync_single_for_device(qdev->pdev,
1634 dma_unmap_addr(sbq_desc, mapaddr),
1635 dma_unmap_len(sbq_desc, maplen),
1636 PCI_DMA_FROMDEVICE);
1639 /* Frame error, so drop the packet. */
1640 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1641 netif_info(qdev, drv, qdev->ndev,
1642 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1643 dev_kfree_skb_any(skb);
1644 rx_ring->rx_errors++;
1648 /* loopback self test for ethtool */
1649 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1650 ql_check_lb_frame(qdev, skb);
1651 dev_kfree_skb_any(skb);
1655 /* The max framesize filter on this chip is set higher than
1656 * MTU since FCoE uses 2k frames.
1658 if (skb->len > ndev->mtu + ETH_HLEN) {
1659 dev_kfree_skb_any(skb);
1660 rx_ring->rx_dropped++;
1664 prefetch(skb->data);
1666 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1667 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1669 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1671 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1672 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1673 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1674 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1676 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1677 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1678 "Promiscuous Packet.\n");
1680 rx_ring->rx_packets++;
1681 rx_ring->rx_bytes += skb->len;
1682 skb->protocol = eth_type_trans(skb, ndev);
1683 skb_checksum_none_assert(skb);
1685 /* If rx checksum is on, and there are no
1686 * csum or frame errors.
1688 if ((ndev->features & NETIF_F_RXCSUM) &&
1689 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1691 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1692 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1693 "TCP checksum done!\n");
1694 skb->ip_summed = CHECKSUM_UNNECESSARY;
1695 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1696 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1697 /* Unfragmented ipv4 UDP frame. */
1698 struct iphdr *iph = (struct iphdr *) skb->data;
1699 if (!(iph->frag_off &
1700 ntohs(IP_MF|IP_OFFSET))) {
1701 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702 netif_printk(qdev, rx_status, KERN_DEBUG,
1704 "TCP checksum done!\n");
1709 skb_record_rx_queue(skb, rx_ring->cq_id);
1710 if (vlan_id != 0xffff)
1711 __vlan_hwaccel_put_tag(skb, vlan_id);
1712 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1713 napi_gro_receive(&rx_ring->napi, skb);
1715 netif_receive_skb(skb);
1718 static void ql_realign_skb(struct sk_buff *skb, int len)
1720 void *temp_addr = skb->data;
1722 /* Undo the skb_reserve(skb,32) we did before
1723 * giving to hardware, and realign data on
1724 * a 2-byte boundary.
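 * Shifting data and tail back by QLGE_SB_PAD - NET_IP_ALIGN leaves
 * NET_IP_ALIGN (typically 2) bytes of headroom, so the IP header that
 * follows the 14-byte Ethernet header ends up 4-byte aligned (assuming
 * QLGE_SB_PAD is the 32-byte pad mentioned above).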
1726 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1727 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1728 skb_copy_to_linear_data(skb, temp_addr,
1733 * This function builds an skb for the given inbound
1734 * completion. It will be rewritten for readability in the near
1735 * future, but for now it works well.
1737 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1738 struct rx_ring *rx_ring,
1739 struct ib_mac_iocb_rsp *ib_mac_rsp)
1741 struct bq_desc *lbq_desc;
1742 struct bq_desc *sbq_desc;
1743 struct sk_buff *skb = NULL;
1744 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1745 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1748 * Handle the header buffer if present.
1750 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1751 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1752 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1753 "Header of %d bytes in small buffer.\n", hdr_len);
1755 * Headers fit nicely into a small buffer.
1757 sbq_desc = ql_get_curr_sbuf(rx_ring);
1758 pci_unmap_single(qdev->pdev,
1759 dma_unmap_addr(sbq_desc, mapaddr),
1760 dma_unmap_len(sbq_desc, maplen),
1761 PCI_DMA_FROMDEVICE);
1762 skb = sbq_desc->p.skb;
1763 ql_realign_skb(skb, hdr_len);
1764 skb_put(skb, hdr_len);
1765 sbq_desc->p.skb = NULL;
1769 * Handle the data buffer(s).
1771 if (unlikely(!length)) { /* Is there data too? */
1772 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1773 "No Data buffer in this packet.\n");
1777 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1778 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1779 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780 "Headers in small, data of %d bytes in small, combine them.\n",
1783 * Data is less than small buffer size so it's
1784 * stuffed in a small buffer.
1785 * For this case we append the data
1786 * from the "data" small buffer to the "header" small
1789 sbq_desc = ql_get_curr_sbuf(rx_ring);
1790 pci_dma_sync_single_for_cpu(qdev->pdev,
1792 (sbq_desc, mapaddr),
1795 PCI_DMA_FROMDEVICE);
1796 memcpy(skb_put(skb, length),
1797 sbq_desc->p.skb->data, length);
1798 pci_dma_sync_single_for_device(qdev->pdev,
1805 PCI_DMA_FROMDEVICE);
1807 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1808 "%d bytes in a single small buffer.\n",
1810 sbq_desc = ql_get_curr_sbuf(rx_ring);
1811 skb = sbq_desc->p.skb;
1812 ql_realign_skb(skb, length);
1813 skb_put(skb, length);
1814 pci_unmap_single(qdev->pdev,
1815 dma_unmap_addr(sbq_desc,
1817 dma_unmap_len(sbq_desc,
1819 PCI_DMA_FROMDEVICE);
1820 sbq_desc->p.skb = NULL;
1822 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1823 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1824 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1825 "Header in small, %d bytes in large. Chain large to small!\n",
1828 * The data is in a single large buffer. We
1829 * chain it to the header buffer's skb and let
1832 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1833 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1834 "Chaining page at offset = %d, for %d bytes to skb.\n",
1835 lbq_desc->p.pg_chunk.offset, length);
1836 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1837 lbq_desc->p.pg_chunk.offset,
1840 skb->data_len += length;
1841 skb->truesize += length;
1844 * The headers and data are in a single large buffer. We
1845 * copy it to a new skb and let it go. This can happen with
1846 * jumbo mtu on a non-TCP/UDP frame.
1848 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1849 skb = netdev_alloc_skb(qdev->ndev, length);
1851 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1852 "No skb available, drop the packet.\n");
1855 pci_unmap_page(qdev->pdev,
1856 dma_unmap_addr(lbq_desc,
1858 dma_unmap_len(lbq_desc, maplen),
1859 PCI_DMA_FROMDEVICE);
1860 skb_reserve(skb, NET_IP_ALIGN);
1861 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1862 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1864 skb_fill_page_desc(skb, 0,
1865 lbq_desc->p.pg_chunk.page,
1866 lbq_desc->p.pg_chunk.offset,
1869 skb->data_len += length;
1870 skb->truesize += length;
1872 __pskb_pull_tail(skb,
1873 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1874 VLAN_ETH_HLEN : ETH_HLEN);
1878 * The data is in a chain of large buffers
1879 * pointed to by a small buffer. We loop
1880 * through and chain them to our small header buffer's skb using
1882 * frags: There are 18 max frags and our small
1883 * buffer will hold 32 of them. The thing is,
1884 * we'll use 3 max for our 9000 byte jumbo
1885 * frames. If the MTU goes up we could
1886 * eventually be in trouble.
1889 sbq_desc = ql_get_curr_sbuf(rx_ring);
1890 pci_unmap_single(qdev->pdev,
1891 dma_unmap_addr(sbq_desc, mapaddr),
1892 dma_unmap_len(sbq_desc, maplen),
1893 PCI_DMA_FROMDEVICE);
1894 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1896 * This is a non-TCP/UDP IP frame, so
1897 * the headers aren't split into a small
1898 * buffer. We have to use the small buffer
1899 * that contains our sg list as our skb to
1900 * send upstairs. Copy the sg list here to
1901 * a local buffer and use it to find the
1904 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905 "%d bytes of headers & data in chain of large.\n",
1907 skb = sbq_desc->p.skb;
1908 sbq_desc->p.skb = NULL;
1909 skb_reserve(skb, NET_IP_ALIGN);
1911 while (length > 0) {
1912 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1913 size = (length < rx_ring->lbq_buf_size) ? length :
1914 rx_ring->lbq_buf_size;
1916 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917 "Adding page %d to skb for %d bytes.\n",
1919 skb_fill_page_desc(skb, i,
1920 lbq_desc->p.pg_chunk.page,
1921 lbq_desc->p.pg_chunk.offset,
1924 skb->data_len += size;
1925 skb->truesize += size;
1929 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1930 VLAN_ETH_HLEN : ETH_HLEN);
1935 /* Process an inbound completion from an rx ring. */
1936 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1937 struct rx_ring *rx_ring,
1938 struct ib_mac_iocb_rsp *ib_mac_rsp,
1941 struct net_device *ndev = qdev->ndev;
1942 struct sk_buff *skb = NULL;
1944 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1946 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1947 if (unlikely(!skb)) {
1948 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1949 "No skb available, drop packet.\n");
1950 rx_ring->rx_dropped++;
1954 /* Frame error, so drop the packet. */
1955 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1956 netif_info(qdev, drv, qdev->ndev,
1957 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1958 dev_kfree_skb_any(skb);
1959 rx_ring->rx_errors++;
1963 /* The max framesize filter on this chip is set higher than
1964 * MTU since FCoE uses 2k frames.
1966 if (skb->len > ndev->mtu + ETH_HLEN) {
1967 dev_kfree_skb_any(skb);
1968 rx_ring->rx_dropped++;
1972 /* loopback self test for ethtool */
1973 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1974 ql_check_lb_frame(qdev, skb);
1975 dev_kfree_skb_any(skb);
1979 prefetch(skb->data);
1981 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1982 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1983 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1984 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1985 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1986 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1987 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1988 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1989 rx_ring->rx_multicast++;
1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993 "Promiscuous Packet.\n");
1996 skb->protocol = eth_type_trans(skb, ndev);
1997 skb_checksum_none_assert(skb);
1999 /* If rx checksum is on, and there are no
2000 * csum or frame errors.
2002 if ((ndev->features & NETIF_F_RXCSUM) &&
2003 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2005 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2006 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2007 "TCP checksum done!\n");
2008 skb->ip_summed = CHECKSUM_UNNECESSARY;
2009 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2010 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2011 /* Unfragmented ipv4 UDP frame. */
2012 struct iphdr *iph = (struct iphdr *) skb->data;
2013 if (!(iph->frag_off &
2014 ntohs(IP_MF|IP_OFFSET))) {
2015 skb->ip_summed = CHECKSUM_UNNECESSARY;
2016 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2017 "TCP checksum done!\n");
2022 rx_ring->rx_packets++;
2023 rx_ring->rx_bytes += skb->len;
2024 skb_record_rx_queue(skb, rx_ring->cq_id);
2025 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2026 __vlan_hwaccel_put_tag(skb, vlan_id);
2027 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2028 napi_gro_receive(&rx_ring->napi, skb);
2030 netif_receive_skb(skb);
2033 /* Process an inbound completion from an rx ring. */
2034 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2035 struct rx_ring *rx_ring,
2036 struct ib_mac_iocb_rsp *ib_mac_rsp)
2038 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2039 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2040 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2041 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
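	/* 0xffff acts as a "no VLAN tag present" sentinel; the per-type
	 * receive handlers check vlan_id before calling
	 * __vlan_hwaccel_put_tag().
	 */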
2043 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2045 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2046 /* The data and headers are split into
2049 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2051 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2052 /* The data fit in a single small buffer.
2053 * Allocate a new skb, copy the data and
2054 * return the buffer to the free pool.
2056 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2058 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2059 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2060 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2061 /* TCP packet in a page chunk that's been checksummed.
2062 * Tack it on to our GRO skb and let it go.
2064 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2066 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2067 /* Non-TCP packet in a page chunk. Allocate an
2068 * skb, tack it on frags, and send it up.
2070 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2073 /* Non-TCP/UDP large frames that span multiple buffers
2074 * can be processed correctly by the split frame logic.
2076 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2080 return (unsigned long)length;
2083 /* Process an outbound completion from an rx ring. */
2084 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2085 struct ob_mac_iocb_rsp *mac_rsp)
2087 struct tx_ring *tx_ring;
2088 struct tx_ring_desc *tx_ring_desc;
2090 QL_DUMP_OB_MAC_RSP(mac_rsp);
2091 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2092 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2093 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2094 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2095 tx_ring->tx_packets++;
2096 dev_kfree_skb(tx_ring_desc->skb);
2097 tx_ring_desc->skb = NULL;
2099 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2102 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2103 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2104 netif_warn(qdev, tx_done, qdev->ndev,
2105 "Total descriptor length did not match transfer length.\n");
2107 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2108 netif_warn(qdev, tx_done, qdev->ndev,
2109 "Frame too short to be valid, not sent.\n");
2111 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2112 netif_warn(qdev, tx_done, qdev->ndev,
2113 "Frame too long, but sent anyway.\n");
2115 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2116 netif_warn(qdev, tx_done, qdev->ndev,
2117 "PCI backplane error. Frame not sent.\n");
2120 atomic_inc(&tx_ring->tx_count);
2123 /* Fire up a handler to reset the MPI processor. */
2124 void ql_queue_fw_error(struct ql_adapter *qdev)
2127 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2130 void ql_queue_asic_error(struct ql_adapter *qdev)
2133 ql_disable_interrupts(qdev);
2134 /* Clear adapter up bit to signal the recovery
2135 * process that it shouldn't kill the reset worker thread.
2138 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2139 /* Set asic recovery bit to indicate to the reset process that we are
2140 * in fatal error recovery rather than a normal close
2142 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2143 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2146 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2147 struct ib_ae_iocb_rsp *ib_ae_rsp)
2149 switch (ib_ae_rsp->event) {
2150 case MGMT_ERR_EVENT:
2151 netif_err(qdev, rx_err, qdev->ndev,
2152 "Management Processor Fatal Error.\n");
2153 ql_queue_fw_error(qdev);
2156 case CAM_LOOKUP_ERR_EVENT:
2157 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2158 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2159 ql_queue_asic_error(qdev);
2162 case SOFT_ECC_ERROR_EVENT:
2163 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2164 ql_queue_asic_error(qdev);
2167 case PCI_ERR_ANON_BUF_RD:
2168 netdev_err(qdev->ndev, "PCI error occurred when reading "
2169 "anonymous buffers from rx_ring %d.\n",
2171 ql_queue_asic_error(qdev);
2175 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2177 ql_queue_asic_error(qdev);
2182 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2184 struct ql_adapter *qdev = rx_ring->qdev;
2185 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2186 struct ob_mac_iocb_rsp *net_rsp = NULL;
2189 struct tx_ring *tx_ring;
2190 /* While there are entries in the completion queue. */
2191 while (prod != rx_ring->cnsmr_idx) {
2193 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2194 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2195 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2197 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2199 switch (net_rsp->opcode) {
2201 case OPCODE_OB_MAC_TSO_IOCB:
2202 case OPCODE_OB_MAC_IOCB:
2203 ql_process_mac_tx_intr(qdev, net_rsp);
2206 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2211 ql_update_cq(rx_ring);
2212 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2216 ql_write_cq_idx(rx_ring);
2217 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2218 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2219 if (atomic_read(&tx_ring->queue_stopped) &&
2220 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2221 /*
2222 * The queue got stopped because the tx_ring was full.
2223 * Wake it up, because it's now at least 25% empty.
2224 */
2225 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
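/* Service an inbound (RSS) completion ring, processing at most 'budget'
 * entries: dispatch received frames and async events, then replenish the
 * buffer queues and write back the new consumer index.
 */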
2231 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2233 struct ql_adapter *qdev = rx_ring->qdev;
2234 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2235 struct ql_net_rsp_iocb *net_rsp;
2238 /* While there are entries in the completion queue. */
2239 while (prod != rx_ring->cnsmr_idx) {
2241 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2242 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2243 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2245 net_rsp = rx_ring->curr_entry;
2247 switch (net_rsp->opcode) {
2248 case OPCODE_IB_MAC_IOCB:
2249 ql_process_mac_rx_intr(qdev, rx_ring,
2250 (struct ib_mac_iocb_rsp *)
2254 case OPCODE_IB_AE_IOCB:
2255 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2259 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2260 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2265 ql_update_cq(rx_ring);
2266 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2267 if (count == budget)
2270 ql_update_buffer_queues(qdev, rx_ring);
2271 ql_write_cq_idx(rx_ring);
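/* NAPI poll routine for an MSI-X vector: drain any TX completion rings
 * that share this vector first, then the vector's own RSS ring, and
 * re-enable the completion interrupt when less than a full budget of
 * work was done.
 */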
2275 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2277 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2278 struct ql_adapter *qdev = rx_ring->qdev;
2279 struct rx_ring *trx_ring;
2280 int i, work_done = 0;
2281 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2284 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2286 /* Service the TX rings first. They start
2287 * right after the RSS rings. */
2288 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2289 trx_ring = &qdev->rx_ring[i];
2290 /* If this TX completion ring belongs to this vector and
2291 * it's not empty then service it.
2292 */
2293 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2294 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2295 trx_ring->cnsmr_idx)) {
2296 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2297 "%s: Servicing TX completion ring %d.\n",
2298 __func__, trx_ring->cq_id);
2299 ql_clean_outbound_rx_ring(trx_ring);
2303 /*
2304 * Now service the RSS ring if it's active.
2305 */
2306 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2307 rx_ring->cnsmr_idx) {
2308 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309 "%s: Servicing RX completion ring %d.\n",
2310 __func__, rx_ring->cq_id);
2311 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2314 if (work_done < budget) {
2315 napi_complete(napi);
2316 ql_enable_completion_interrupt(qdev, rx_ring->irq);
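/* Enable or disable hardware VLAN handling in NIC_RCV_CFG according to
 * the NETIF_F_HW_VLAN_RX feature flag.
 */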
2321 static void qlge_vlan_mode(struct net_device *ndev, u32 features)
2323 struct ql_adapter *qdev = netdev_priv(ndev);
2325 if (features & NETIF_F_HW_VLAN_RX) {
2326 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2327 "Turning on VLAN in NIC_RCV_CFG.\n");
2328 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2329 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2331 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2332 "Turning off VLAN in NIC_RCV_CFG.\n");
2333 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2337 static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2339 /*
2340 * Since there is no support for separate rx/tx vlan accel
2341 * enable/disable, make sure the tx flag is always in the same state as rx.
2342 */
2343 if (features & NETIF_F_HW_VLAN_RX)
2344 features |= NETIF_F_HW_VLAN_TX;
2345 else
2346 features &= ~NETIF_F_HW_VLAN_TX;
2351 static int qlge_set_features(struct net_device *ndev, u32 features)
2353 u32 changed = ndev->features ^ features;
2355 if (changed & NETIF_F_HW_VLAN_RX)
2356 qlge_vlan_mode(ndev, features);
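/* Program a VLAN id into the hardware filter. The caller must hold the
 * MAC address hardware semaphore (SEM_MAC_ADDR_MASK).
 */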
2361 static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2363 u32 enable_bit = MAC_ADDR_E;
2365 if (ql_set_mac_addr_reg
2366 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2367 netif_err(qdev, ifup, qdev->ndev,
2368 "Failed to init vlan address.\n");
2372 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2374 struct ql_adapter *qdev = netdev_priv(ndev);
2377 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2381 __qlge_vlan_rx_add_vid(qdev, vid);
2382 set_bit(vid, qdev->active_vlans);
2384 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2387 static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2389 u32 enable_bit = 0;
2391 if (ql_set_mac_addr_reg
2392 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2393 netif_err(qdev, ifup, qdev->ndev,
2394 "Failed to clear vlan address.\n");
2398 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2400 struct ql_adapter *qdev = netdev_priv(ndev);
2403 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2407 __qlge_vlan_rx_kill_vid(qdev, vid);
2408 clear_bit(vid, qdev->active_vlans);
2410 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
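/* Re-program every VLAN id recorded in active_vlans, typically after the
 * adapter has been reset.
 */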
2413 static void qlge_restore_vlan(struct ql_adapter *qdev)
2418 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2422 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2423 __qlge_vlan_rx_add_vid(qdev, vid);
2425 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2428 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2429 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2431 struct rx_ring *rx_ring = dev_id;
2432 napi_schedule(&rx_ring->napi);
2436 /* This handles a fatal error, MPI activity, and the default
2437 * rx_ring in an MSI-X multiple vector environment.
2438 * In an MSI/Legacy environment it also processes the rest of
2441 static irqreturn_t qlge_isr(int irq, void *dev_id)
2443 struct rx_ring *rx_ring = dev_id;
2444 struct ql_adapter *qdev = rx_ring->qdev;
2445 struct intr_context *intr_context = &qdev->intr_context[0];
2449 spin_lock(&qdev->hw_lock);
2450 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2451 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2452 "Shared Interrupt, Not ours!\n");
2453 spin_unlock(&qdev->hw_lock);
2456 spin_unlock(&qdev->hw_lock);
2458 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2460 /*
2461 * Check for fatal error.
2462 */
2464 ql_queue_asic_error(qdev);
2465 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2466 var = ql_read32(qdev, ERR_STS);
2467 netdev_err(qdev->ndev, "Resetting chip. "
2468 "Error Status Register = 0x%x\n", var);
2472 /*
2473 * Check MPI processor activity.
2474 */
2475 if ((var & STS_PI) &&
2476 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2477 /*
2478 * We've got an async event or mailbox completion.
2479 * Handle it and clear the source of the interrupt.
2480 */
2481 netif_err(qdev, intr, qdev->ndev,
2482 "Got MPI processor interrupt.\n");
2483 ql_disable_completion_interrupt(qdev, intr_context->intr);
2484 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2485 queue_delayed_work_on(smp_processor_id(),
2486 qdev->workqueue, &qdev->mpi_work, 0);
2490 /*
2491 * Get the bit-mask that shows the active queues for this
2492 * pass. Compare it to the queues that this irq services
2493 * and call napi if there's a match.
2494 */
2495 var = ql_read32(qdev, ISR1);
2496 if (var & intr_context->irq_mask) {
2497 netif_info(qdev, intr, qdev->ndev,
2498 "Waking handler for rx_ring[0].\n");
2499 ql_disable_completion_interrupt(qdev, intr_context->intr);
2500 napi_schedule(&rx_ring->napi);
2503 ql_enable_completion_interrupt(qdev, intr_context->intr);
2504 return work_done ? IRQ_HANDLED : IRQ_NONE;
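/* Set up an OB_MAC_TSO IOCB for a GSO skb: record the header lengths,
 * offsets and MSS, and seed the TCP pseudo-header checksum (IPv4 or
 * IPv6) so the hardware can complete it for each segment.
 */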
2507 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2510 if (skb_is_gso(skb)) {
2512 if (skb_header_cloned(skb)) {
2513 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2518 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2519 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2520 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2521 mac_iocb_ptr->total_hdrs_len =
2522 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2523 mac_iocb_ptr->net_trans_offset =
2524 cpu_to_le16(skb_network_offset(skb) |
2525 skb_transport_offset(skb)
2526 << OB_MAC_TRANSPORT_HDR_SHIFT);
2527 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2528 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2529 if (likely(skb->protocol == htons(ETH_P_IP))) {
2530 struct iphdr *iph = ip_hdr(skb);
2532 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2537 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2538 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2539 tcp_hdr(skb)->check =
2540 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2541 &ipv6_hdr(skb)->daddr,
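/* Set up hardware checksum offload for a non-TSO IPv4 frame: record the
 * header offsets and seed the TCP or UDP pseudo-header checksum.
 */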
2549 static void ql_hw_csum_setup(struct sk_buff *skb,
2550 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2553 struct iphdr *iph = ip_hdr(skb);
2555 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2556 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2557 mac_iocb_ptr->net_trans_offset =
2558 cpu_to_le16(skb_network_offset(skb) |
2559 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2561 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2562 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2563 if (likely(iph->protocol == IPPROTO_TCP)) {
2564 check = &(tcp_hdr(skb)->check);
2565 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2566 mac_iocb_ptr->total_hdrs_len =
2567 cpu_to_le16(skb_transport_offset(skb) +
2568 (tcp_hdr(skb)->doff << 2));
2570 check = &(udp_hdr(skb)->check);
2571 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2572 mac_iocb_ptr->total_hdrs_len =
2573 cpu_to_le16(skb_transport_offset(skb) +
2574 sizeof(struct udphdr));
2576 *check = ~csum_tcpudp_magic(iph->saddr,
2577 iph->daddr, len, iph->protocol, 0);
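/* Main transmit entry point: select the TX ring from the skb's queue
 * mapping, build an OB_MAC (or TSO) IOCB, map the buffers, and ring the
 * doorbell. Returns NETDEV_TX_BUSY when fewer than two descriptors remain.
 */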
2580 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2582 struct tx_ring_desc *tx_ring_desc;
2583 struct ob_mac_iocb_req *mac_iocb_ptr;
2584 struct ql_adapter *qdev = netdev_priv(ndev);
2586 struct tx_ring *tx_ring;
2587 u32 tx_ring_idx = (u32) skb->queue_mapping;
2589 tx_ring = &qdev->tx_ring[tx_ring_idx];
2591 if (skb_padto(skb, ETH_ZLEN))
2592 return NETDEV_TX_OK;
2594 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595 netif_info(qdev, tx_queued, qdev->ndev,
2596 "%s: shutting down tx queue %d du to lack of resources.\n",
2597 __func__, tx_ring_idx);
2598 netif_stop_subqueue(ndev, tx_ring->wq_id);
2599 atomic_inc(&tx_ring->queue_stopped);
2600 tx_ring->tx_errors++;
2601 return NETDEV_TX_BUSY;
2603 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2604 mac_iocb_ptr = tx_ring_desc->queue_entry;
2605 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2607 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2608 mac_iocb_ptr->tid = tx_ring_desc->index;
2609 /* We use the upper 32-bits to store the tx queue for this IO.
2610 * When we get the completion we can use it to establish the context.
2611 */
2612 mac_iocb_ptr->txq_idx = tx_ring_idx;
2613 tx_ring_desc->skb = skb;
2615 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2617 if (vlan_tx_tag_present(skb)) {
2618 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2619 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2620 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2621 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2623 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2624 if (tso < 0) {
2625 dev_kfree_skb_any(skb);
2626 return NETDEV_TX_OK;
2627 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2628 ql_hw_csum_setup(skb,
2629 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2631 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2632 NETDEV_TX_OK) {
2633 netif_err(qdev, tx_queued, qdev->ndev,
2634 "Could not map the segments.\n");
2635 tx_ring->tx_errors++;
2636 return NETDEV_TX_BUSY;
2638 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2639 tx_ring->prod_idx++;
2640 if (tx_ring->prod_idx == tx_ring->wq_len)
2641 tx_ring->prod_idx = 0;
2644 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2645 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2646 "tx queued, slot %d, len %d\n",
2647 tx_ring->prod_idx, skb->len);
2649 atomic_dec(&tx_ring->tx_count);
2650 return NETDEV_TX_OK;
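/* Free the DMA-coherent pages that hold the shadow completion-queue
 * index registers.
 */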
2654 static void ql_free_shadow_space(struct ql_adapter *qdev)
2656 if (qdev->rx_ring_shadow_reg_area) {
2657 pci_free_consistent(qdev->pdev,
2658 PAGE_SIZE,
2659 qdev->rx_ring_shadow_reg_area,
2660 qdev->rx_ring_shadow_reg_dma);
2661 qdev->rx_ring_shadow_reg_area = NULL;
2663 if (qdev->tx_ring_shadow_reg_area) {
2664 pci_free_consistent(qdev->pdev,
2665 PAGE_SIZE,
2666 qdev->tx_ring_shadow_reg_area,
2667 qdev->tx_ring_shadow_reg_dma);
2668 qdev->tx_ring_shadow_reg_area = NULL;
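/* Allocate one DMA page each for the RX and TX shadow register areas,
 * which the hardware uses to post producer/consumer index updates for
 * the driver to read from host memory.
 */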
2672 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2674 qdev->rx_ring_shadow_reg_area =
2675 pci_alloc_consistent(qdev->pdev,
2676 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2677 if (qdev->rx_ring_shadow_reg_area == NULL) {
2678 netif_err(qdev, ifup, qdev->ndev,
2679 "Allocation of RX shadow space failed.\n");
2682 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2683 qdev->tx_ring_shadow_reg_area =
2684 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2685 &qdev->tx_ring_shadow_reg_dma);
2686 if (qdev->tx_ring_shadow_reg_area == NULL) {
2687 netif_err(qdev, ifup, qdev->ndev,
2688 "Allocation of TX shadow space failed.\n");
2689 goto err_wqp_sh_area;
2691 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2692 return 0;
2694 err_wqp_sh_area:
2695 pci_free_consistent(qdev->pdev,
2696 PAGE_SIZE,
2697 qdev->rx_ring_shadow_reg_area,
2698 qdev->rx_ring_shadow_reg_dma);
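/* Link each software tx_ring_desc to its IOCB slot in the work queue
 * and reset the free-descriptor accounting.
 */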
2702 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704 struct tx_ring_desc *tx_ring_desc;
2706 struct ob_mac_iocb_req *mac_iocb_ptr;
2708 mac_iocb_ptr = tx_ring->wq_base;
2709 tx_ring_desc = tx_ring->q;
2710 for (i = 0; i < tx_ring->wq_len; i++) {
2711 tx_ring_desc->index = i;
2712 tx_ring_desc->skb = NULL;
2713 tx_ring_desc->queue_entry = mac_iocb_ptr;
2717 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2718 atomic_set(&tx_ring->queue_stopped, 0);
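/* Release the DMA-coherent work queue backing a TX ring. */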
2721 static void ql_free_tx_resources(struct ql_adapter *qdev,
2722 struct tx_ring *tx_ring)
2724 if (tx_ring->wq_base) {
2725 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2726 tx_ring->wq_base, tx_ring->wq_base_dma);
2727 tx_ring->wq_base = NULL;
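/* Allocate the DMA-coherent work queue for a TX ring, checking its
 * alignment, along with the driver-side descriptor array that shadows it.
 */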
2733 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2734 struct tx_ring *tx_ring)
2736 tx_ring->wq_base =
2737 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2738 &tx_ring->wq_base_dma);
2740 if ((tx_ring->wq_base == NULL) ||
2741 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2742 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2745 tx_ring->q =
2746 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2747 if (tx_ring->q == NULL)
2752 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2753 tx_ring->wq_base, tx_ring->wq_base_dma);