/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
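
/* Sketch of the arithmetic behind ENIC_DESC_MAX_SPLITS: a TSO send is
 * split into descriptors of at most WQ_ENET_MAX_DESC_LEN bytes, so a
 * single fragment of up to MAX_TSO (64 KiB) bytes needs at most
 * MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1 descriptors.  Assuming
 * WQ_ENET_LEN_BITS is 14 (see wq_enet_desc.h), that works out to
 * 65536 / 16384 + 1 = 5 descriptors.
 */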
#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ 0, }	/* end of table */
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
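
/* Note: the offsetof() result is divided by 8 so that .offset indexes
 * the stats block as an array of u64 counters; enic_get_ethtool_stats()
 * below reads ((u64 *)&vstats->tx)[offset] and ((u64 *)&vstats->rx)[offset].
 */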
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
};
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}
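
/* A sketch of the MSI-X vector layout implied by the helpers above
 * (n = rq_count, m = wq_count):
 *
 *	vector		purpose
 *	0 .. n-1	RQ completions (one per RQ)
 *	n .. n+m-1	WQ completions (one per WQ)
 *	n+m		WQ/RQ errors
 *	n+m+1		notifications
 */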
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
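
/* Example (from userspace): "ethtool -C eth0 rx-usecs 30 tx-usecs 30".
 * Requested values are clamped above to what the vnic timer can encode
 * (INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX)); with INTx or MSI a
 * single vector serves both WQ and RQ, so tx-usecs must equal rx-usecs.
 */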
static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_flags = ethtool_op_get_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
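
/* The wake threshold above mirrors the stop threshold in
 * enic_hard_start_xmit(): the queue is restarted only once a worst-case
 * send of MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors is guaranteed
 * to fit, which avoids an immediate stop/wake thrash.
 */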
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}
static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < netdev->mtu)
			netdev_warn(netdev,
				"interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				netdev->mtu, mtu);
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0),	/* EOP? */
			loopback);
	}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment.  The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length.  So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment.  The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length.  So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
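
	/* Worked example of the preload above: for IPv4 the TCP checksum
	 * field is seeded with ~csum_tcpudp_magic(saddr, daddr, 0,
	 * IPPROTO_TCP, 0), i.e. the one's-complement pseudo-header sum
	 * computed with a zero length; the hardware then folds the true
	 * length of each segment it carves out of this TSO send into
	 * that seed, yielding a correct per-segment checksum.
	 */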
	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static void enic_reset_multicast_list(struct enic *enic)
{
	enic->mc_count = 0;
	enic->flags = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}
static void enic_add_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}
static void enic_add_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (compare_ether_addr(enic->uc_addr[i],
				uc_addr[j]) == 0)
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (compare_ether_addr(uc_addr[i],
				enic->uc_addr[j]) == 0)
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_add_unicast_addr_list(enic);
		if (!allmulti)
			enic_add_multicast_addr_list(enic);
	}
}
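
/* When the address lists outgrow the perfect filters, the logic above
 * degrades gracefully: more unicast addrs than
 * ENIC_UNICAST_PERFECT_FILTERS forces promiscuous mode, and more
 * multicast addrs than ENIC_MULTICAST_PERFECT_FILTERS forces allmulti,
 * in which case the per-address add/del passes are skipped.
 */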
/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);

	enic->vlan_group = vlan_group;
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->reset);
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);

	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	/* Ignore the vf argument for now.  We can assume the request
	 * is coming on a vf.
	 */
	if (is_valid_ether_addr(mac)) {
		memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
		return 0;
	} else
		return -EINVAL;
}
static int enic_set_port_profile(struct enic *enic, u8 *mac)
{
	struct vic_provinfo *vp;
	u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
	u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
	char uuid_str[38];
	char client_mac_str[18];
	u8 *client_mac;
	int err;

	err = enic_vnic_dev_deinit(enic);
	if (err)
		return err;

	switch (enic->pp.request) {

	case PORT_REQUEST_ASSOCIATE:

		if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
			return -EINVAL;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		vp = vic_provinfo_alloc(GFP_KERNEL, oui,
			VIC_PROVINFO_GENERIC_TYPE);
		if (!vp)
			return -ENOMEM;

		vic_provinfo_add_tlv(vp,
			VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
			strlen(enic->pp.name) + 1, enic->pp.name);

		if (!is_zero_ether_addr(enic->pp.mac_addr))
			client_mac = enic->pp.mac_addr;
		else
			client_mac = mac;

		vic_provinfo_add_tlv(vp,
			VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
			ETH_ALEN, client_mac);

		sprintf(client_mac_str, "%pM", client_mac);
		vic_provinfo_add_tlv(vp,
			VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
			sizeof(client_mac_str), client_mac_str);

		if (enic->pp.set & ENIC_SET_INSTANCE) {
			sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
			vic_provinfo_add_tlv(vp,
				VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
				sizeof(uuid_str), uuid_str);
		}

		if (enic->pp.set & ENIC_SET_HOST) {
			sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
			vic_provinfo_add_tlv(vp,
				VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
				sizeof(uuid_str), uuid_str);
		}

		os_type = htons(os_type);
		vic_provinfo_add_tlv(vp,
			VIC_GENERIC_PROV_TLV_OS_TYPE,
			sizeof(os_type), &os_type);

		err = enic_dev_init_prov(enic, vp);
		vic_provinfo_free(vp);
		if (err)
			return err;
		break;

	case PORT_REQUEST_DISASSOCIATE:
		break;

	default:
		return -EINVAL;
	}

	/* Set flag to indicate that the port assoc/disassoc
	 * request has been sent out to fw
	 */
	enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

	return 0;
}
static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile new_pp;
	int err = 0;

	memset(&new_pp, 0, sizeof(new_pp));

	if (port[IFLA_PORT_REQUEST]) {
		new_pp.set |= ENIC_SET_REQUEST;
		new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
	}

	if (port[IFLA_PORT_PROFILE]) {
		new_pp.set |= ENIC_SET_NAME;
		memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		new_pp.set |= ENIC_SET_INSTANCE;
		memcpy(new_pp.instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		new_pp.set |= ENIC_SET_HOST;
		memcpy(new_pp.host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	/* don't support VFs, yet */
	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	if (!(new_pp.set & ENIC_SET_REQUEST))
		return -EOPNOTSUPP;

	if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
		/* Special case handling */
		if (!is_zero_ether_addr(enic->pp.vf_mac))
			memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			random_ether_addr(netdev->dev_addr);
	}

	memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));

	err = enic_set_port_profile(enic, netdev->dev_addr);
	if (err)
		goto set_port_profile_cleanup;

set_port_profile_cleanup:
	memset(enic->pp.vf_mac, 0, ETH_ALEN);

	if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
		memset(netdev->dev_addr, 0, ETH_ALEN);
		memset(enic->pp.mac_addr, 0, ETH_ALEN);
	}

	return err;
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	int err, error, done;
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;

	if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_dev_init_done(enic, &done, &error);
	if (err)
		error = err;

	switch (error) {
	case ERR_SUCCESS:
		if (!done)
			response = PORT_PROFILE_RESPONSE_INPROGRESS;
		break;
	case ERR_EINVAL:
		response = PORT_PROFILE_RESPONSE_INVALID;
		break;
	case ERR_EBADSTATE:
		response = PORT_PROFILE_RESPONSE_BADSTATE;
		break;
	case ERR_ENOMEM:
		response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
		break;
	default:
		response = PORT_PROFILE_RESPONSE_ERROR;
		break;
	}

	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
	if (enic->pp.set & ENIC_SET_NAME)
		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
			enic->pp.name);
	if (enic->pp.set & ENIC_SET_INSTANCE)
		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
			enic->pp.instance_uuid);
	if (enic->pp.set & ENIC_SET_HOST)
		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
			enic->pp.host_uuid);

	return 0;

	/* the NLA_PUT* macros jump here when the skb runs out of room */
nla_put_failure:
	return -EMSGSIZE;
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped &&
		    (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {

			if (netdev->features & NETIF_F_GRO)
				vlan_gro_receive(&enic->napi[q_number],
					enic->vlan_group, vlan_tci, skb);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan_tci);

		} else {

			if (netdev->features & NETIF_F_GRO)
				napi_gro_receive(&enic->napi[q_number], skb);
			else
				netif_receive_skb(skb);

		}
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed.  Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}
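
/* NAPI contract sketch: returning a value equal to the budget keeps the
 * device in polling mode (used above when vnic_rq_fill() fails, so the
 * ring refill is retried on the next poll); returning less than the
 * budget requires calling napi_complete() and re-enabling (unmasking)
 * the interrupt, as done here.
 */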
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed.  Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
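
/* With MSI-X, the names built above show up in /proc/interrupts as, for
 * example, "eth0-rx-0", "eth0-tx-0", "eth0-err" and "eth0-notify"
 * (assuming the netdev is named eth0); the "%.11s" truncation keeps the
 * final string within the fixed-size devname buffer.
 */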
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_add_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_add_station_addr(enic);
	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]);	/* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_del_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}
		intr = enic_msix_wq_intr(enic, 0);
		enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
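
/* The fixed 40-byte key above is ASCII in disguise: the four 10-byte
 * rows decode to "UCSawesome", "PALOunique", "LINUXrocks" and
 * "ENICiscool".
 */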
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
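
/* The loop above fills the 2^rss_hash_bits indirection-table entries
 * (128 for rss_hash_bits = 7), packed four to a word, assigning RQs
 * round-robin: flows hashed to bucket i are steered to RQ (i % rq_count).
 */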
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_multicast_list(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI-X with a single RQ (no RSS)
	 */

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
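
/* Summary of the fallback ladder above: MSI-X with n RQs (RSS), then
 * MSI-X with a single RQ, then MSI, then INTx, and -EINVAL if even the
 * INTx resources are missing.  enic->config.intr_mode, supplied by
 * firmware, sets the most capable mode permitted: 0 allows MSI-X,
 * 1 allows at most MSI, 2 at most INTx.
 */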
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_multicast_list = enic_set_rx_mode,
	.ndo_set_mac_address = enic_set_mac_address_dynamic,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
	.ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = enic_set_mac_address,
	.ndo_set_rx_mode = enic_set_rx_mode,
	.ndo_set_multicast_list = enic_set_rx_mode,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};
static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_vnic_unregister;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_GRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);