/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2010 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
                               u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
                                           u32 val)
{
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* tx or rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}
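/*
 * Worked example (illustrative, not from the original source): mapping
 * Rx queue 3 (direction 0) to MSI-X vector 2 selects VTIVAR(3 >> 1) =
 * VTIVAR(1) and index (16 * (3 & 1)) + (8 * 0) = 16, so bits 23:16 of
 * that register receive (2 | IXGBE_IVAR_ALLOC_VAL).
 */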
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
                                               struct ixgbevf_tx_buffer
                                               *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
        /* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif
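/*
 * Worked example (illustrative): IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16KB,
 * so a 60000-byte linear buffer costs TXD_USE_COUNT(60000) =
 * (60000 >> 14) + 1 = 4 descriptors.  On a 4KB-page build where
 * MAX_SKB_FRAGS is 18, DESC_NEEDED = 1 + 18 * 1 + 1 = 20 descriptors in
 * the worst case (data + frags + context).
 */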
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
                                 struct ixgbevf_ring *tx_ring)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->work_limit)) {
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                /* eop could change between read and DD-check */
                if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
                        goto cont_loop;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbevf_unmap_and_free_tx_resource(adapter,
                                                           tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

cont_loop:
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
#ifdef HAVE_TX_MQ
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
#else
                if (netif_queue_stopped(netdev) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
#endif
        }

        /* re-arm the interrupt */
        if ((count >= tx_ring->work_limit) &&
            (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
                IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
        }

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);

        return count < tx_ring->work_limit;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                                struct sk_buff *skb, u8 status,
                                struct ixgbevf_ring *ring,
                                union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);

        if (is_vlan) {
                u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

                __vlan_hwaccel_put_tag(skb, tag);
        }

        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
        else
                netif_rx(skb);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
                                       u32 status_err, struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *rx_ring,
                                     int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = netdev_alloc_page(adapter->netdev);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = dma_map_page(&pdev->dev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    DMA_FROM_DEVICE);
                }

                skb = bi->skb;
                if (!skb) {
                        skb = netdev_alloc_skb(adapter->netdev,
                                               bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                }
                if (!bi->dma) {
                        bi->dma = dma_map_single(&pdev->dev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u64 qmask)
{
        u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;

        mask = (qmask & 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
                                 int *work_done, int work_to_do)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                rmb(); /* read descriptor and rx_buffer_info after status DD */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBEVF_RX_HDR_SIZE)
                                len = IXGBEVF_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }
                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                if (upper_len) {
                        dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                next_buffer = &rx_ring->rx_buffer_info[i];

                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        } else {
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbevf_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                /*
                 * Work around issue of some types of VM to VM loop back
                 * packets not getting split correctly
                 */
                if (staterr & IXGBE_RXD_STAT_LB) {
                        u32 header_fixup_len = skb_headlen(skb);
                        if (header_fixup_len < 14)
                                skb_push(skb, header_fixup_len);
                }
                skb->protocol = eth_type_trans(skb, adapter->netdev);

                ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(adapter, rx_ring,
                                                 cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);

        return cleaned;
}
/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *rx_ring = NULL;
        int work_done = 0;
        long r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);

        ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
                if (adapter->itr_setting & 1)
                        ixgbevf_set_itr_msix(q_vector);
                if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                        ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
        }

        return work_done;
}
/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *rx_ring = NULL;
        int work_done = 0, i;
        long r_idx;
        u64 enable_mask = 0;

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        budget /= (q_vector->rxr_count ?: 1);
        budget = max(budget, 1);
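        /*
         * Worked example (illustrative): a NAPI budget of 64 spread over
         * rxr_count == 4 rings gives each ring a per-pass budget of 16;
         * the ?: and max() guards keep the per-ring budget at least 1.
         */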
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
                enable_mask |= rx_ring->v_idx;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

#ifndef HAVE_NETDEV_NAPI_LIST
        if (!netif_running(adapter->netdev))
                work_done = 0;

#endif
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
                if (adapter->itr_setting & 1)
                        ixgbevf_set_itr_msix(q_vector);
                if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                        ixgbevf_irq_enable_queues(adapter, enable_mask);
        }

        return work_done;
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        struct ixgbe_hw *hw = &adapter->hw;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_set_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbevf_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbevf_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
                else if (q_vector->rxr_count)
                        /* rx only or mixed */
                        q_vector->eitr = adapter->eitr_param;

                ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~IXGBE_EIMS_OTHER;
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}
enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
                             u32 eitr, u8 itr_setting,
                             int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}
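/*
 * Worked example (illustrative): at eitr = 20000 ints/s the timeslice is
 * 1000000 / 20000 = 50 us.  If 6000 bytes arrived in that slice,
 * bytes_perint = 120; with the defaults set in ixgbevf_sw_init
 * (eitr_low = 10, eitr_high = 20) that exceeds eitr_high, so a
 * low_latency vector is promoted to bulk_latency (fewer interrupts).
 */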
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
                               u32 itr_reg)
{
        struct ixgbe_hw *hw = &adapter->hw;

        itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

        /*
         * set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = q_vector->v_idx;
        struct ixgbevf_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
                                             q_vector->tx_itr,
                                             tx_ring->total_packets,
                                             tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
                                             q_vector->rx_itr,
                                             rx_ring->total_packets,
                                             rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;

                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
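                /*
                 * Note (descriptive, not in the original source): because
                 * q_vector->eitr was already overwritten above, this 90/10
                 * blend evaluates to new_itr itself; the comment records
                 * the intended exponential-smoothing formula.
                 */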
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                ixgbevf_write_eitr(adapter, v_idx, itr_reg);
        }
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;
        u32 msg;

        eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

        if (!hw->mbx.ops.check_for_ack(hw)) {
                /*
                 * checking for the ack clears the PFACK bit.  Place
                 * it back in the v2p_mailbox cache so that anyone
                 * polling for an ack will not miss it.  Also
                 * avoid the read below because the code to read
                 * the mailbox will also clear the ack bit.  This was
                 * causing lost acks.  Just cache the bit and exit
                 * the interrupt handler.
                 */
                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
                goto out;
        }

        /* Not an ack interrupt, go ahead and read the message */
        hw->mbx.ops.read(hw, &msg, 1);

        if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
                mod_timer(&adapter->watchdog_timer,
                          round_jiffies(jiffies + 1));

out:
        return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *tx_ring;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                ixgbevf_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        if (adapter->itr_setting & 1)
                ixgbevf_set_itr_msix(q_vector);

        return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbevf_ring *rx_ring;
        int r_idx;
        int i;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
        ixgbevf_msix_clean_rx(irq, data);
        ixgbevf_msix_clean_tx(irq, data);

        return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
        a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
        a->tx_ring[t_idx].v_idx = 1 << v_idx;
}
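/*
 * Worked example (illustrative): mapping Rx queue 0 to vector 2 sets bit 0
 * in that vector's rxr_idx bitmap and stores v_idx = 1 << 2 = 0x4 on the
 * ring, the per-vector mask later written to VTEIMC/VTEICS.
 */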
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}
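/*
 * Worked example (illustrative): 4 Rx queues shared by 3 q_vectors gives
 * DIV_ROUND_UP(4, 3) = 2 queues on vector 0, then DIV_ROUND_UP(2, 2) = 1
 * on vector 1 and DIV_ROUND_UP(1, 1) = 1 on vector 2; recomputing *qpv
 * on every pass absorbs the remainder.
 */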
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;
        int ri = 0, ti = 0;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
                                          ? &ixgbevf_msix_clean_many : \
                          (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
                          (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
                          NULL)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(adapter->q_vector[vector]);

                if (handler == &ixgbevf_msix_clean_rx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbevf_msix_clean_tx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "tx", ti++);
                } else if (handler == &ixgbevf_msix_clean_many) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "TxRx", vector);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  adapter->q_vector[vector]);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt "
                               "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:mbx", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_mbx failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
                         &(adapter->q_vector[i]));
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
        return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
                q_vector->eitr = adapter->eitr_param;
        }
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw,
                       "request_irq failed, Error %d\n", err);

        return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i, q_vectors;

        q_vectors = adapter->num_msix_vectors;

        i = q_vectors - 1;

        free_irq(adapter->msix_entries[i].vector, netdev);
        i--;

        for (; i >= 0; i--) {
                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }

        ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
        int i;
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

        IXGBE_WRITE_FLUSH(hw);

        for (i = 0; i < adapter->num_msix_vectors; i++)
                synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
                                      bool queues, bool flush)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 mask;
        u64 qmask;

        mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
        qmask = ~0;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

        if (queues)
                ixgbevf_irq_enable_queues(adapter, qmask);

        if (flush)
                IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
        u64 tdba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, j, tdlen, txctrl;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->tx_ring[i];
                j = ring->reg_idx;
                tdba = ring->dma;
                tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
                                (tdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_VFTDH(j);
                adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
                /* Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
        }
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
        struct ixgbevf_ring *rx_ring;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;

        rx_ring = &adapter->rx_ring[index];

        srrctl = IXGBE_SRRCTL_DROP_EN;

        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                u16 bufsz = IXGBEVF_RXBUFFER_2048;
                /* grow the amount we can receive on large page machines */
                if (bufsz < (PAGE_SIZE / 2))
                        bufsz = (PAGE_SIZE / 2);
                /* cap the bufsz at our largest descriptor size */
                bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

                srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                           IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
                        srrctl |= IXGBEVF_RXBUFFER_2048 >>
                                IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                else
                        srrctl |= rx_ring->rx_buf_len >>
                                IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
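/*
 * Worked example (illustrative): the BSIZEPKT field is in 1KB units
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in the ixgbe register layout), so a
 * 2048-byte buffer is encoded as 2048 >> 10 = 2; the header-split path
 * likewise encodes IXGBEVF_RX_HDR_SIZE into the 64-byte-unit BSIZEHDRSIZE
 * field via the shift of 2 defined above.
 */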
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen;
        int rx_buf_len;

        /* Decide whether to use packet split mode or not */
        if (netdev->mtu > ETH_DATA_LEN) {
                if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
                        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
                else
                        adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
        } else {
                if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
                        adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
                else
                        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
        }

        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                /* PSRTYPE must be initialized in 82599 */
                u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                        IXGBE_PSRTYPE_UDPHDR |
                        IXGBE_PSRTYPE_IPV4HDR |
                        IXGBE_PSRTYPE_IPV6HDR |
                        IXGBE_PSRTYPE_L2HDR;
                IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
                rx_buf_len = IXGBEVF_RX_HDR_SIZE;
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
                if (netdev->mtu <= ETH_DATA_LEN)
                        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        rx_buf_len = ALIGN(max_frame, 1024);
        }

        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                j = adapter->rx_ring[i].reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
                                (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_VFRDH(j);
                adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;

                ixgbevf_configure_srrctl(adapter, j);
        }
}
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        /* add VID to filter table */
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);
        set_bit(vid, adapter->active_vlans);
}

static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        /* remove VID from filter table */
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, false);
        clear_bit(vid, adapter->active_vlans);
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
        u16 vid;

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int count = 0;

        if ((netdev_uc_count(netdev)) > 10) {
                printk(KERN_ERR "Too many unicast filters - No Space\n");
                return -ENOSPC;
        }

        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;
                netdev_for_each_uc_addr(ha, netdev) {
                        hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
                        udelay(200);
                }
        } else {
                /*
                 * If the list is empty then send message to PF driver to
                 * clear all macvlans on this VF.
                 */
                hw->mac.ops.set_uc_addr(hw, 0, NULL);
        }

        return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        /* reprogram multicast list */
        if (hw->mac.ops.update_mc_addr_list)
                hw->mac.ops.update_mc_addr_list(hw, netdev);

        ixgbevf_write_uc_addr_list(netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;
                q_vector = adapter->q_vector[q_idx];
                if (!q_vector->rxr_count)
                        continue;
                napi = &q_vector->napi;
                if (q_vector->rxr_count > 1)
                        napi->poll = &ixgbevf_clean_rxonly_many;

                napi_enable(napi);
        }
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                if (!q_vector->rxr_count)
                        continue;
                napi_disable(&q_vector->napi);
        }
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        ixgbevf_set_rx_mode(netdev);

        ixgbevf_restore_vlan(adapter);

        ixgbevf_configure_tx(adapter);
        ixgbevf_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->rx_ring[i];
                ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
                ring->next_to_use = ring->count - 1;
                writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
        }
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                                                int rxr)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int j = adapter->rx_ring[rxr].reg_idx;
        int k;

        for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
                if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
                        break;
                else
                        msleep(1);
        }
        if (k >= IXGBE_MAX_RX_DESC_POLL) {
                hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
                       "not set within the polling period\n", rxr);
        }

        ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
                                (adapter->rx_ring[rxr].count - 1));
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
        /* Only save pre-reset stats if there are some */
        if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
                adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
                        adapter->stats.base_vfgprc;
                adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
                        adapter->stats.base_vfgptc;
                adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
                        adapter->stats.base_vfgorc;
                adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
                        adapter->stats.base_vfgotc;
                adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
                        adapter->stats.base_vfmprc;
        }
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        adapter->stats.last_vfgorc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
        adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        adapter->stats.last_vfgotc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
        adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

        adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
        adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
        adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
        adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        int i, j = 0;
        int num_rx_rings = adapter->num_rx_queues;
        u32 txdctl, rxdctl;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                /* enable WTHRESH=8 descriptors, to encourage burst writeback */
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
        }

        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
        }

        for (i = 0; i < num_rx_rings; i++) {
                j = adapter->rx_ring[i].reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
                rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
                if (hw->mac.type == ixgbe_mac_X540_vf) {
                        rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
                        rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
                                   IXGBE_RXDCTL_RLPML_EN);
                }
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
                ixgbevf_rx_desc_queue_enable(adapter, i);
        }

        ixgbevf_configure_msix(adapter);

        if (hw->mac.ops.set_rar) {
                if (is_valid_ether_addr(hw->mac.addr))
                        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
                else
                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
        }

        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);

        /* enable transmits */
        netif_tx_start_all_queues(netdev);

        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);

        /* bring the link up in the watchdog, this could race with our first
         * link up interrupt but shouldn't be a problem */
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
        return 0;
}
int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
        int err;
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbevf_configure(adapter);

        err = ixgbevf_up_complete(adapter);

        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);

        ixgbevf_irq_enable(adapter, true, true);

        return err;
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
                                  struct ixgbevf_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        if (!rx_ring->rx_buffer_info)
                return;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbevf_rx_buffer *rx_buffer_info;

                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
                        dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
                }
                if (rx_buffer_info->skb) {
                        struct sk_buff *skb = rx_buffer_info->skb;
                        rx_buffer_info->skb = NULL;
                        do {
                                struct sk_buff *this = skb;
                                skb = skb->prev;
                                dev_kfree_skb(this);
                        } while (skb);
                }
                if (!rx_buffer_info->page)
                        continue;
                dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
                               PAGE_SIZE / 2, DMA_FROM_DEVICE);
                rx_buffer_info->page_dma = 0;
                put_page(rx_buffer_info->page);
                rx_buffer_info->page = NULL;
                rx_buffer_info->page_offset = 0;
        }

        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        if (rx_ring->head)
                writel(0, adapter->hw.hw_addr + rx_ring->head);
        if (rx_ring->tail)
                writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
                                  struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned long size;
        unsigned int i;

        if (!tx_ring->tx_buffer_info)
                return;

        /* Free all the Tx ring sk_buffs */

        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
        }

        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer_info, 0, size);

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        if (tx_ring->head)
                writel(0, adapter->hw.hw_addr + tx_ring->head);
        if (tx_ring->tail)
                writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 txdctl;
        int i, j;

        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBEVF_DOWN, &adapter->state);
        /* disable receives */

        netif_tx_disable(netdev);

        msleep(10);

        netif_tx_stop_all_queues(netdev);

        ixgbevf_irq_disable(adapter);

        ixgbevf_napi_disable_all(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        /* can't call flush scheduled work here because it can deadlock
         * if linkwatch_event tries to acquire the rtnl_lock which we are
         * holding */
        while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
                msleep(1);

        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }

        netif_carrier_off(netdev);

        if (!pci_channel_offline(adapter->pdev))
                ixgbevf_reset(adapter);

        ixgbevf_clean_all_tx_rings(adapter);
        ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        WARN_ON(in_interrupt());

        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                msleep(1);

        /*
         * Check if PF is up before re-init.  If not then skip until
         * later when the PF is up and ready to service requests from
         * the VF via mailbox.  If the VF is up and running then the
         * watchdog task will continue to schedule reset tasks until
         * the PF is up and running.
         */
        if (!hw->mac.ops.reset_hw(hw)) {
                ixgbevf_down(adapter);
                ixgbevf_up(adapter);
        }

        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        if (hw->mac.ops.reset_hw(hw))
                hw_dbg(hw, "PF still resetting\n");
        else
                hw->mac.ops.init_hw(hw);

        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
                memcpy(netdev->perm_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
        }
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                                         int vectors)
{
        int err, vector_threshold;

        /* We'll want at least 3 (vector_threshold):
         * 1) TxQ[0] Cleanup
         * 2) RxQ[0] Cleanup
         * 3) Other (Link Status Change, etc.)
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
        while (vectors >= vector_threshold) {
                err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                      vectors);
                if (!err) /* Success in acquiring all requested vectors. */
                        break;
                else if (err < 0)
                        vectors = 0; /* Nasty failure, quit now */
                else /* err == number of vectors we should try again with */
                        vectors = err;
        }

        if (vectors < vector_threshold) {
                /* Can't allocate enough MSI-X interrupts?  Oh well.
                 * This just means we'll go with either a single MSI
                 * vector or fall back to legacy interrupts.
                 */
                hw_dbg(&adapter->hw,
                       "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else {
                /*
                 * Adjust for only the vectors we'll use, which is minimum
                 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
                 * vectors we were allocated.
                 */
                adapter->num_msix_vectors = vectors;
        }
}
/*
 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
        /* Start with base case */
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_rx_pools = adapter->num_rx_queues;
        adapter->num_rx_queues_per_pool = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct ixgbevf_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err_tx_ring_allocation;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct ixgbevf_ring), GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err_rx_ring_allocation;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].count = adapter->tx_ring_count;
                adapter->tx_ring[i].queue_index = i;
                adapter->tx_ring[i].reg_idx = i;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].count = adapter->rx_ring_count;
                adapter->rx_ring[i].queue_index = i;
                adapter->rx_ring[i].reg_idx = i;
        }

        return 0;

err_rx_ring_allocation:
        kfree(adapter->tx_ring);
err_tx_ring_allocation:
        return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        int err = 0;
        int vector, v_budget;

        /*
         * It's easy to be greedy for MSI-X vectors, but it really
         * doesn't do us much good if we have a lot more vectors
         * than CPU's.  So let's be conservative and only ask for
         * (roughly) twice the number of vectors as there are CPU's.
         */
        v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
                       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

        /* A failure in MSI-X entry allocation isn't fatal, but it does
         * mean we disable MSI-X capabilities of the adapter. */
        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
                err = -ENOMEM;
                goto out;
        }

        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;

        ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
        return err;
}
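/*
 * Worked example (illustrative): with the default single Rx and Tx queue
 * on a 4-CPU host, v_budget = min(1 + 1, 8) + NON_Q_VECTORS, i.e. one
 * vector per queue plus the mailbox/"other" vector that MIN_MSIX_COUNT
 * also accounts for.
 */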
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        struct ixgbevf_q_vector *q_vector;
        int napi_vectors;
        int (*poll)(struct napi_struct *, int);

        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        napi_vectors = adapter->num_rx_queues;
        poll = &ixgbevf_clean_rxonly;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->v_idx = q_idx;
                q_vector->eitr = adapter->eitr_param;
                if (q_idx < napi_vectors)
                        netif_napi_add(adapter->netdev, &q_vector->napi,
                                       (*poll), 64);
                adapter->q_vector[q_idx] = q_vector;
        }

        return 0;

err_out:
        while (q_idx) {
                q_idx--;
                q_vector = adapter->q_vector[q_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[q_idx] = NULL;
        }
        return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        int napi_vectors;

        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        napi_vectors = adapter->num_rx_queues;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

                adapter->q_vector[q_idx] = NULL;
                if (q_idx < napi_vectors)
                        netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		printk(KERN_ERR "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);
	hw->mac.max_tx_queues = MAX_TX_QUEUES;
	hw->mac.max_rx_queues = MAX_RX_QUEUES;
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			printk(KERN_ERR "init_shared_code failed: %d\n", err);
			goto out;
		}
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->eitr_param = 20000;
	adapter->itr_setting = 1;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBEVF_DOWN, &adapter->state);

out:
	return err;
}
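/*
 * On the defaults above: eitr_param is the interrupt throttle target in
 * interrupts/second (20000 here), and itr_setting == 1 selects the
 * dynamic mode in which the rate is adjusted at runtime; eitr_low and
 * eitr_high appear to be the throughput thresholds (per the "in
 * MegaBytes" comment) used to classify traffic as latency- or
 * bulk-oriented when picking a rate.
 */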
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
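/*
 * Rollover example for the 32-bit case: if last_counter was 0xFFFFFFF0
 * and the register now reads 0x00000010, the hardware counter wrapped,
 * so 2^32 is added to the running 64-bit value before its low 32 bits
 * are replaced with the fresh reading.  The 36-bit variant applies the
 * same idea with a 2^36 correction and a 36-bit mask.
 */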
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= (1 << i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
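/*
 * Writing the accumulated vector mask to VTEICS above sets those
 * interrupt causes in hardware, which re-fires the corresponding MSI-X
 * vectors.  This gives every active queue pair a periodic service kick
 * even if its traffic has gone quiet.
 */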
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		if ((hw->mac.ops.check_link(hw, &link_speed,
					    &link_up, false)) != 0) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info) {
		hw_dbg(&adapter->hw,
		       "Unable to vmalloc buffer memory for "
		       "the receive descriptor ring\n");
		goto alloc_failed;
	}

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			printk(KERN_ERR "Unable to start - perhaps the PF"
			       " Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_up_complete(adapter);
	if (err)
		goto err_up;

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter, true, true);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_up:
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
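/*
 * Context-slot convention used above and in ixgbevf_tx_csum() below:
 * the hardware caches two transmit context descriptors per queue, and
 * this driver steers TSO contexts to index 1 and checksum-only
 * contexts to index 0 so the two kinds of offload don't evict each
 * other's cached context.
 */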
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					printk(KERN_WARNING
					       "partial checksum but "
					       "proto=%x!\n",
					       skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
			  struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
							   frag->page,
							   offset,
							   size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	return count;
}
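/*
 * The dma_error unwind above walks the ring backwards from the
 * descriptor that failed, unmapping every buffer this packet had
 * already mapped, so a partially mapped frame never leaves stale DMA
 * mappings behind.
 */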
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
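/*
 * The writel() above is the tail "doorbell": hardware fetches and
 * transmits descriptors up to, but not including, the tail index, so
 * the wmb() must make every descriptor write visible before the tail
 * moves.
 */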
static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
				   struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}
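/*
 * Stop-then-recheck protocol above: the queue is stopped first, a full
 * barrier makes the stop visible, and only then is the free-descriptor
 * count re-read.  If a cleanup running on another CPU freed space in
 * the meantime, the queue is restarted immediately instead of waiting
 * for the next Tx completion.
 */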
static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;

	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);

	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
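/*
 * The descriptor budgeting in ixgbevf_xmit_frame() is deliberately
 * pessimistic: one slot for a possible context descriptor plus
 * TXD_USE_COUNT() worst-case descriptors for the linear head and each
 * fragment.  Reserving the worst case up front means the ring can
 * never run out of slots halfway through mapping a frame.
 */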
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
	u32 msg[2];

	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = max_frame;
	hw->mbx.ops.write_posted(hw, msg, 2);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
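/*
 * The IXGBE_VF_SET_LPE mailbox message above hands the new maximum
 * frame size to the PF driver; the VF cannot program the device's
 * jumbo/large-packet configuration itself, so the PF applies it on the
 * VF's behalf.
 */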
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

	pci_save_state(pdev);

	pci_disable_device(pdev);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
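/*
 * The u64_stats_fetch_begin_bh()/retry loops above exist for 32-bit
 * SMP kernels, where a 64-bit counter cannot be read atomically: the
 * seqcount is sampled before the reads, and the reads are retried if a
 * writer updated the counters in between.  On 64-bit kernels they
 * compile down to plain loads.
 */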
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}
	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		printk(KERN_ERR "invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}
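	/*
	 * In the common case the address copied above was obtained from
	 * the PF over the mailbox during reset_hw in ixgbevf_sw_init();
	 * if the PF was still resetting, a random locally administered
	 * address was generated there instead.
	 */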
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	adapter->netdev_registered = true;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	       netdev->dev_addr[0],
	       netdev->dev_addr[1],
	       netdev->dev_addr[2],
	       netdev->dev_addr[3],
	       netdev->dev_addr[4],
	       netdev->dev_addr[5]);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "LRO is disabled\n");

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
	       ixgbevf_driver_version);

	printk(KERN_INFO "%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}
#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);
/* ixgbevf_main.c */