/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbevf.h"
char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 82599 Virtual Function";

#define DRV_VERSION "1.0.0-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
/**
 * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
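
/*
 * Worked example (editorial annotation, not from the original source):
 * mapping Rx queue 5 to MSI-X vector 2 calls
 * ixgbevf_set_ivar(adapter, 0, 5, 2).  Each VTIVAR register holds four
 * 8-bit entries covering two queues, so index = 16 * (5 & 1) + 8 * 0 = 16,
 * and vector 2 (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 23:16 of
 * VTIVAR(5 >> 1) = VTIVAR(2).
 */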
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			pci_unmap_page(adapter->pdev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       PCI_DMA_TODEVICE);
		else
			pci_unmap_single(adapter->pdev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *tx_ring,
					 unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = readl(hw->hw_addr + tx_ring->head);
	tail = readl(hw->hw_addr + tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		printk(KERN_ERR "Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       head, tail,
		       tx_ring->next_to_use, eop,
		       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif
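
/*
 * Worked example (editorial annotation): IXGBE_MAX_DATA_PER_TXD is
 * 1 << 14 = 16384 bytes, so TXD_USE_COUNT(2048) = (2048 >> 14) + 1 = 1
 * descriptor, while TXD_USE_COUNT(20000) = (20000 >> 14) + 1 = 2.  The
 * MAX_SKB_FRAGS form of DESC_NEEDED adds one descriptor per possible page
 * fragment plus one for the context descriptor.
 */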
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	if (adapter->detect_tx_hung) {
		if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			printk(KERN_INFO
			       "tx hang %d detected, resetting adapter\n",
			       adapter->tx_timeout_count + 1);
			ixgbevf_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;

	return (count < tx_ring->work_limit);
}
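
/*
 * Note on the accounting above (editorial annotation): for a TSO skb with
 * gso_segs = 4 and a 100-byte header, the wire byte count is reconstructed
 * as (4 - 1) * 100 + skb->len, i.e. the header is counted once per segment
 * even though it is stored only once in the skb.
 */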
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
	int ret;

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan)
			vlan_gro_receive(&q_vector->napi,
					 adapter->vlgrp,
					 tag, skb);
		else
			napi_gro_receive(&q_vector->napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan)
			ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			ret = netif_rx(skb);
	}
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(adapter->netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    PCI_DMA_FROMDEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = pci_map_single(pdev, skb->data,
						 rx_ring->rx_buf_len,
						 PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
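
/*
 * Note (editorial annotation): in packet-split mode each page is consumed
 * in half-page halves; bi->page_offset ^= (PAGE_SIZE / 2) toggles between
 * offset 0 and offset 2048 on 4K-page systems, so a recycled page is
 * DMA-mapped for the half the hardware is not currently writing to.
 */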
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb->len - skb->data_len;
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
		adapter->netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}
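
/*
 * Note (editorial annotation): IXGBEVF_RX_BUFFER_WRITE batches descriptor
 * refills so the tail register is not written on every frame; any remainder
 * is refilled after the loop using IXGBE_DESC_UNUSED(rx_ring).
 */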
/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}
/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
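
/*
 * Worked example (editorial annotation): with eitr = 20000 ints/s the last
 * timeslice was 1000000 / 20000 = 50 us.  If 50000 bytes arrived in that
 * interval, bytes_perint = 1000 bytes/us, which the switch above compares
 * against the adapter->eitr_low and adapter->eitr_high thresholds to pick
 * the next latency class.
 */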
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}

	return;
}
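
/*
 * Note (editorial annotation): current_itr = max(rx_itr, tx_itr) picks the
 * "bulkier" latency class, since bulk_latency (2) > low_latency (1) >
 * lowest_latency (0).  A vector serving both a bulk Rx ring and a
 * latency-sensitive Tx ring is therefore throttled to the bulk rate of
 * 8000 ints/s.
 */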
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	hw->mbx.ops.read(hw, &msg, 1);

	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 1));

	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}
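
/*
 * Worked example (editorial annotation): map_vector_to_rxq(adapter, 2, 0)
 * sets bit 0 in q_vector[2]->rxr_idx and records v_idx = 1 << 2 = 0x4 in
 * the ring, the same single-bit mask later written to VTEIMS/VTEIMC/VTEICS
 * to enable, disable, or trigger just that vector.
 */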
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
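
/*
 * Worked example (editorial annotation): with 4 Rx rings, 4 Tx rings, and
 * only 3 q_vectors, the fallback path assigns rqpv = DIV_ROUND_UP(4, 3) = 2
 * Rx rings to vector 0, then DIV_ROUND_UP(2, 2) = 1 to vector 1 and the
 * last one to vector 2; the Tx rings are spread the same way in the second
 * loop.
 */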
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
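
/*
 * Worked example (editorial annotation, assuming the ixgbe definition of
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT as 10): in single-buffer mode with 2048-byte
 * buffers, srrctl gets IXGBEVF_RXBUFFER_2048 >> 10 = 2, i.e. the BSIZEPKT
 * field is expressed in 1 KB units.
 */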
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			IXGBE_PSRTYPE_UDPHDR |
			IXGBE_PSRTYPE_IPV4HDR |
			IXGBE_PSRTYPE_IPV6HDR |
			IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static void ixgbevf_vlan_rx_register(struct net_device *netdev,
				     struct vlan_group *grp)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 ctrl;

	adapter->vlgrp = grp;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
	}
}
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *v_netdev;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
	/*
	 * Copy feature flags from netdev to the vlan netdev for this vid.
	 * This allows things like TSO to bubble down to our vlan device.
	 */
	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
	v_netdev->features |= adapter->netdev->features;
	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
}
static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable(adapter, true, true);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
				 u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* reprogram multicast list */
	addr_count = netdev_mc_count(netdev);
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
						ixgbevf_addr_list_itr);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	int err;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	err = ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);

	return err;
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
			       PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
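
/*
 * Note (editorial annotation): the legacy pci_enable_msix() used above
 * either succeeds (returns 0), fails hard (negative errno), or returns the
 * number of vectors that could be supported; e.g. asking for 5 when only 3
 * are available returns 3, and the loop retries with that smaller count
 * until it succeeds or drops below vector_threshold.
 */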
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;
	poll = &ixgbevf_clean_rxonly;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		q_vector->eitr = adapter->eitr_param;
		if (q_idx < napi_vectors)
			netif_napi_add(adapter->netdev, &q_vector->napi,
				       (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
2125 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2126 * @adapter: board private structure to initialize
2128 * This function frees the memory allocated to the q_vectors. In addition, if
2129 * NAPI is enabled, it will delete any references to the NAPI struct prior
2130 * to freeing the q_vector.
2132 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2134 int q_idx, num_q_vectors;
2137 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2138 napi_vectors = adapter->num_rx_queues;
2140 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2141 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2143 adapter->q_vector[q_idx] = NULL;
2144 if (q_idx < napi_vectors)
2145 netif_napi_del(&q_vector->napi);
2151 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2152 * @adapter: board private structure
2155 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2157 pci_disable_msix(adapter->pdev);
2158 kfree(adapter->msix_entries);
2159 adapter->msix_entries = NULL;
2165 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2166 * @adapter: board private structure to initialize
2169 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2173 /* Number of supported queues */
2174 ixgbevf_set_num_queues(adapter);
2176 err = ixgbevf_set_interrupt_capability(adapter);
2178 hw_dbg(&adapter->hw,
2179 "Unable to setup interrupt capabilities\n");
2180 goto err_set_interrupt;
2183 err = ixgbevf_alloc_q_vectors(adapter);
2185 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2187 goto err_alloc_q_vectors;
2190 err = ixgbevf_alloc_queues(adapter);
2192 printk(KERN_ERR "Unable to allocate memory for queues\n");
2193 goto err_alloc_queues;
2196 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2197 "Tx Queue count = %u\n",
2198 (adapter->num_rx_queues > 1) ? "Enabled" :
2199 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2201 set_bit(__IXGBEVF_DOWN, &adapter->state);
2205 ixgbevf_free_q_vectors(adapter);
2206 err_alloc_q_vectors:
2207 ixgbevf_reset_interrupt_capability(adapter);
2213 * ixgbevf_sw_init - Initialize general software structures
2214 * (struct ixgbevf_adapter)
2215 * @adapter: board private structure to initialize
2217 * ixgbevf_sw_init initializes the Adapter private data structure.
2218 * Fields are initialized based on PCI device information and
2219 * OS network device settings (MTU size).
2221 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2223 struct ixgbe_hw *hw = &adapter->hw;
2224 struct pci_dev *pdev = adapter->pdev;
2227 /* PCI config space info */
2229 hw->vendor_id = pdev->vendor;
2230 hw->device_id = pdev->device;
2231 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2232 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2233 hw->subsystem_device_id = pdev->subsystem_device;
2235 hw->mbx.ops.init_params(hw);
2236 hw->mac.max_tx_queues = MAX_TX_QUEUES;
2237 hw->mac.max_rx_queues = MAX_RX_QUEUES;
2238 err = hw->mac.ops.reset_hw(hw);
2240 dev_info(&pdev->dev,
2241 "PF still in reset state, assigning new address\n");
2242 random_ether_addr(hw->mac.addr);
2244 err = hw->mac.ops.init_hw(hw);
2246 printk(KERN_ERR "init_hw failed: %d\n", err);
2251 /* Enable dynamic interrupt throttling rates */
2252 adapter->eitr_param = 20000;
2253 adapter->itr_setting = 1;
2255 /* set defaults for eitr in MegaBytes */
2256 adapter->eitr_low = 10;
2257 adapter->eitr_high = 20;
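/*
 * Reading of these defaults (ixgbe-family convention, inferred rather
 * than spelled out here): eitr_param is a target interrupt rate in
 * interrupts/second, itr_setting == 1 enables dynamic adjustment, and
 * eitr_low/eitr_high are the traffic thresholds that move a vector
 * between the latency and bulk throttling profiles.
 */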
2259 /* set default ring sizes */
2260 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2261 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2263 /* enable rx csum by default */
2264 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2266 set_bit(__IXGBEVF_DOWN, &adapter->state);
2272 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2274 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2275 if (current_counter < last_counter) \
2276 counter += 0x100000000LL; \
2277 last_counter = current_counter; \
2278 counter &= 0xFFFFFFFF00000000LL; \
2279 counter |= current_counter; \
2282 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2284 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2285 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2286 u64 current_counter = (current_counter_msb << 32) | \
2287 current_counter_lsb; \
2288 if (current_counter < last_counter) \
2289 counter += 0x1000000000LL; \
2290 last_counter = current_counter; \
2291 counter &= 0xFFFFFFF000000000LL; \
2292 counter |= current_counter; \
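/*
 * Worked example for the 32-bit variant (a sketch, values invented):
 * if last_counter = 0xFFFFFFF0 and the register now reads 0x00000010,
 * current < last, so 2^32 is added to the running 64-bit software
 * counter and its low 32 bits are then replaced with the new register
 * value -- the software counter keeps counting across hardware wraps.
 * The 36-bit variant does the same with a 2^36 rollover constant and
 * a 0xFFFFFFF000000000 mask for the preserved high bits.
 */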
2295 * ixgbevf_update_stats - Update the board statistics counters.
2296 * @adapter: board private structure
2298 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2300 struct ixgbe_hw *hw = &adapter->hw;
2302 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2303 adapter->stats.vfgprc);
2304 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2305 adapter->stats.vfgptc);
2306 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2307 adapter->stats.last_vfgorc,
2308 adapter->stats.vfgorc);
2309 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2310 adapter->stats.last_vfgotc,
2311 adapter->stats.vfgotc);
2312 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2313 adapter->stats.vfmprc);
2315 /* Fill out the OS statistics structure */
2316 adapter->net_stats.multicast = adapter->stats.vfmprc -
2317 adapter->stats.base_vfmprc;
2321 * ixgbevf_watchdog - Timer Call-back
2322 * @data: pointer to adapter cast into an unsigned long
2324 static void ixgbevf_watchdog(unsigned long data)
2326 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2327 struct ixgbe_hw *hw = &adapter->hw;
2332 * Do the watchdog outside of interrupt context due to the lovely
2333 * delays that some of the newer hardware requires
2336 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2337 goto watchdog_short_circuit;
2339 /* get one bit for every active tx/rx interrupt vector */
2340 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2341 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2342 if (qv->rxr_count || qv->txr_count)
2346 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
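/*
 * Writing the accumulated mask to VTEICS software-triggers those
 * vectors, so each active ring's interrupt handler runs at least once
 * per watchdog tick and a stalled ring can be noticed even when the
 * hardware has stopped generating interrupts on its own.
 */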
2348 watchdog_short_circuit:
2349 schedule_work(&adapter->watchdog_task);
2353 * ixgbevf_tx_timeout - Respond to a Tx Hang
2354 * @netdev: network interface device structure
2356 static void ixgbevf_tx_timeout(struct net_device *netdev)
2358 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2360 /* Do the reset outside of interrupt context */
2361 schedule_work(&adapter->reset_task);
2364 static void ixgbevf_reset_task(struct work_struct *work)
2366 struct ixgbevf_adapter *adapter;
2367 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2369 /* If we're already down or resetting, just bail */
2370 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2371 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2374 adapter->tx_timeout_count++;
2376 ixgbevf_reinit_locked(adapter);
2380 * ixgbevf_watchdog_task - worker thread to bring link up
2381 * @work: pointer to work_struct containing our data
2383 static void ixgbevf_watchdog_task(struct work_struct *work)
2385 struct ixgbevf_adapter *adapter = container_of(work,
2386 struct ixgbevf_adapter,
2388 struct net_device *netdev = adapter->netdev;
2389 struct ixgbe_hw *hw = &adapter->hw;
2390 u32 link_speed = adapter->link_speed;
2391 bool link_up = adapter->link_up;
2393 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2396 * Always check the link on the watchdog because we have no link status change (LSC) interrupt
2399 if (hw->mac.ops.check_link) {
2400 if ((hw->mac.ops.check_link(hw, &link_speed,
2401 &link_up, false)) != 0) {
2402 adapter->link_up = link_up;
2403 adapter->link_speed = link_speed;
2404 netif_carrier_off(netdev);
2405 netif_tx_stop_all_queues(netdev);
2406 schedule_work(&adapter->reset_task);
2410 /* always assume link is up, if no check link function */
2412 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2415 adapter->link_up = link_up;
2416 adapter->link_speed = link_speed;
2419 if (!netif_carrier_ok(netdev)) {
2420 hw_dbg(&adapter->hw, "NIC Link is Up, %s\n",
2421 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2422 "10 Gbps" : "1 Gbps"));
2423 netif_carrier_on(netdev);
2424 netif_tx_wake_all_queues(netdev);
2426 /* Force detection of hung controller */
2427 adapter->detect_tx_hung = true;
2430 adapter->link_up = false;
2431 adapter->link_speed = 0;
2432 if (netif_carrier_ok(netdev)) {
2433 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2434 netif_carrier_off(netdev);
2435 netif_tx_stop_all_queues(netdev);
2439 ixgbevf_update_stats(adapter);
2442 /* Force detection of hung controller every watchdog period */
2443 adapter->detect_tx_hung = true;
2445 /* Reset the timer */
2446 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2447 mod_timer(&adapter->watchdog_timer,
2448 round_jiffies(jiffies + (2 * HZ)));
2450 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2454 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2455 * @adapter: board private structure
2456 * @tx_ring: Tx descriptor ring for a specific queue
2458 * Free all transmit software resources
2460 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2461 struct ixgbevf_ring *tx_ring)
2463 struct pci_dev *pdev = adapter->pdev;
2465 ixgbevf_clean_tx_ring(adapter, tx_ring);
2467 vfree(tx_ring->tx_buffer_info);
2468 tx_ring->tx_buffer_info = NULL;
2470 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2472 tx_ring->desc = NULL;
2476 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2477 * @adapter: board private structure
2479 * Free all transmit software resources
2481 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2485 for (i = 0; i < adapter->num_tx_queues; i++)
2486 if (adapter->tx_ring[i].desc)
2487 ixgbevf_free_tx_resources(adapter,
2488 &adapter->tx_ring[i]);
2493 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2494 * @adapter: board private structure
2495 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2497 * Return 0 on success, negative on failure
2499 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2500 struct ixgbevf_ring *tx_ring)
2502 struct pci_dev *pdev = adapter->pdev;
2505 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2506 tx_ring->tx_buffer_info = vmalloc(size);
2507 if (!tx_ring->tx_buffer_info)
2509 memset(tx_ring->tx_buffer_info, 0, size);
2511 /* round up to nearest 4K */
2512 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2513 tx_ring->size = ALIGN(tx_ring->size, 4096);
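/*
 * Sizing sketch: each advanced Tx descriptor is 16 bytes, so a
 * 1024-entry ring (a typical default, assumed here) needs 16 KB; the
 * ALIGN() above rounds the byte count up to a whole multiple of 4 KB,
 * since the hardware expects descriptor rings sized in 4 KB units.
 */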
2515 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2520 tx_ring->next_to_use = 0;
2521 tx_ring->next_to_clean = 0;
2522 tx_ring->work_limit = tx_ring->count;
2526 vfree(tx_ring->tx_buffer_info);
2527 tx_ring->tx_buffer_info = NULL;
2528 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2529 "descriptor ring\n");
2534 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2535 * @adapter: board private structure
2537 * If this function returns with an error, then it's possible one or
2538 * more of the rings is populated (while the rest are not). It is the
2539 * caller's duty to clean those orphaned rings.
2541 * Return 0 on success, negative on failure
2543 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2547 for (i = 0; i < adapter->num_tx_queues; i++) {
2548 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2551 hw_dbg(&adapter->hw,
2552 "Allocation for Tx Queue %u failed\n", i);
2560 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2561 * @adapter: board private structure
2562 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2564 * Returns 0 on success, negative on failure
2566 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2567 struct ixgbevf_ring *rx_ring)
2569 struct pci_dev *pdev = adapter->pdev;
2572 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2573 rx_ring->rx_buffer_info = vmalloc(size);
2574 if (!rx_ring->rx_buffer_info) {
2575 hw_dbg(&adapter->hw,
2576 "Unable to vmalloc buffer memory for "
2577 "the receive descriptor ring\n");
2580 memset(rx_ring->rx_buffer_info, 0, size);
2582 /* Round up to nearest 4K */
2583 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2584 rx_ring->size = ALIGN(rx_ring->size, 4096);
2586 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2589 if (!rx_ring->desc) {
2590 hw_dbg(&adapter->hw,
2591 "Unable to allocate memory for "
2592 "the receive descriptor ring\n");
2593 vfree(rx_ring->rx_buffer_info);
2594 rx_ring->rx_buffer_info = NULL;
2598 rx_ring->next_to_clean = 0;
2599 rx_ring->next_to_use = 0;
2607 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2608 * @adapter: board private structure
2610 * If this function returns with an error, then it's possible one or
2611 * more of the rings is populated (while the rest are not). It is the
2612 * caller's duty to clean those orphaned rings.
2614 * Return 0 on success, negative on failure
2616 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2620 for (i = 0; i < adapter->num_rx_queues; i++) {
2621 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2624 hw_dbg(&adapter->hw,
2625 "Allocation for Rx Queue %u failed\n", i);
2632 * ixgbevf_free_rx_resources - Free Rx Resources
2633 * @adapter: board private structure
2634 * @rx_ring: ring to clean the resources from
2636 * Free all receive software resources
2638 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2639 struct ixgbevf_ring *rx_ring)
2641 struct pci_dev *pdev = adapter->pdev;
2643 ixgbevf_clean_rx_ring(adapter, rx_ring);
2645 vfree(rx_ring->rx_buffer_info);
2646 rx_ring->rx_buffer_info = NULL;
2648 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2650 rx_ring->desc = NULL;
2654 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2655 * @adapter: board private structure
2657 * Free all receive software resources
2659 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2663 for (i = 0; i < adapter->num_rx_queues; i++)
2664 if (adapter->rx_ring[i].desc)
2665 ixgbevf_free_rx_resources(adapter,
2666 &adapter->rx_ring[i]);
2670 * ixgbevf_open - Called when a network interface is made active
2671 * @netdev: network interface device structure
2673 * Returns 0 on success, negative value on failure
2675 * The open entry point is called when a network interface is made
2676 * active by the system (IFF_UP). At this point all resources needed
2677 * for transmit and receive operations are allocated, the interrupt
2678 * handler is registered with the OS, the watchdog timer is started,
2679 * and the stack is notified that the interface is ready.
2681 static int ixgbevf_open(struct net_device *netdev)
2683 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2684 struct ixgbe_hw *hw = &adapter->hw;
2687 /* disallow open during test */
2688 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2691 if (hw->adapter_stopped) {
2692 ixgbevf_reset(adapter);
2693 /* if adapter is still stopped then PF isn't up and
2694 * the VF can't start. */
2695 if (hw->adapter_stopped) {
2696 err = IXGBE_ERR_MBX;
2697 printk(KERN_ERR "Unable to start - perhaps the PF"
2698 " Driver isn't up yet\n");
2699 goto err_setup_reset;
2703 /* allocate transmit descriptors */
2704 err = ixgbevf_setup_all_tx_resources(adapter);
2708 /* allocate receive descriptors */
2709 err = ixgbevf_setup_all_rx_resources(adapter);
2713 ixgbevf_configure(adapter);
2716 * Map the Tx/Rx rings to the vectors we were allotted.
2717 * Since request_irq will be called in this function,
2718 * map_rings must be called *before* up_complete.
2720 ixgbevf_map_rings_to_vectors(adapter);
2722 err = ixgbevf_up_complete(adapter);
2726 /* clear any pending interrupts, may auto mask */
2727 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2728 err = ixgbevf_request_irq(adapter);
2732 ixgbevf_irq_enable(adapter, true, true);
2737 ixgbevf_down(adapter);
2739 ixgbevf_free_irq(adapter);
2741 ixgbevf_free_all_rx_resources(adapter);
2743 ixgbevf_free_all_tx_resources(adapter);
2744 ixgbevf_reset(adapter);
2752 * ixgbevf_close - Disables a network interface
2753 * @netdev: network interface device structure
2755 * Returns 0, this is not allowed to fail
2757 * The close entry point is called when an interface is de-activated
2758 * by the OS. The hardware is still under the drivers control, but
2759 * needs to be disabled. A global MAC reset is issued to stop the
2760 * hardware, and all transmit and receive resources are freed.
2762 static int ixgbevf_close(struct net_device *netdev)
2764 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2766 ixgbevf_down(adapter);
2767 ixgbevf_free_irq(adapter);
2769 ixgbevf_free_all_tx_resources(adapter);
2770 ixgbevf_free_all_rx_resources(adapter);
2775 static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
2776 struct ixgbevf_ring *tx_ring,
2777 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2779 struct ixgbe_adv_tx_context_desc *context_desc;
2782 struct ixgbevf_tx_buffer *tx_buffer_info;
2783 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2784 u32 mss_l4len_idx, l4len;
2786 if (skb_is_gso(skb)) {
2787 if (skb_header_cloned(skb)) {
2788 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2792 l4len = tcp_hdrlen(skb);
2795 if (skb->protocol == htons(ETH_P_IP)) {
2796 struct iphdr *iph = ip_hdr(skb);
2799 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2803 adapter->hw_tso_ctxt++;
2804 } else if (skb_is_gso_v6(skb)) {
2805 ipv6_hdr(skb)->payload_len = 0;
2806 tcp_hdr(skb)->check =
2807 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2808 &ipv6_hdr(skb)->daddr,
2810 adapter->hw_tso6_ctxt++;
2813 i = tx_ring->next_to_use;
2815 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2816 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2818 /* VLAN MACLEN IPLEN */
2819 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2821 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2822 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2823 IXGBE_ADVTXD_MACLEN_SHIFT);
2824 *hdr_len += skb_network_offset(skb);
2826 (skb_transport_header(skb) - skb_network_header(skb));
2828 (skb_transport_header(skb) - skb_network_header(skb));
2829 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2830 context_desc->seqnum_seed = 0;
2832 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2833 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2834 IXGBE_ADVTXD_DTYP_CTXT);
2836 if (skb->protocol == htons(ETH_P_IP))
2837 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2838 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2839 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2843 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2844 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2845 /* use index 1 for TSO */
2846 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2847 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2849 tx_buffer_info->time_stamp = jiffies;
2850 tx_buffer_info->next_to_watch = i;
2853 if (i == tx_ring->count)
2855 tx_ring->next_to_use = i;
2863 static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
2864 struct ixgbevf_ring *tx_ring,
2865 struct sk_buff *skb, u32 tx_flags)
2867 struct ixgbe_adv_tx_context_desc *context_desc;
2869 struct ixgbevf_tx_buffer *tx_buffer_info;
2870 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2872 if (skb->ip_summed == CHECKSUM_PARTIAL ||
2873 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2874 i = tx_ring->next_to_use;
2875 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2876 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2878 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2879 vlan_macip_lens |= (tx_flags &
2880 IXGBE_TX_FLAGS_VLAN_MASK);
2881 vlan_macip_lens |= (skb_network_offset(skb) <<
2882 IXGBE_ADVTXD_MACLEN_SHIFT);
2883 if (skb->ip_summed == CHECKSUM_PARTIAL)
2884 vlan_macip_lens |= (skb_transport_header(skb) -
2885 skb_network_header(skb));
2887 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2888 context_desc->seqnum_seed = 0;
2890 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2891 IXGBE_ADVTXD_DTYP_CTXT);
2893 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2894 switch (skb->protocol) {
2895 case __constant_htons(ETH_P_IP):
2896 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2897 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2899 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2901 case __constant_htons(ETH_P_IPV6):
2902 /* XXX what about other V6 headers?? */
2903 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2905 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2908 if (unlikely(net_ratelimit())) {
2910 "partial checksum but "
2918 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2919 /* use index zero for tx checksum offload */
2920 context_desc->mss_l4len_idx = 0;
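/*
 * Note on the index choice: the hardware keeps (at least) two context
 * slots per queue; this driver puts checksum offload in slot 0 here
 * and TSO in slot 1 (see the "use index 1 for TSO" path above), so
 * both contexts can stay resident side by side.
 */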
2922 tx_buffer_info->time_stamp = jiffies;
2923 tx_buffer_info->next_to_watch = i;
2925 adapter->hw_csum_tx_good++;
2927 if (i == tx_ring->count)
2929 tx_ring->next_to_use = i;
2937 static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2938 struct ixgbevf_ring *tx_ring,
2939 struct sk_buff *skb, u32 tx_flags,
2942 struct pci_dev *pdev = adapter->pdev;
2943 struct ixgbevf_tx_buffer *tx_buffer_info;
2945 unsigned int total = skb->len;
2946 unsigned int offset = 0, size, count = 0, i;
2947 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2950 i = tx_ring->next_to_use;
2952 len = min(skb_headlen(skb), total);
2954 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2955 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2957 tx_buffer_info->length = size;
2958 tx_buffer_info->mapped_as_page = false;
2959 tx_buffer_info->dma = pci_map_single(adapter->pdev,
2961 size, PCI_DMA_TODEVICE);
2962 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2964 tx_buffer_info->time_stamp = jiffies;
2965 tx_buffer_info->next_to_watch = i;
2972 if (i == tx_ring->count)
2976 for (f = 0; f < nr_frags; f++) {
2977 struct skb_frag_struct *frag;
2979 frag = &skb_shinfo(skb)->frags[f];
2980 len = min((unsigned int)frag->size, total);
2981 offset = frag->page_offset;
2984 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2985 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2987 tx_buffer_info->length = size;
2988 tx_buffer_info->dma = pci_map_page(adapter->pdev,
2993 tx_buffer_info->mapped_as_page = true;
2994 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2996 tx_buffer_info->time_stamp = jiffies;
2997 tx_buffer_info->next_to_watch = i;
3004 if (i == tx_ring->count)
3012 i = tx_ring->count - 1;
3015 tx_ring->tx_buffer_info[i].skb = skb;
3016 tx_ring->tx_buffer_info[first].next_to_watch = i;
3021 dev_err(&pdev->dev, "TX DMA map failed\n");
3023 /* clear timestamp and dma mappings for failed tx_buffer_info map */
3024 tx_buffer_info->dma = 0;
3025 tx_buffer_info->time_stamp = 0;
3026 tx_buffer_info->next_to_watch = 0;
3029 /* clear timestamp and dma mappings for remaining portion of packet */
3030 while (count >= 0) {
3034 i += tx_ring->count;
3035 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3036 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3042 static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
3043 struct ixgbevf_ring *tx_ring, int tx_flags,
3044 int count, u32 paylen, u8 hdr_len)
3046 union ixgbe_adv_tx_desc *tx_desc = NULL;
3047 struct ixgbevf_tx_buffer *tx_buffer_info;
3048 u32 olinfo_status = 0, cmd_type_len = 0;
3051 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3053 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3055 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3057 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3058 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3060 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3061 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3063 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3064 IXGBE_ADVTXD_POPTS_SHIFT;
3066 /* use index 1 context for tso */
3067 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3068 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3069 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3070 IXGBE_ADVTXD_POPTS_SHIFT;
3072 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3073 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3074 IXGBE_ADVTXD_POPTS_SHIFT;
3076 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
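/*
 * Putting the flags together (informal summary): a TSO IPv4 frame ends
 * up with DCMD_TSE set in cmd_type_len, TXSM (L4 checksum) and IXSM
 * (IP checksum) set in olinfo_status, context index 1 selected, and
 * paylen reduced by the header length so the hardware reports the
 * true payload size.
 */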
3078 i = tx_ring->next_to_use;
3080 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3081 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3082 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3083 tx_desc->read.cmd_type_len =
3084 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3085 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3087 if (i == tx_ring->count)
3091 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3094 * Force memory writes to complete before letting h/w
3095 * know there are new descriptors to fetch. (Only
3096 * applicable for weak-ordered memory model archs,
3101 tx_ring->next_to_use = i;
3102 writel(i, adapter->hw.hw_addr + tx_ring->tail);
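/*
 * The tail write is what actually hands the new descriptors to the
 * device: hardware fetches from its head pointer up to (but not
 * including) the tail just written, which is why the wmb() above must
 * keep the descriptor stores ahead of this doorbell.
 */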
3105 static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3106 struct ixgbevf_ring *tx_ring, int size)
3108 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3110 netif_stop_subqueue(netdev, tx_ring->queue_index);
3111 /* Herbert's original patch had:
3112 * smp_mb__after_netif_stop_queue();
3113 * but since that doesn't exist yet, just open code it. */
3116 /* We need to check again in a case another CPU has just
3117 * made room available. */
3118 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3121 /* A reprieve! - use start_queue because it doesn't call schedule */
3122 netif_start_subqueue(netdev, tx_ring->queue_index);
3123 ++adapter->restart_queue;
3127 static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
3128 struct ixgbevf_ring *tx_ring, int size)
3130 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3132 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
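/*
 * Two-stage flow control: the inline check above handles the common
 * case with no barriers; only when the ring looks full do we take the
 * slow path, which stops the queue, inserts a barrier, and re-checks
 * so a concurrent clean-up can't leave the queue stopped while room
 * is actually available.
 */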
3135 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3137 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3138 struct ixgbevf_ring *tx_ring;
3140 unsigned int tx_flags = 0;
3147 tx_ring = &adapter->tx_ring[r_idx];
3149 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3150 tx_flags |= vlan_tx_tag_get(skb);
3151 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3152 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3155 /* three things can cause us to need a context descriptor */
3156 if (skb_is_gso(skb) ||
3157 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3158 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3161 count += TXD_USE_COUNT(skb_headlen(skb));
3162 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3163 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
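/*
 * Rough count sketch: TXD_USE_COUNT() (as in the sibling ixgbe driver)
 * rounds a buffer length up to the number of descriptors needed at the
 * per-descriptor data limit, so the linear header area and each page
 * fragment contribute at least one descriptor, and the possible
 * context descriptor was already accounted for above.
 */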
3165 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
3167 return NETDEV_TX_BUSY;
3170 first = tx_ring->next_to_use;
3172 if (skb->protocol == htons(ETH_P_IP))
3173 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3174 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3176 dev_kfree_skb_any(skb);
3177 return NETDEV_TX_OK;
3181 tx_flags |= IXGBE_TX_FLAGS_TSO;
3182 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3183 (skb->ip_summed == CHECKSUM_PARTIAL))
3184 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3186 ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
3187 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
3190 netdev->trans_start = jiffies;
3192 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3194 return NETDEV_TX_OK;
3198 * ixgbevf_get_stats - Get System Network Statistics
3199 * @netdev: network interface device structure
3201 * Returns the address of the device statistics structure.
3202 * The statistics are actually updated from the timer callback.
3204 static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
3206 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3208 /* only return the current stats */
3209 return &adapter->net_stats;
3213 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3214 * @netdev: network interface device structure
3215 * @p: pointer to an address structure
3217 * Returns 0 on success, negative on failure
3219 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3221 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3222 struct ixgbe_hw *hw = &adapter->hw;
3223 struct sockaddr *addr = p;
3225 if (!is_valid_ether_addr(addr->sa_data))
3226 return -EADDRNOTAVAIL;
3228 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3229 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3231 if (hw->mac.ops.set_rar)
3232 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3238 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3239 * @netdev: network interface device structure
3240 * @new_mtu: new value for maximum frame size
3242 * Returns 0 on success, negative on failure
3244 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3246 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3247 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3249 /* MTU < 68 is an error and causes problems on some kernels */
3250 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
3253 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3254 netdev->mtu, new_mtu);
3255 /* must set new MTU before calling down or up */
3256 netdev->mtu = new_mtu;
3258 if (netif_running(netdev))
3259 ixgbevf_reinit_locked(adapter);
3264 static void ixgbevf_shutdown(struct pci_dev *pdev)
3266 struct net_device *netdev = pci_get_drvdata(pdev);
3267 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3269 netif_device_detach(netdev);
3271 if (netif_running(netdev)) {
3272 ixgbevf_down(adapter);
3273 ixgbevf_free_irq(adapter);
3274 ixgbevf_free_all_tx_resources(adapter);
3275 ixgbevf_free_all_rx_resources(adapter);
3279 pci_save_state(pdev);
3282 pci_disable_device(pdev);
3285 static const struct net_device_ops ixgbe_netdev_ops = {
3286 .ndo_open = &ixgbevf_open,
3287 .ndo_stop = &ixgbevf_close,
3288 .ndo_start_xmit = &ixgbevf_xmit_frame,
3289 .ndo_get_stats = &ixgbevf_get_stats,
3290 .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
3291 .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
3292 .ndo_validate_addr = eth_validate_addr,
3293 .ndo_set_mac_address = &ixgbevf_set_mac,
3294 .ndo_change_mtu = &ixgbevf_change_mtu,
3295 .ndo_tx_timeout = &ixgbevf_tx_timeout,
3296 .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
3297 .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
3298 .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
3301 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3303 struct ixgbevf_adapter *adapter;
3304 adapter = netdev_priv(dev);
3305 dev->netdev_ops = &ixgbe_netdev_ops;
3306 ixgbevf_set_ethtool_ops(dev);
3307 dev->watchdog_timeo = 5 * HZ;
3311 * ixgbevf_probe - Device Initialization Routine
3312 * @pdev: PCI device information struct
3313 * @ent: entry in ixgbevf_pci_tbl
3315 * Returns 0 on success, negative on failure
3317 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3318 * The OS initialization, configuring of the adapter private structure,
3319 * and a hardware reset occur.
3321 static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3322 const struct pci_device_id *ent)
3324 struct net_device *netdev;
3325 struct ixgbevf_adapter *adapter = NULL;
3326 struct ixgbe_hw *hw = NULL;
3327 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3328 static int cards_found;
3329 int err, pci_using_dac;
3331 err = pci_enable_device(pdev);
3335 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3336 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3339 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3341 err = pci_set_consistent_dma_mask(pdev,
3344 dev_err(&pdev->dev, "No usable DMA "
3345 "configuration, aborting\n");
3352 err = pci_request_regions(pdev, ixgbevf_driver_name);
3354 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3358 pci_set_master(pdev);
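/*
 * pci_using_dac records whether the full 64-bit DMA mask was accepted
 * above; it is consulted later, when assembling netdev->features, to
 * decide whether NETIF_F_HIGHDMA may be advertised (intent inferred
 * from the feature setup further down).
 */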
3361 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3364 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3368 goto err_alloc_etherdev;
3371 SET_NETDEV_DEV(netdev, &pdev->dev);
3373 pci_set_drvdata(pdev, netdev);
3374 adapter = netdev_priv(netdev);
3376 adapter->netdev = netdev;
3377 adapter->pdev = pdev;
3380 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3383 * call save state here in standalone driver because it relies on
3384 * adapter struct to exist, and needs to call netdev_priv
3386 pci_save_state(pdev);
3388 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3389 pci_resource_len(pdev, 0));
3395 ixgbevf_assign_netdev_ops(netdev);
3397 adapter->bd_number = cards_found;
3400 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3401 hw->mac.type = ii->mac;
3403 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3404 sizeof(struct ixgbe_mbx_operations));
3406 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3407 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3408 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3410 /* setup the private structure */
3411 err = ixgbevf_sw_init(adapter);
3413 #ifdef MAX_SKB_FRAGS
3414 netdev->features = NETIF_F_SG |
3416 NETIF_F_HW_VLAN_TX |
3417 NETIF_F_HW_VLAN_RX |
3418 NETIF_F_HW_VLAN_FILTER;
3420 netdev->features |= NETIF_F_IPV6_CSUM;
3421 netdev->features |= NETIF_F_TSO;
3422 netdev->features |= NETIF_F_TSO6;
3423 netdev->vlan_features |= NETIF_F_TSO;
3424 netdev->vlan_features |= NETIF_F_TSO6;
3425 netdev->vlan_features |= NETIF_F_IP_CSUM;
3426 netdev->vlan_features |= NETIF_F_SG;
3429 netdev->features |= NETIF_F_HIGHDMA;
3431 #endif /* MAX_SKB_FRAGS */
3433 /* The HW MAC address was set and/or determined in sw_init */
3434 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3435 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3437 if (!is_valid_ether_addr(netdev->dev_addr)) {
3438 printk(KERN_ERR "invalid MAC address\n");
3443 init_timer(&adapter->watchdog_timer);
3444 adapter->watchdog_timer.function = &ixgbevf_watchdog;
3445 adapter->watchdog_timer.data = (unsigned long)adapter;
3447 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3448 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3450 err = ixgbevf_init_interrupt_scheme(adapter);
3454 /* pick up the PCI bus settings for reporting later */
3455 if (hw->mac.ops.get_bus_info)
3456 hw->mac.ops.get_bus_info(hw);
3459 netif_carrier_off(netdev);
3460 netif_tx_stop_all_queues(netdev);
3462 strcpy(netdev->name, "eth%d");
3464 err = register_netdev(netdev);
3468 adapter->netdev_registered = true;
3470 ixgbevf_init_last_counter_stats(adapter);
3472 /* print the MAC address */
3473 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3474 netdev->dev_addr[0],
3475 netdev->dev_addr[1],
3476 netdev->dev_addr[2],
3477 netdev->dev_addr[3],
3478 netdev->dev_addr[4],
3479 netdev->dev_addr[5]);
3481 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3483 hw_dbg(hw, "LRO is disabled \n");
3485 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3491 ixgbevf_reset_interrupt_capability(adapter);
3492 iounmap(hw->hw_addr);
3494 free_netdev(netdev);
3496 pci_release_regions(pdev);
3499 pci_disable_device(pdev);
3504 * ixgbevf_remove - Device Removal Routine
3505 * @pdev: PCI device information struct
3507 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3508 * that it should release a PCI device. This could be caused by a
3509 * Hot-Plug event, or because the driver is going to be removed from
3512 static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3514 struct net_device *netdev = pci_get_drvdata(pdev);
3515 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3517 set_bit(__IXGBEVF_DOWN, &adapter->state);
3519 del_timer_sync(&adapter->watchdog_timer);
3521 cancel_work_sync(&adapter->watchdog_task);
3523 flush_scheduled_work();
3525 if (adapter->netdev_registered) {
3526 unregister_netdev(netdev);
3527 adapter->netdev_registered = false;
3530 ixgbevf_reset_interrupt_capability(adapter);
3532 iounmap(adapter->hw.hw_addr);
3533 pci_release_regions(pdev);
3535 hw_dbg(&adapter->hw, "Remove complete\n");
3537 kfree(adapter->tx_ring);
3538 kfree(adapter->rx_ring);
3540 free_netdev(netdev);
3542 pci_disable_device(pdev);
3545 static struct pci_driver ixgbevf_driver = {
3546 .name = ixgbevf_driver_name,
3547 .id_table = ixgbevf_pci_tbl,
3548 .probe = ixgbevf_probe,
3549 .remove = __devexit_p(ixgbevf_remove),
3550 .shutdown = ixgbevf_shutdown,
3554 * ixgbevf_init_module - Driver Registration Routine
3556 * ixgbevf_init_module is the first routine called when the driver is
3557 * loaded. All it does is register with the PCI subsystem.
3559 static int __init ixgbevf_init_module(void)
3562 printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
3563 ixgbevf_driver_version);
3565 printk(KERN_INFO "%s\n", ixgbevf_copyright);
3567 ret = pci_register_driver(&ixgbevf_driver);
3571 module_init(ixgbevf_init_module);
3574 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3576 * ixgbevf_exit_module is called just before the driver is removed
3579 static void __exit ixgbevf_exit_module(void)
3581 pci_unregister_driver(&ixgbevf_driver);
3586 * ixgbevf_get_hw_dev_name - return device name string
3587 * used by hardware layer to print debugging information
3589 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3591 struct ixgbevf_adapter *adapter = hw->back;
3592 return adapter->netdev->name;
3596 module_exit(ixgbevf_exit_module);
3598 /* ixgbevf_main.c */