/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbevf.h"

char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 82599 Virtual Function";

#define DRV_VERSION "1.0.0-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
        board_82599_vf},

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
                               u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
                                           u32 val)
{
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/*
 * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* tx or rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
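                /*
                 * Each VTIVAR register holds four 8-bit IVAR entries:
                 * two queues (even/odd) times two causes (Rx = 0,
                 * Tx = 1).  For example, Rx queue 3 lands in VTIVAR(1)
                 * at bit offset 16 * (3 & 1) + 8 * 0 = 16.
                 */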
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
                                               struct ixgbevf_tx_buffer
                                               *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
                        pci_unmap_page(adapter->pdev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       PCI_DMA_TODEVICE);
                else
                        pci_unmap_single(adapter->pdev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
                                         struct ixgbevf_ring *tx_ring,
                                         unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 head, tail;

        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of eop */
        head = readl(hw->hw_addr + tx_ring->head);
        tail = readl(hw->hw_addr + tx_ring->tail);
        adapter->detect_tx_hung = false;
        if ((head != tail) &&
            tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                printk(KERN_ERR "Detected Tx Unit Hang\n"
                       "  Tx Queue             <%d>\n"
                       "  TDH, TDT             <%x>, <%x>\n"
                       "  next_to_use          <%x>\n"
                       "  next_to_clean        <%x>\n"
                       "tx_buffer_info[next_to_clean]\n"
                       "  time_stamp           <%lx>\n"
                       "  jiffies              <%lx>\n",
                       tx_ring->queue_index,
                       head, tail,
                       tx_ring->next_to_use, eop,
                       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
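/*
 * Example: a 20000 byte chunk needs (20000 >> 14) = 1 full 16 KB
 * descriptor plus one more for the 3616 byte remainder, so
 * TXD_USE_COUNT(20000) == 2.
 */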
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
                                 struct ixgbevf_ring *tx_ring)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
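        /* next_to_watch points at the last descriptor of the packet;
         * the DD bit going high there means the whole packet is done */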

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->work_limit)) {
                bool cleaned = false;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* count the headers replicated into each
                                 * additional TSO segment */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbevf_unmap_and_free_tx_resource(adapter,
                                                           tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
#ifdef HAVE_TX_MQ
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
#else
                if (netif_queue_stopped(netdev) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
#endif
        }

        if (adapter->detect_tx_hung) {
                if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        printk(KERN_INFO
                               "tx hang %d detected, resetting adapter\n",
                               adapter->tx_timeout_count + 1);
                        ixgbevf_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if ((count >= tx_ring->work_limit) &&
            (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
                IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
        }

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;

        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;

        return count < tx_ring->work_limit;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                                struct sk_buff *skb, u8 status,
                                struct ixgbevf_ring *ring,
                                union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan)
                        vlan_gro_receive(&q_vector->napi,
                                         adapter->vlgrp,
                                         tag, skb);
                else
                        napi_gro_receive(&q_vector->napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan)
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
        }
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
                                       u32 status_err, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *rx_ring,
                                     int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = netdev_alloc_page(adapter->netdev);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                skb = bi->skb;
                if (!skb) {
                        skb = netdev_alloc_skb(adapter->netdev, bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                }
                if (!bi->dma) {
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
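                /* write the tail with the index of the last initialized
                 * descriptor, which sits one behind next_to_use */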
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u64 qmask)
{
        u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;

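        /* the VF event registers are only 32 bits wide, so only the
         * low dword of qmask is meaningful */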
        mask = (qmask & 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
                                 int *work_done, int work_to_do)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBEVF_RX_HDR_SIZE)
                                len = IXGBEVF_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }
                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                next_buffer = &rx_ring->rx_buffer_info[i];

                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        } else {
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbevf_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                /*
                 * Work around issue of some types of VM to VM loop back
                 * packets not getting split correctly
                 */
                if (staterr & IXGBE_RXD_STAT_LB) {
                        u32 header_fixup_len = skb->len - skb->data_len;
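                        /* less than a full 14 byte Ethernet header in
                         * the linear area? pull the bytes back in */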
                        if (header_fixup_len < 14)
                                skb_push(skb, header_fixup_len);
                }
                skb->protocol = eth_type_trans(skb, adapter->netdev);

                ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
                adapter->netdev->last_rx = jiffies;

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(adapter, rx_ring,
                                                 cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *rx_ring = NULL;
        int work_done = 0;
        long r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);

        ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
                if (adapter->itr_setting & 1)
                        ixgbevf_set_itr_msix(q_vector);
                if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                        ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
        }

        return work_done;
}

/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *rx_ring = NULL;
        int work_done = 0, i;
        long r_idx;
        u64 enable_mask = 0;

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        budget /= (q_vector->rxr_count ?: 1);
        budget = max(budget, 1);
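        /* e.g. a NAPI budget of 64 across two rings on this vector
         * leaves each ring a budget of 32 */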
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
                enable_mask |= rx_ring->v_idx;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

#ifndef HAVE_NETDEV_NAPI_LIST
        if (!netif_running(adapter->netdev))
                work_done = 0;

#endif
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
                if (adapter->itr_setting & 1)
                        ixgbevf_set_itr_msix(q_vector);
                if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                        ixgbevf_irq_enable_queues(adapter, enable_mask);
        }

        return work_done;
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        struct ixgbe_hw *hw = &adapter->hw;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_set_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbevf_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbevf_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
                else if (q_vector->rxr_count)
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;

                ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
        }

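        /* the loop above leaves v_idx == q_vectors, the slot used by
         * the mailbox/other-causes vector */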
        ixgbevf_set_ivar(adapter, -1, 1, v_idx);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~IXGBE_EIMS_OTHER;
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
                             u32 eitr, u8 itr_setting,
                             int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000 / eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */
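        /*
         * Example: at the default 8000 ints/s the last timeslice was
         * 125 us, so 12500 bytes seen in that slice gives
         * bytes_perint = 100, i.e. roughly 100 MB/s.
         */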

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written, in ints/second; it is converted
 *           to register format below
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
                               u32 itr_reg)
{
        struct ixgbe_hw *hw = &adapter->hw;

        itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

        /*
         * set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = q_vector->v_idx;
        struct ixgbevf_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
                                             q_vector->tx_itr,
                                             tx_ring->total_packets,
                                             tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
                                             q_vector->rx_itr,
                                             rx_ring->total_packets,
                                             rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 smoothed_itr;

                /* do an exponential smoothing against the previous value */
                smoothed_itr = ((q_vector->eitr * 90) / 100) +
                               ((new_itr * 10) / 100);
                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
                /* ixgbevf_write_eitr() converts from ints/s to register
                 * format, so pass the ints/s value straight through */
                ixgbevf_write_eitr(adapter, v_idx, smoothed_itr);
        }
}

static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;
        u32 msg;

        eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

        hw->mbx.ops.read(hw, &msg, 1);

        if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
                mod_timer(&adapter->watchdog_timer,
                          round_jiffies(jiffies + 1));

        return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;
        struct ixgbevf_adapter  *adapter = q_vector->adapter;
        struct ixgbevf_ring     *tx_ring;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                ixgbevf_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        if (adapter->itr_setting & 1)
                ixgbevf_set_itr_msix(q_vector);

        return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;
        struct ixgbevf_adapter  *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbevf_ring  *rx_ring;
        int r_idx;
        int i;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
        ixgbevf_msix_clean_rx(irq, data);
        ixgbevf_msix_clean_tx(irq, data);

        return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
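        /* v_idx is stored as a bit mask so it can be written straight
         * into the VTEIMS/VTEIMC/VTEICS cause registers */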
        a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
        a->tx_ring[t_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
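        /*
         * Example: 4 Rx queues on 3 vectors gives rqpv = ceil(4/3) = 2
         * on the first vector, then ceil(2/2) = 1 and ceil(1/1) = 1 on
         * the remaining two.
         */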
        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;
        int ri = 0, ti = 0;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
                                          ? &ixgbevf_msix_clean_many : \
                          (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
                          (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
                          NULL)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(adapter->q_vector[vector]);

                if (handler == &ixgbevf_msix_clean_rx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbevf_msix_clean_tx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "tx", ti++);
                } else if (handler == &ixgbevf_msix_clean_many) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "TxRx", vector);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  adapter->q_vector[vector]);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt "
                               "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:mbx", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_mbx failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
        return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
                q_vector->eitr = adapter->eitr_param;
        }
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw,
                       "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i, q_vectors;

        q_vectors = adapter->num_msix_vectors;

        i = q_vectors - 1;

        free_irq(adapter->msix_entries[i].vector, netdev);
        i--;

        for (; i >= 0; i--) {
                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }

        ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
        int i;
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

        IXGBE_WRITE_FLUSH(hw);

        for (i = 0; i < adapter->num_msix_vectors; i++)
                synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
                                      bool queues, bool flush)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 mask;
        u64 qmask;

        mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
        qmask = ~0;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

        if (queues)
                ixgbevf_irq_enable_queues(adapter, qmask);

        if (flush)
                IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
        u64 tdba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, j, tdlen, txctrl;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->tx_ring[i];
                j = ring->reg_idx;
                tdba = ring->dma;
                tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
                                (tdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_VFTDH(j);
                adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
                /* Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
        }
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
        struct ixgbevf_ring *rx_ring;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;

        rx_ring = &adapter->rx_ring[index];

        srrctl = IXGBE_SRRCTL_DROP_EN;

        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                u16 bufsz = IXGBEVF_RXBUFFER_2048;
                /* grow the amount we can receive on large page machines */
                if (bufsz < (PAGE_SIZE / 2))
                        bufsz = (PAGE_SIZE / 2);
                /* cap the bufsz at our largest descriptor size */
                bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

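                /* SRRCTL.BSIZEPKT is programmed in 1 KB units, hence
                 * the shift by IXGBE_SRRCTL_BSIZEPKT_SHIFT below */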
                srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                           IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
                        srrctl |= IXGBEVF_RXBUFFER_2048 >>
                                IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                else
                        srrctl |= rx_ring->rx_buf_len >>
                                IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen;
        int rx_buf_len;

        /* Decide whether to use packet split mode or not */
        if (netdev->mtu > ETH_DATA_LEN) {
                if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
                        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
                else
                        adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
        } else {
                if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
                        adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
                else
                        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
        }

        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                /* PSRTYPE must be initialized in 82599 */
                u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                        IXGBE_PSRTYPE_UDPHDR |
                        IXGBE_PSRTYPE_IPV4HDR |
                        IXGBE_PSRTYPE_IPV6HDR |
                        IXGBE_PSRTYPE_L2HDR;
                IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
                rx_buf_len = IXGBEVF_RX_HDR_SIZE;
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
                if (netdev->mtu <= ETH_DATA_LEN)
                        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        rx_buf_len = ALIGN(max_frame, 1024);
        }

        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                j = adapter->rx_ring[i].reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
                                (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_VFRDH(j);
                adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;

                ixgbevf_configure_srrctl(adapter, j);
        }
}

static void ixgbevf_vlan_rx_register(struct net_device *netdev,
                                     struct vlan_group *grp)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int i, j;
        u32 ctrl;

        adapter->vlgrp = grp;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                j = adapter->rx_ring[i].reg_idx;
                ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1444                 ctrl |= IXGBE_RXDCTL_VME;
1445                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
1446         }
1447 }
1448
1449 static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1450 {
1451         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1452         struct ixgbe_hw *hw = &adapter->hw;
1453         struct net_device *v_netdev;
1454
1455         /* add VID to filter table */
1456         if (hw->mac.ops.set_vfta)
1457                 hw->mac.ops.set_vfta(hw, vid, 0, true);
1458         /*
1459          * Copy feature flags from netdev to the vlan netdev for this vid.
1460          * This allows things like TSO to bubble down to our vlan device.
1461          */
1462         v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
1463         v_netdev->features |= adapter->netdev->features;
1464         vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
1465 }
1466
1467 static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1468 {
1469         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1470         struct ixgbe_hw *hw = &adapter->hw;
1471
1472         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
1473                 ixgbevf_irq_disable(adapter);
1474
1475         vlan_group_set_device(adapter->vlgrp, vid, NULL);
1476
1477         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
1478                 ixgbevf_irq_enable(adapter, true, true);
1479
1480         /* remove VID from filter table */
1481         if (hw->mac.ops.set_vfta)
1482                 hw->mac.ops.set_vfta(hw, vid, 0, false);
1483 }
1484
1485 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1486 {
1487         ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1488
1489         if (adapter->vlgrp) {
1490                 u16 vid;
1491                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1492                         if (!vlan_group_get_device(adapter->vlgrp, vid))
1493                                 continue;
1494                         ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1495                 }
1496         }
1497 }
1498
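/*
 * ixgbevf_addr_list_itr - multicast list iterator
 *
 * Hands back the current multicast address and advances *mc_addr_ptr to
 * the next entry in the dev_mc_list chain (NULL once the list is done).
 * Used as the callback passed to update_mc_addr_list below.
 */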
1499 static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
1500                                  u32 *vmdq)
1501 {
1502         struct dev_mc_list *mc_ptr;
1503         u8 *addr = *mc_addr_ptr;
1504         *vmdq = 0;
1505
1506         mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1507         if (mc_ptr->next)
1508                 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1509         else
1510                 *mc_addr_ptr = NULL;
1511
1512         return addr;
1513 }
1514
1515 /**
1516  * ixgbevf_set_rx_mode - Multicast set
1517  * @netdev: network interface device structure
1518  *
1519  * The set_rx_mode entry point is called whenever the multicast address
1520  * list or the network interface flags are updated.  This routine is
1521  * responsible for configuring the hardware for proper multicast mode.
1522  **/
1523 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1524 {
1525         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1526         struct ixgbe_hw *hw = &adapter->hw;
1527         u8 *addr_list = NULL;
1528         int addr_count = 0;
1529
1530         /* reprogram multicast list */
1531         addr_count = netdev_mc_count(netdev);
1532         if (addr_count)
1533                 addr_list = netdev->mc_list->dmi_addr;
1534         if (hw->mac.ops.update_mc_addr_list)
1535                 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1536                                                 ixgbevf_addr_list_itr);
1537 }
1538
1539 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1540 {
1541         int q_idx;
1542         struct ixgbevf_q_vector *q_vector;
1543         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1544
1545         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1546                 struct napi_struct *napi;
1547                 q_vector = adapter->q_vector[q_idx];
1548                 if (!q_vector->rxr_count)
1549                         continue;
1550                 napi = &q_vector->napi;
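                /* vectors servicing more than one Rx ring need the
                 * many-ring variant of the poll routine */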
1551                 if (q_vector->rxr_count > 1)
1552                         napi->poll = &ixgbevf_clean_rxonly_many;
1553
1554                 napi_enable(napi);
1555         }
1556 }
1557
1558 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1559 {
1560         int q_idx;
1561         struct ixgbevf_q_vector *q_vector;
1562         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1563
1564         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1565                 q_vector = adapter->q_vector[q_idx];
1566                 if (!q_vector->rxr_count)
1567                         continue;
1568                 napi_disable(&q_vector->napi);
1569         }
1570 }
1571
1572 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1573 {
1574         struct net_device *netdev = adapter->netdev;
1575         int i;
1576
1577         ixgbevf_set_rx_mode(netdev);
1578
1579         ixgbevf_restore_vlan(adapter);
1580
1581         ixgbevf_configure_tx(adapter);
1582         ixgbevf_configure_rx(adapter);
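        /* fill each Rx ring with buffers and publish the initial tail so
         * the hardware owns the descriptors once the ring is enabled */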
1583         for (i = 0; i < adapter->num_rx_queues; i++) {
1584                 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1585                 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
1586                 ring->next_to_use = ring->count - 1;
1587                 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
1588         }
1589 }
1590
1591 #define IXGBE_MAX_RX_DESC_POLL 10
1592 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1593                                                 int rxr)
1594 {
1595         struct ixgbe_hw *hw = &adapter->hw;
1596         int j = adapter->rx_ring[rxr].reg_idx;
1597         int k;
1598
1599         for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1600                 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1601                         break;
1602                 else
1603                         msleep(1);
1604         }
1605         if (k >= IXGBE_MAX_RX_DESC_POLL) {
1606                 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1607                        "not set within the polling period\n", rxr);
1608         }
1609
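        /* bump the ring tail so the hardware owns all but one descriptor */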
1610         ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1611                                 (adapter->rx_ring[rxr].count - 1));
1612 }
1613
1614 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1615 {
1616         /* Only save pre-reset stats if there are some */
1617         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1618                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1619                         adapter->stats.base_vfgprc;
1620                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1621                         adapter->stats.base_vfgptc;
1622                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1623                         adapter->stats.base_vfgorc;
1624                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1625                         adapter->stats.base_vfgotc;
1626                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1627                         adapter->stats.base_vfmprc;
1628         }
1629 }
1630
1631 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1632 {
1633         struct ixgbe_hw *hw = &adapter->hw;
1634
1635         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1636         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1637         adapter->stats.last_vfgorc |=
1638                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1639         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1640         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1641         adapter->stats.last_vfgotc |=
1642                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1643         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1644
1645         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1646         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1647         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1648         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1649         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1650 }
1651
1652 static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1653 {
1654         struct net_device *netdev = adapter->netdev;
1655         struct ixgbe_hw *hw = &adapter->hw;
1656         int i, j = 0;
1657         int num_rx_rings = adapter->num_rx_queues;
1658         u32 txdctl, rxdctl;
1659
1660         for (i = 0; i < adapter->num_tx_queues; i++) {
1661                 j = adapter->tx_ring[i].reg_idx;
1662                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1663                 /* set WTHRESH to 8 descriptors, to encourage burst writeback */
1664                 txdctl |= (8 << 16);
1665                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1666         }
1667
1668         for (i = 0; i < adapter->num_tx_queues; i++) {
1669                 j = adapter->tx_ring[i].reg_idx;
1670                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1671                 txdctl |= IXGBE_TXDCTL_ENABLE;
1672                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1673         }
1674
1675         for (i = 0; i < num_rx_rings; i++) {
1676                 j = adapter->rx_ring[i].reg_idx;
1677                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1678                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1679                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1680                 ixgbevf_rx_desc_queue_enable(adapter, i);
1681         }
1682
1683         ixgbevf_configure_msix(adapter);
1684
1685         if (hw->mac.ops.set_rar) {
1686                 if (is_valid_ether_addr(hw->mac.addr))
1687                         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1688                 else
1689                         hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1690         }
1691
1692         clear_bit(__IXGBEVF_DOWN, &adapter->state);
1693         ixgbevf_napi_enable_all(adapter);
1694
1695         /* enable transmits */
1696         netif_tx_start_all_queues(netdev);
1697
1698         ixgbevf_save_reset_stats(adapter);
1699         ixgbevf_init_last_counter_stats(adapter);
1700
1701         /* bring the link up in the watchdog; this could race with our first
1702          * link up interrupt but shouldn't be a problem */
1703         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1704         adapter->link_check_timeout = jiffies;
1705         mod_timer(&adapter->watchdog_timer, jiffies);
1706         return 0;
1707 }
1708
1709 int ixgbevf_up(struct ixgbevf_adapter *adapter)
1710 {
1711         int err;
1712         struct ixgbe_hw *hw = &adapter->hw;
1713
1714         ixgbevf_configure(adapter);
1715
1716         err = ixgbevf_up_complete(adapter);
1717
1718         /* clear any pending interrupts, may auto mask */
1719         IXGBE_READ_REG(hw, IXGBE_VTEICR);
1720
1721         ixgbevf_irq_enable(adapter, true, true);
1722
1723         return err;
1724 }
1725
1726 /**
1727  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1728  * @adapter: board private structure
1729  * @rx_ring: ring to free buffers from
1730  **/
1731 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1732                                   struct ixgbevf_ring *rx_ring)
1733 {
1734         struct pci_dev *pdev = adapter->pdev;
1735         unsigned long size;
1736         unsigned int i;
1737
1738         if (!rx_ring->rx_buffer_info)
1739                 return;
1740
1741         /* Free all the Rx ring sk_buffs */
1742         for (i = 0; i < rx_ring->count; i++) {
1743                 struct ixgbevf_rx_buffer *rx_buffer_info;
1744
1745                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1746                 if (rx_buffer_info->dma) {
1747                         pci_unmap_single(pdev, rx_buffer_info->dma,
1748                                          rx_ring->rx_buf_len,
1749                                          PCI_DMA_FROMDEVICE);
1750                         rx_buffer_info->dma = 0;
1751                 }
1752                 if (rx_buffer_info->skb) {
1753                         struct sk_buff *skb = rx_buffer_info->skb;
1754                         rx_buffer_info->skb = NULL;
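                        /* in packet split mode one received frame may be a
                         * chain of skbs linked through skb->prev, so free
                         * the whole chain */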
1755                         do {
1756                                 struct sk_buff *this = skb;
1757                                 skb = skb->prev;
1758                                 dev_kfree_skb(this);
1759                         } while (skb);
1760                 }
1761                 if (!rx_buffer_info->page)
1762                         continue;
1763                 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
1764                                PCI_DMA_FROMDEVICE);
1765                 rx_buffer_info->page_dma = 0;
1766                 put_page(rx_buffer_info->page);
1767                 rx_buffer_info->page = NULL;
1768                 rx_buffer_info->page_offset = 0;
1769         }
1770
1771         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1772         memset(rx_ring->rx_buffer_info, 0, size);
1773
1774         /* Zero out the descriptor ring */
1775         memset(rx_ring->desc, 0, rx_ring->size);
1776
1777         rx_ring->next_to_clean = 0;
1778         rx_ring->next_to_use = 0;
1779
1780         if (rx_ring->head)
1781                 writel(0, adapter->hw.hw_addr + rx_ring->head);
1782         if (rx_ring->tail)
1783                 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1784 }
1785
1786 /**
1787  * ixgbevf_clean_tx_ring - Free Tx Buffers
1788  * @adapter: board private structure
1789  * @tx_ring: ring to be cleaned
1790  **/
1791 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1792                                   struct ixgbevf_ring *tx_ring)
1793 {
1794         struct ixgbevf_tx_buffer *tx_buffer_info;
1795         unsigned long size;
1796         unsigned int i;
1797
1798         if (!tx_ring->tx_buffer_info)
1799                 return;
1800
1801         /* Free all the Tx ring sk_buffs */
1802
1803         for (i = 0; i < tx_ring->count; i++) {
1804                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1805                 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1806         }
1807
1808         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1809         memset(tx_ring->tx_buffer_info, 0, size);
1810
1811         memset(tx_ring->desc, 0, tx_ring->size);
1812
1813         tx_ring->next_to_use = 0;
1814         tx_ring->next_to_clean = 0;
1815
1816         if (tx_ring->head)
1817                 writel(0, adapter->hw.hw_addr + tx_ring->head);
1818         if (tx_ring->tail)
1819                 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1820 }
1821
1822 /**
1823  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1824  * @adapter: board private structure
1825  **/
1826 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1827 {
1828         int i;
1829
1830         for (i = 0; i < adapter->num_rx_queues; i++)
1831                 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1832 }
1833
1834 /**
1835  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1836  * @adapter: board private structure
1837  **/
1838 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1839 {
1840         int i;
1841
1842         for (i = 0; i < adapter->num_tx_queues; i++)
1843                 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1844 }
1845
1846 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1847 {
1848         struct net_device *netdev = adapter->netdev;
1849         struct ixgbe_hw *hw = &adapter->hw;
1850         u32 txdctl;
1851         int i, j;
1852
1853         /* signal that we are down to the interrupt handler */
1854         set_bit(__IXGBEVF_DOWN, &adapter->state);
1855         /* stop the stack from queuing new transmits */
1856
1857         netif_tx_disable(netdev);
1858
1859         msleep(10);
1860
1861         netif_tx_stop_all_queues(netdev);
1862
1863         ixgbevf_irq_disable(adapter);
1864
1865         ixgbevf_napi_disable_all(adapter);
1866
1867         del_timer_sync(&adapter->watchdog_timer);
1868         /* can't call flush_scheduled_work() here because it can deadlock
1869          * if linkwatch_event tries to acquire the rtnl_lock which we are
1870          * holding */
1871         while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1872                 msleep(1);
1873
1874         /* disable transmits in the hardware now that interrupts are off */
1875         for (i = 0; i < adapter->num_tx_queues; i++) {
1876                 j = adapter->tx_ring[i].reg_idx;
1877                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1878                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1879                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1880         }
1881
1882         netif_carrier_off(netdev);
1883
1884         if (!pci_channel_offline(adapter->pdev))
1885                 ixgbevf_reset(adapter);
1886
1887         ixgbevf_clean_all_tx_rings(adapter);
1888         ixgbevf_clean_all_rx_rings(adapter);
1889 }
1890
1891 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1892 {
1893         struct ixgbe_hw *hw = &adapter->hw;
1894
1895         WARN_ON(in_interrupt());
1896
1897         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1898                 msleep(1);
1899
1900         /*
1901          * Check if PF is up before re-init.  If not then skip until
1902          * later when the PF is up and ready to service requests from
1903          * the VF via mailbox.  If the VF is up and running then the
1904          * watchdog task will continue to schedule reset tasks until
1905          * the PF is up and running.
1906          */
1907         if (!hw->mac.ops.reset_hw(hw)) {
1908                 ixgbevf_down(adapter);
1909                 ixgbevf_up(adapter);
1910         }
1911
1912         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1913 }
1914
1915 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1916 {
1917         struct ixgbe_hw *hw = &adapter->hw;
1918         struct net_device *netdev = adapter->netdev;
1919
1920         if (hw->mac.ops.reset_hw(hw))
1921                 hw_dbg(hw, "PF still resetting\n");
1922         else
1923                 hw->mac.ops.init_hw(hw);
1924
1925         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1926                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1927                        netdev->addr_len);
1928                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1929                        netdev->addr_len);
1930         }
1931 }
1932
1933 static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1934                                          int vectors)
1935 {
1936         int err, vector_threshold;
1937
1938         /* We'll want at least 3 (vector_threshold):
1939          * 1) TxQ[0] Cleanup
1940          * 2) RxQ[0] Cleanup
1941          * 3) Other (Link Status Change, etc.)
1942          */
1943         vector_threshold = MIN_MSIX_COUNT;
1944
1945         /* The more we get, the more we will assign to Tx/Rx Cleanup
1946          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1947          * Right now, we simply care about how many we'll get; we'll
1948          * set them up later while requesting irq's.
1949          */
1950          * set them up later while requesting IRQs.
1951                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1952                                       vectors);
1953                 if (!err) /* Success in acquiring all requested vectors. */
1954                         break;
1955                 else if (err < 0)
1956                         vectors = 0; /* Nasty failure, quit now */
1957                 else /* err == number of vectors we should try again with */
1958                         vectors = err;
1959         }
1960
1961         if (vectors < vector_threshold) {
1962                 /* Can't allocate enough MSI-X interrupts?  Oh well.
1963                  * Unlike the PF driver, the VF has no MSI or legacy
1964                  * interrupt mode to fall back on.
1965                  */
1966                 hw_dbg(&adapter->hw,
1967                        "Unable to allocate MSI-X interrupts\n");
1968                 kfree(adapter->msix_entries);
1969                 adapter->msix_entries = NULL;
1970         } else {
1971                 /*
1972                  * Adjust for only the vectors we'll use, which is the minimum
1973                  * of max_msix_q_vectors + NON_Q_VECTORS and the number of
1974                  * vectors we were allocated.
1975                  */
1976                 adapter->num_msix_vectors = vectors;
1977         }
1978 }
1979
1980 /**
1981  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1982  * @adapter: board private structure to initialize
1983  *
1984  * This is the top level queue allocation routine.  The order here is very
1985  * important, starting with the largest set of features turned on at once,
1986  * and ending with the smallest set of features.  This way large combinations
1987  * can be allocated if they're turned on, and smaller combinations are the
1988  * fallthrough conditions.
1989  *
1990  **/
1991 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1992 {
1993         /* Start with base case */
1994         adapter->num_rx_queues = 1;
1995         adapter->num_tx_queues = 1;
1996         adapter->num_rx_pools = adapter->num_rx_queues;
1997         adapter->num_rx_queues_per_pool = 1;
1998 }
1999
2000 /**
2001  * ixgbevf_alloc_queues - Allocate memory for all rings
2002  * @adapter: board private structure to initialize
2003  *
2004  * We allocate one ring per queue at run-time since we don't know the
2005  * number of queues at compile-time.
2007  **/
2008 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2009 {
2010         int i;
2011
2012         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2013                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
2014         if (!adapter->tx_ring)
2015                 goto err_tx_ring_allocation;
2016
2017         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2018                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
2019         if (!adapter->rx_ring)
2020                 goto err_rx_ring_allocation;
2021
2022         for (i = 0; i < adapter->num_tx_queues; i++) {
2023                 adapter->tx_ring[i].count = adapter->tx_ring_count;
2024                 adapter->tx_ring[i].queue_index = i;
2025                 adapter->tx_ring[i].reg_idx = i;
2026         }
2027
2028         for (i = 0; i < adapter->num_rx_queues; i++) {
2029                 adapter->rx_ring[i].count = adapter->rx_ring_count;
2030                 adapter->rx_ring[i].queue_index = i;
2031                 adapter->rx_ring[i].reg_idx = i;
2032         }
2033
2034         return 0;
2035
2036 err_rx_ring_allocation:
2037         kfree(adapter->tx_ring);
2038 err_tx_ring_allocation:
2039         return -ENOMEM;
2040 }
2041
2042 /**
2043  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2044  * @adapter: board private structure to initialize
2045  *
2046  * Attempt to configure the interrupts using the best available
2047  * capabilities of the hardware and the kernel.
2048  **/
2049 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2050 {
2051         int err = 0;
2052         int vector, v_budget;
2053
2054         /*
2055          * It's easy to be greedy for MSI-X vectors, but it really
2056          * doesn't do us much good if we have a lot more vectors
2057          * than CPUs.  So let's be conservative and only ask for
2058          * (roughly) twice the number of vectors as there are CPUs.
2059          */
2060         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2061                        (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2062
2063         /* A failure in MSI-X entry allocation isn't fatal, but it does
2064          * mean we disable MSI-X capabilities of the adapter. */
2065         adapter->msix_entries = kcalloc(v_budget,
2066                                         sizeof(struct msix_entry), GFP_KERNEL);
2067         if (!adapter->msix_entries) {
2068                 err = -ENOMEM;
2069                 goto out;
2070         }
2071
2072         for (vector = 0; vector < v_budget; vector++)
2073                 adapter->msix_entries[vector].entry = vector;
2074
2075         ixgbevf_acquire_msix_vectors(adapter, v_budget);
2076
2077 out:
2078         return err;
2079 }
2080
2081 /**
2082  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2083  * @adapter: board private structure to initialize
2084  *
2085  * We allocate one q_vector per queue interrupt.  If allocation fails we
2086  * return -ENOMEM.
2087  **/
2088 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2089 {
2090         int q_idx, num_q_vectors;
2091         struct ixgbevf_q_vector *q_vector;
2092         int napi_vectors;
2093         int (*poll)(struct napi_struct *, int);
2094
2095         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2096         napi_vectors = adapter->num_rx_queues;
2097         poll = &ixgbevf_clean_rxonly;
2098
2099         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2100                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2101                 if (!q_vector)
2102                         goto err_out;
2103                 q_vector->adapter = adapter;
2104                 q_vector->v_idx = q_idx;
2105                 q_vector->eitr = adapter->eitr_param;
2106                 if (q_idx < napi_vectors)
2107                         netif_napi_add(adapter->netdev, &q_vector->napi,
2108                                        (*poll), 64);
2109                 adapter->q_vector[q_idx] = q_vector;
2110         }
2111
2112         return 0;
2113
2114 err_out:
2115         while (q_idx) {
2116                 q_idx--;
2117                 q_vector = adapter->q_vector[q_idx];
2118                 netif_napi_del(&q_vector->napi);
2119                 kfree(q_vector);
2120                 adapter->q_vector[q_idx] = NULL;
2121         }
2122         return -ENOMEM;
2123 }
2124
2125 /**
2126  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2127  * @adapter: board private structure to initialize
2128  *
2129  * This function frees the memory allocated to the q_vectors.  In addition if
2130  * NAPI is enabled it will delete any references to the NAPI struct prior
2131  * to freeing the q_vector.
2132  **/
2133 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2134 {
2135         int q_idx, num_q_vectors;
2136         int napi_vectors;
2137
2138         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2139         napi_vectors = adapter->num_rx_queues;
2140
2141         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2142                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2143
2144                 adapter->q_vector[q_idx] = NULL;
2145                 if (q_idx < napi_vectors)
2146                         netif_napi_del(&q_vector->napi);
2147                 kfree(q_vector);
2148         }
2149 }
2150
2151 /**
2152  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2153  * @adapter: board private structure
2154  *
2155  **/
2156 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2157 {
2158         pci_disable_msix(adapter->pdev);
2159         kfree(adapter->msix_entries);
2160         adapter->msix_entries = NULL;
2163 }
2164
2165 /**
2166  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2167  * @adapter: board private structure to initialize
2168  *
2169  **/
2170 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2171 {
2172         int err;
2173
2174         /* Number of supported queues */
2175         ixgbevf_set_num_queues(adapter);
2176
2177         err = ixgbevf_set_interrupt_capability(adapter);
2178         if (err) {
2179                 hw_dbg(&adapter->hw,
2180                        "Unable to setup interrupt capabilities\n");
2181                 goto err_set_interrupt;
2182         }
2183
2184         err = ixgbevf_alloc_q_vectors(adapter);
2185         if (err) {
2186                 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2187                        "vectors\n");
2188                 goto err_alloc_q_vectors;
2189         }
2190
2191         err = ixgbevf_alloc_queues(adapter);
2192         if (err) {
2193                 printk(KERN_ERR "Unable to allocate memory for queues\n");
2194                 goto err_alloc_queues;
2195         }
2196
2197         hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2198                "Tx Queue count = %u\n",
2199                (adapter->num_rx_queues > 1) ? "Enabled" :
2200                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2201
2202         set_bit(__IXGBEVF_DOWN, &adapter->state);
2203
2204         return 0;
2205 err_alloc_queues:
2206         ixgbevf_free_q_vectors(adapter);
2207 err_alloc_q_vectors:
2208         ixgbevf_reset_interrupt_capability(adapter);
2209 err_set_interrupt:
2210         return err;
2211 }
2212
2213 /**
2214  * ixgbevf_sw_init - Initialize general software structures
2215  * (struct ixgbevf_adapter)
2216  * @adapter: board private structure to initialize
2217  *
2218  * ixgbevf_sw_init initializes the Adapter private data structure.
2219  * Fields are initialized based on PCI device information and
2220  * OS network device settings (MTU size).
2221  **/
2222 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2223 {
2224         struct ixgbe_hw *hw = &adapter->hw;
2225         struct pci_dev *pdev = adapter->pdev;
2226         int err;
2227
2228         /* PCI config space info */
2229
2230         hw->vendor_id = pdev->vendor;
2231         hw->device_id = pdev->device;
2232         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2233         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2234         hw->subsystem_device_id = pdev->subsystem_device;
2235
2236         hw->mbx.ops.init_params(hw);
2237         hw->mac.max_tx_queues = MAX_TX_QUEUES;
2238         hw->mac.max_rx_queues = MAX_RX_QUEUES;
2239         err = hw->mac.ops.reset_hw(hw);
2240         if (err) {
2241                 dev_info(&pdev->dev,
2242                          "PF still in reset state, assigning new address\n");
2243                 random_ether_addr(hw->mac.addr);
2244         } else {
2245                 err = hw->mac.ops.init_hw(hw);
2246                 if (err) {
2247                         printk(KERN_ERR "init_hw failed: %d\n", err);
2248                         goto out;
2249                 }
2250         }
2251
2252         /* Enable dynamic interrupt throttling rates */
2253         adapter->eitr_param = 20000;
2254         adapter->itr_setting = 1;
2255
2256         /* set defaults for eitr in MegaBytes */
2257         adapter->eitr_low = 10;
2258         adapter->eitr_high = 20;
2259
2260         /* set default ring sizes */
2261         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2262         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2263
2264         /* enable rx csum by default */
2265         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2266
2267         set_bit(__IXGBEVF_DOWN, &adapter->state);
2268
2269 out:
2270         return err;
2271 }
2272
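/*
 * The VF statistics registers are free-running and only 32 bits wide
 * (36 bits for the octet counters), so they wrap around frequently.
 * These macros fold each new hardware reading into a 64-bit software
 * counter, treating a reading lower than the previous one as a wrap.
 */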
2273 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2274         {                                                       \
2275                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2276                 if (current_counter < last_counter)             \
2277                         counter += 0x100000000LL;               \
2278                 last_counter = current_counter;                 \
2279                 counter &= 0xFFFFFFFF00000000LL;                \
2280                 counter |= current_counter;                     \
2281         }
2282
2283 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2284         {                                                                \
2285                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2286                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2287                 u64 current_counter = (current_counter_msb << 32) |      \
2288                         current_counter_lsb;                             \
2289                 if (current_counter < last_counter)                      \
2290                         counter += 0x1000000000LL;                       \
2291                 last_counter = current_counter;                          \
2292                 counter &= 0xFFFFFFF000000000LL;                         \
2293                 counter |= current_counter;                              \
2294         }
2295 /**
2296  * ixgbevf_update_stats - Update the board statistics counters.
2297  * @adapter: board private structure
2298  **/
2299 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2300 {
2301         struct ixgbe_hw *hw = &adapter->hw;
2302
2303         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2304                                 adapter->stats.vfgprc);
2305         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2306                                 adapter->stats.vfgptc);
2307         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2308                                 adapter->stats.last_vfgorc,
2309                                 adapter->stats.vfgorc);
2310         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2311                                 adapter->stats.last_vfgotc,
2312                                 adapter->stats.vfgotc);
2313         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2314                                 adapter->stats.vfmprc);
2315
2316         /* Fill out the OS statistics structure */
2317         adapter->net_stats.multicast = adapter->stats.vfmprc -
2318                 adapter->stats.base_vfmprc;
2319 }
2320
2321 /**
2322  * ixgbevf_watchdog - Timer Call-back
2323  * @data: pointer to adapter cast into an unsigned long
2324  **/
2325 static void ixgbevf_watchdog(unsigned long data)
2326 {
2327         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2328         struct ixgbe_hw *hw = &adapter->hw;
2329         u64 eics = 0;
2330         int i;
2331
2332         /*
2333          * Do the watchdog outside of interrupt context due to the lovely
2334          * delays that some of the newer hardware requires
2335          */
2336
2337         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2338                 goto watchdog_short_circuit;
2339
2340         /* get one bit for every active tx/rx interrupt vector */
2341         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2342                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2343                 if (qv->rxr_count || qv->txr_count)
2344                         eics |= (1 << i);
2345         }
2346
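        /* writing VTEICS raises a software interrupt on each selected
         * vector, so the rings get serviced even if a hardware interrupt
         * was somehow missed */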
2347         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
2348
2349 watchdog_short_circuit:
2350         schedule_work(&adapter->watchdog_task);
2351 }
2352
2353 /**
2354  * ixgbevf_tx_timeout - Respond to a Tx Hang
2355  * @netdev: network interface device structure
2356  **/
2357 static void ixgbevf_tx_timeout(struct net_device *netdev)
2358 {
2359         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2360
2361         /* Do the reset outside of interrupt context */
2362         schedule_work(&adapter->reset_task);
2363 }
2364
2365 static void ixgbevf_reset_task(struct work_struct *work)
2366 {
2367         struct ixgbevf_adapter *adapter;
2368         adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2369
2370         /* If we're already down or resetting, just bail */
2371         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2372             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2373                 return;
2374
2375         adapter->tx_timeout_count++;
2376
2377         ixgbevf_reinit_locked(adapter);
2378 }
2379
2380 /**
2381  * ixgbevf_watchdog_task - worker thread to bring link up
2382  * @work: pointer to work_struct containing our data
2383  **/
2384 static void ixgbevf_watchdog_task(struct work_struct *work)
2385 {
2386         struct ixgbevf_adapter *adapter = container_of(work,
2387                                                        struct ixgbevf_adapter,
2388                                                        watchdog_task);
2389         struct net_device *netdev = adapter->netdev;
2390         struct ixgbe_hw *hw = &adapter->hw;
2391         u32 link_speed = adapter->link_speed;
2392         bool link_up = adapter->link_up;
2393
2394         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2395
2396         /*
2397          * Always check the link on the watchdog because we have
2398          * no LSC interrupt
2399          */
2400         if (hw->mac.ops.check_link) {
2401                 if ((hw->mac.ops.check_link(hw, &link_speed,
2402                                             &link_up, false)) != 0) {
2403                         adapter->link_up = link_up;
2404                         adapter->link_speed = link_speed;
2405                         netif_carrier_off(netdev);
2406                         netif_tx_stop_all_queues(netdev);
2407                         schedule_work(&adapter->reset_task);
2408                         goto pf_has_reset;
2409                 }
2410         } else {
2411                 /* if there is no check_link function,
2412                  * always assume the link is up */
2413                 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2414                 link_up = true;
2415         }
2416         adapter->link_up = link_up;
2417         adapter->link_speed = link_speed;
2418
2419         if (link_up) {
2420                 if (!netif_carrier_ok(netdev)) {
2421                         hw_dbg(&adapter->hw, "NIC Link is Up, %s\n",
2422                                ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2423                                 "10 Gbps" : "1 Gbps"));
2424                         netif_carrier_on(netdev);
2425                         netif_tx_wake_all_queues(netdev);
2426                 } else {
2427                         /* Force detection of hung controller */
2428                         adapter->detect_tx_hung = true;
2429                 }
2430         } else {
2431                 adapter->link_up = false;
2432                 adapter->link_speed = 0;
2433                 if (netif_carrier_ok(netdev)) {
2434                         hw_dbg(&adapter->hw, "NIC Link is Down\n");
2435                         netif_carrier_off(netdev);
2436                         netif_tx_stop_all_queues(netdev);
2437                 }
2438         }
2439
2440         ixgbevf_update_stats(adapter);
2441
2442 pf_has_reset:
2443         /* Force detection of hung controller every watchdog period */
2444         adapter->detect_tx_hung = true;
2445
2446         /* Reset the timer */
2447         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2448                 mod_timer(&adapter->watchdog_timer,
2449                           round_jiffies(jiffies + (2 * HZ)));
2450
2451         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2452 }
2453
2454 /**
2455  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2456  * @adapter: board private structure
2457  * @tx_ring: Tx descriptor ring for a specific queue
2458  *
2459  * Free all transmit software resources
2460  **/
2461 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2462                                struct ixgbevf_ring *tx_ring)
2463 {
2464         struct pci_dev *pdev = adapter->pdev;
2465
2466         ixgbevf_clean_tx_ring(adapter, tx_ring);
2467
2468         vfree(tx_ring->tx_buffer_info);
2469         tx_ring->tx_buffer_info = NULL;
2470
2471         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2472
2473         tx_ring->desc = NULL;
2474 }
2475
2476 /**
2477  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2478  * @adapter: board private structure
2479  *
2480  * Free all transmit software resources
2481  **/
2482 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2483 {
2484         int i;
2485
2486         for (i = 0; i < adapter->num_tx_queues; i++)
2487                 if (adapter->tx_ring[i].desc)
2488                         ixgbevf_free_tx_resources(adapter,
2489                                                   &adapter->tx_ring[i]);
2490
2491 }
2492
2493 /**
2494  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2495  * @adapter: board private structure
2496  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2497  *
2498  * Return 0 on success, negative on failure
2499  **/
2500 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2501                                struct ixgbevf_ring *tx_ring)
2502 {
2503         struct pci_dev *pdev = adapter->pdev;
2504         int size;
2505
2506         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2507         tx_ring->tx_buffer_info = vmalloc(size);
2508         if (!tx_ring->tx_buffer_info)
2509                 goto err;
2510         memset(tx_ring->tx_buffer_info, 0, size);
2511
2512         /* round up to nearest 4K */
2513         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2514         tx_ring->size = ALIGN(tx_ring->size, 4096);
2515
2516         tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2517                                              &tx_ring->dma);
2518         if (!tx_ring->desc)
2519                 goto err;
2520
2521         tx_ring->next_to_use = 0;
2522         tx_ring->next_to_clean = 0;
2523         tx_ring->work_limit = tx_ring->count;
2524         return 0;
2525
2526 err:
2527         vfree(tx_ring->tx_buffer_info);
2528         tx_ring->tx_buffer_info = NULL;
2529         hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2530                "descriptor ring\n");
2531         return -ENOMEM;
2532 }
2533
2534 /**
2535  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2536  * @adapter: board private structure
2537  *
2538  * If this function returns with an error, then it's possible one or
2539  * more of the rings is populated (while the rest are not).  It is the
2540  * caller's duty to clean up those orphaned rings.
2541  *
2542  * Return 0 on success, negative on failure
2543  **/
2544 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2545 {
2546         int i, err = 0;
2547
2548         for (i = 0; i < adapter->num_tx_queues; i++) {
2549                 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2550                 if (!err)
2551                         continue;
2552                 hw_dbg(&adapter->hw,
2553                        "Allocation for Tx Queue %u failed\n", i);
2554                 break;
2555         }
2556
2557         return err;
2558 }
2559
2560 /**
2561  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2562  * @adapter: board private structure
2563  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2564  *
2565  * Returns 0 on success, negative on failure
2566  **/
2567 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2568                                struct ixgbevf_ring *rx_ring)
2569 {
2570         struct pci_dev *pdev = adapter->pdev;
2571         int size;
2572
2573         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2574         rx_ring->rx_buffer_info = vmalloc(size);
2575         if (!rx_ring->rx_buffer_info) {
2576                 hw_dbg(&adapter->hw,
2577                        "Unable to vmalloc buffer memory for "
2578                        "the receive descriptor ring\n");
2579                 goto alloc_failed;
2580         }
2581         memset(rx_ring->rx_buffer_info, 0, size);
2582
2583         /* Round up to nearest 4K */
2584         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2585         rx_ring->size = ALIGN(rx_ring->size, 4096);
2586
2587         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2588                                              &rx_ring->dma);
2589
2590         if (!rx_ring->desc) {
2591                 hw_dbg(&adapter->hw,
2592                        "Unable to allocate memory for "
2593                        "the receive descriptor ring\n");
2594                 vfree(rx_ring->rx_buffer_info);
2595                 rx_ring->rx_buffer_info = NULL;
2596                 goto alloc_failed;
2597         }
2598
2599         rx_ring->next_to_clean = 0;
2600         rx_ring->next_to_use = 0;
2601
2602         return 0;
2603 alloc_failed:
2604         return -ENOMEM;
2605 }
2606
2607 /**
2608  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2609  * @adapter: board private structure
2610  *
2611  * If this function returns with an error, then it's possible one or
2612  * more of the rings is populated (while the rest are not).  It is the
2613  * caller's duty to clean up those orphaned rings.
2614  *
2615  * Return 0 on success, negative on failure
2616  **/
2617 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2618 {
2619         int i, err = 0;
2620
2621         for (i = 0; i < adapter->num_rx_queues; i++) {
2622                 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2623                 if (!err)
2624                         continue;
2625                 hw_dbg(&adapter->hw,
2626                        "Allocation for Rx Queue %u failed\n", i);
2627                 break;
2628         }
2629         return err;
2630 }
2631
2632 /**
2633  * ixgbevf_free_rx_resources - Free Rx Resources
2634  * @adapter: board private structure
2635  * @rx_ring: ring to clean the resources from
2636  *
2637  * Free all receive software resources
2638  **/
2639 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2640                                struct ixgbevf_ring *rx_ring)
2641 {
2642         struct pci_dev *pdev = adapter->pdev;
2643
2644         ixgbevf_clean_rx_ring(adapter, rx_ring);
2645
2646         vfree(rx_ring->rx_buffer_info);
2647         rx_ring->rx_buffer_info = NULL;
2648
2649         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2650
2651         rx_ring->desc = NULL;
2652 }
2653
2654 /**
2655  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2656  * @adapter: board private structure
2657  *
2658  * Free all receive software resources
2659  **/
2660 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2661 {
2662         int i;
2663
2664         for (i = 0; i < adapter->num_rx_queues; i++)
2665                 if (adapter->rx_ring[i].desc)
2666                         ixgbevf_free_rx_resources(adapter,
2667                                                   &adapter->rx_ring[i]);
2668 }
2669
2670 /**
2671  * ixgbevf_open - Called when a network interface is made active
2672  * @netdev: network interface device structure
2673  *
2674  * Returns 0 on success, negative value on failure
2675  *
2676  * The open entry point is called when a network interface is made
2677  * active by the system (IFF_UP).  At this point all resources needed
2678  * for transmit and receive operations are allocated, the interrupt
2679  * handler is registered with the OS, the watchdog timer is started,
2680  * and the stack is notified that the interface is ready.
2681  **/
2682 static int ixgbevf_open(struct net_device *netdev)
2683 {
2684         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2685         struct ixgbe_hw *hw = &adapter->hw;
2686         int err;
2687
2688         /* disallow open during test */
2689         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2690                 return -EBUSY;
2691
2692         if (hw->adapter_stopped) {
2693                 ixgbevf_reset(adapter);
2694                 /* if adapter is still stopped then PF isn't up and
2695                  * the vf can't start. */
2696                 if (hw->adapter_stopped) {
2697                         err = IXGBE_ERR_MBX;
2698                         printk(KERN_ERR "Unable to start - perhaps the PF"
2699                                " driver isn't up yet\n");
2700                         goto err_setup_reset;
2701                 }
2702         }
2703
2704         /* allocate transmit descriptors */
2705         err = ixgbevf_setup_all_tx_resources(adapter);
2706         if (err)
2707                 goto err_setup_tx;
2708
2709         /* allocate receive descriptors */
2710         err = ixgbevf_setup_all_rx_resources(adapter);
2711         if (err)
2712                 goto err_setup_rx;
2713
2714         ixgbevf_configure(adapter);
2715
2716         /*
2717          * Map the Tx/Rx rings to the vectors we were allotted.
2718          * Since request_irq is called later in this function,
2719          * map_rings must be called *before* up_complete.
2720          */
2721         ixgbevf_map_rings_to_vectors(adapter);
2722
2723         err = ixgbevf_up_complete(adapter);
2724         if (err)
2725                 goto err_up;
2726
2727         /* clear any pending interrupts, may auto mask */
2728         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2729         err = ixgbevf_request_irq(adapter);
2730         if (err)
2731                 goto err_req_irq;
2732
2733         ixgbevf_irq_enable(adapter, true, true);
2734
2735         return 0;
2736
2737 err_req_irq:
2738         ixgbevf_down(adapter);
2739 err_up:
2740         /* no IRQs were requested yet, so there is nothing to free here */
2741 err_setup_rx:
2742         ixgbevf_free_all_rx_resources(adapter);
2743 err_setup_tx:
2744         ixgbevf_free_all_tx_resources(adapter);
2745         ixgbevf_reset(adapter);
2746
2747 err_setup_reset:
2748
2749         return err;
2750 }
2751
2752 /**
2753  * ixgbevf_close - Disables a network interface
2754  * @netdev: network interface device structure
2755  *
2756  * Returns 0, this is not allowed to fail
2757  *
2758  * The close entry point is called when an interface is de-activated
2759  * by the OS.  The hardware is still under the drivers control, but
2760  * needs to be disabled.  A global MAC reset is issued to stop the
2761  * hardware, and all transmit and receive resources are freed.
2762  **/
2763 static int ixgbevf_close(struct net_device *netdev)
2764 {
2765         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2766
2767         ixgbevf_down(adapter);
2768         ixgbevf_free_irq(adapter);
2769
2770         ixgbevf_free_all_tx_resources(adapter);
2771         ixgbevf_free_all_rx_resources(adapter);
2772
2773         return 0;
2774 }
2775
2776 static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
2777                        struct ixgbevf_ring *tx_ring,
2778                        struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2779 {
2780         struct ixgbe_adv_tx_context_desc *context_desc;
2781         unsigned int i;
2782         int err;
2783         struct ixgbevf_tx_buffer *tx_buffer_info;
2784         u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2785         u32 mss_l4len_idx, l4len;
2786
2787         if (skb_is_gso(skb)) {
2788                 if (skb_header_cloned(skb)) {
2789                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2790                         if (err)
2791                                 return err;
2792                 }
2793                 l4len = tcp_hdrlen(skb);
2794                 *hdr_len += l4len;
2795
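                /* the hardware computes the final IP and TCP checksums for
                 * TSO, so zero the length fields and seed the TCP checksum
                 * with just the pseudo-header */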
2796                 if (skb->protocol == htons(ETH_P_IP)) {
2797                         struct iphdr *iph = ip_hdr(skb);
2798                         iph->tot_len = 0;
2799                         iph->check = 0;
2800                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2801                                                                  iph->daddr, 0,
2802                                                                  IPPROTO_TCP,
2803                                                                  0);
2804                         adapter->hw_tso_ctxt++;
2805                 } else if (skb_is_gso_v6(skb)) {
2806                         ipv6_hdr(skb)->payload_len = 0;
2807                         tcp_hdr(skb)->check =
2808                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2809                                              &ipv6_hdr(skb)->daddr,
2810                                              0, IPPROTO_TCP, 0);
2811                         adapter->hw_tso6_ctxt++;
2812                 }
2813
2814                 i = tx_ring->next_to_use;
2815
2816                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2817                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2818
2819                 /* VLAN MACLEN IPLEN */
2820                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2821                         vlan_macip_lens |=
2822                                 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2823                 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2824                                     IXGBE_ADVTXD_MACLEN_SHIFT);
2825                 *hdr_len += skb_network_offset(skb);
2826                 vlan_macip_lens |=
2827                         (skb_transport_header(skb) - skb_network_header(skb));
2828                 *hdr_len +=
2829                         (skb_transport_header(skb) - skb_network_header(skb));
2830                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2831                 context_desc->seqnum_seed = 0;
2832
2833                 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2834                 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2835                                     IXGBE_ADVTXD_DTYP_CTXT);
2836
2837                 if (skb->protocol == htons(ETH_P_IP))
2838                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2839                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2840                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2841
2842                 /* MSS L4LEN IDX */
2843                 mss_l4len_idx =
2844                         (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2845                 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2846                 /* use index 1 for TSO */
2847                 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2848                 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2849
2850                 tx_buffer_info->time_stamp = jiffies;
2851                 tx_buffer_info->next_to_watch = i;
2852
2853                 i++;
2854                 if (i == tx_ring->count)
2855                         i = 0;
2856                 tx_ring->next_to_use = i;
2857
2858                 return true;
2859         }
2860
2861         return false;
2862 }
2863
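/**
 * ixgbevf_tx_csum - set up a checksum offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the context descriptor is placed
 * @skb: buffer being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* set for this buffer
 *
 * Returns true if a context descriptor was queued for checksum
 * offload and/or VLAN tag insertion, false otherwise.
 **/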
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					printk(KERN_WARNING
					       "partial checksum but "
					       "proto=%x!\n",
					       ntohs(skb->protocol));
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

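/**
 * ixgbevf_tx_map - map skb data for DMA and record it in the ring
 * @adapter: board private structure
 * @tx_ring: ring on which the buffers are queued
 * @skb: buffer being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* set for this buffer
 * @first: index of the first descriptor used by this skb
 *
 * Returns the number of descriptors mapped, or a negative value if
 * a DMA mapping failed, in which case all mappings are unwound.
 **/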
static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
			  struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size,
							   PCI_DMA_TODEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	/* count is now negative; the caller must drop the frame */
	return count;
}

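/**
 * ixgbevf_tx_queue - write data descriptors and kick the hardware
 * @adapter: board private structure
 * @tx_ring: ring holding the buffers mapped by ixgbevf_tx_map
 * @tx_flags: IXGBE_TX_FLAGS_* set for this buffer
 * @count: number of descriptors mapped by ixgbevf_tx_map
 * @paylen: total length of the skb
 * @hdr_len: header length covered by TSO, zero otherwise
 **/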
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

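/**
 * __ixgbevf_maybe_stop_tx - stop the Tx queue if the ring is full
 * @netdev: network interface device structure
 * @tx_ring: ring being checked for free descriptors
 * @size: number of descriptors needed
 *
 * Returns -EBUSY if the queue must stay stopped, or 0 if it was
 * restarted because another CPU freed descriptors in the meantime.
 **/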
static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
				   struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}

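/**
 * ixgbevf_xmit_frame - queue an skb for transmission
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if the buffer was queued (or dropped on an
 * unrecoverable error), or NETDEV_TX_BUSY if the ring is full.
 **/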
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;

	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* three things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
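
	/*
	 * Worked example, assuming TXD_USE_COUNT() divides by a 16KB
	 * IXGBE_MAX_DATA_PER_TXD as defined in ixgbevf.h: a 1500 byte
	 * linear skb costs TXD_USE_COUNT(1500) = 1 data descriptor,
	 * plus the context descriptor counted above when TSO, checksum
	 * offload or a VLAN tag is in play; a 32KB fragment costs two.
	 */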

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	/*
	 * ixgbevf_tx_map returns a negative count after unwinding its
	 * mappings if a DMA mapping failed; drop the frame rather than
	 * queue garbage descriptors.
	 */
	count = ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags, count,
			 skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error (68 is the minimum IPv4 MTU) and
	 * causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

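/**
 * ixgbevf_shutdown - prepare the device for poweroff or reboot
 * @pdev: PCI device information struct
 *
 * Detaches the interface, brings the hardware down if it was running,
 * frees all transmit and receive resources and disables the device.
 **/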
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	pci_disable_device(pdev);
}

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_get_stats		= ixgbevf_get_stats,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_set_multicast_list	= ixgbevf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_register	= ixgbevf_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};

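/**
 * ixgbevf_assign_netdev_ops - install net_device and ethtool callbacks
 * @dev: network interface device structure
 **/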
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

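	/*
	 * Prefer 64-bit DMA, falling back to a 32-bit mask if the
	 * platform cannot address that range.
	 */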
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

#ifdef MAX_SKB_FRAGS
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

#endif /* MAX_SKB_FRAGS */

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		printk(KERN_ERR "invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	adapter->netdev_registered = true;

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "LRO is disabled\n");

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->watchdog_task);

	flush_scheduled_work();

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
	       ixgbevf_driver_version);

	printk(KERN_INFO "%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */