ixgbe: Correctly name and handle MSI-X other interrupt
pandora-kernel.git: drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /*******************************************************************************
2
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2010 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28
29 /******************************************************************************
30  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
32 #include <linux/types.h>
33 #include <linux/bitops.h>
34 #include <linux/module.h>
35 #include <linux/pci.h>
36 #include <linux/netdevice.h>
37 #include <linux/vmalloc.h>
38 #include <linux/string.h>
39 #include <linux/in.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/ipv6.h>
43 #include <linux/slab.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/ethtool.h>
47 #include <linux/if.h>
48 #include <linux/if_vlan.h>
49 #include <linux/prefetch.h>
50
51 #include "ixgbevf.h"
52
53 char ixgbevf_driver_name[] = "ixgbevf";
54 static const char ixgbevf_driver_string[] =
55         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
56
57 #define DRV_VERSION "2.1.0-k"
58 const char ixgbevf_driver_version[] = DRV_VERSION;
59 static char ixgbevf_copyright[] =
60         "Copyright (c) 2009 - 2010 Intel Corporation.";
61
62 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
63         [board_82599_vf] = &ixgbevf_82599_vf_info,
64         [board_X540_vf]  = &ixgbevf_X540_vf_info,
65 };
66
67 /* ixgbevf_pci_tbl - PCI Device ID Table
68  *
69  * Wildcard entries (PCI_ANY_ID) should come last
70  * Last entry must be all 0s
71  *
72  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
73  *   Class, Class Mask, private data (not used) }
74  */
75 static struct pci_device_id ixgbevf_pci_tbl[] = {
76         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
77         board_82599_vf},
78         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
79         board_X540_vf},
80
81         /* required last entry */
82         {0, }
83 };
84 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
85
86 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
87 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(DRV_VERSION);
90
91 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
92
93 /* forward decls */
94 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
95 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
96                                u32 itr_reg);
97
98 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
99                                            struct ixgbevf_ring *rx_ring,
100                                            u32 val)
101 {
102         /*
103          * Force memory writes to complete before letting h/w
104          * know there are new descriptors to fetch.  (Only
105          * applicable for weak-ordered memory model archs,
106          * such as IA-64).
107          */
108         wmb();
109         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
110 }
111
112 /*
113  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
114  * @adapter: pointer to adapter struct
115  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
116  * @queue: queue to map the corresponding interrupt to
117  * @msix_vector: the vector to map to the corresponding queue
118  *
119  */
120 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
121                              u8 queue, u8 msix_vector)
122 {
123         u32 ivar, index;
124         struct ixgbe_hw *hw = &adapter->hw;
125         if (direction == -1) {
126                 /* other causes */
127                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
128                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
129                 ivar &= ~0xFF;
130                 ivar |= msix_vector;
131                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
132         } else {
133                 /* tx or rx causes */
134                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
135                 index = ((16 * (queue & 1)) + (8 * direction));
136                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
137                 ivar &= ~(0xFF << index);
138                 ivar |= (msix_vector << index);
139                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
140         }
141 }
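/*
 * A short worked example of the IVAR indexing above, with illustrative
 * numbers: each 32-bit VTIVAR register carries four 8-bit entries, the Rx
 * and Tx causes of two consecutive queues.  For queue 3, Tx (direction 1),
 * index = 16 * (3 & 1) + 8 * 1 = 24, so the vector number (with
 * IXGBE_IVAR_ALLOC_VAL set) lands in bits 31:24 of VTIVAR(3 >> 1), i.e.
 * VTIVAR(1).  The "other causes" path instead rewrites only the low byte
 * of VTIVAR_MISC.
 */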
142
143 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
144                                                struct ixgbevf_tx_buffer
145                                                *tx_buffer_info)
146 {
147         if (tx_buffer_info->dma) {
148                 if (tx_buffer_info->mapped_as_page)
149                         dma_unmap_page(&adapter->pdev->dev,
150                                        tx_buffer_info->dma,
151                                        tx_buffer_info->length,
152                                        DMA_TO_DEVICE);
153                 else
154                         dma_unmap_single(&adapter->pdev->dev,
155                                          tx_buffer_info->dma,
156                                          tx_buffer_info->length,
157                                          DMA_TO_DEVICE);
158                 tx_buffer_info->dma = 0;
159         }
160         if (tx_buffer_info->skb) {
161                 dev_kfree_skb_any(tx_buffer_info->skb);
162                 tx_buffer_info->skb = NULL;
163         }
164         tx_buffer_info->time_stamp = 0;
165         /* tx_buffer_info must be completely set up in the transmit path */
166 }
167
168 #define IXGBE_MAX_TXD_PWR       14
169 #define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
170
171 /* Tx Descriptors needed, worst case */
172 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
173                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
174 #ifdef MAX_SKB_FRAGS
175 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
176         MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
177 #else
178 #define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
179 #endif
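/*
 * Worked example of the descriptor accounting above (illustrative sizes,
 * assuming 4 KiB pages): TXD_USE_COUNT(16384) = (16384 >> 14) + 0 = 1 and
 * TXD_USE_COUNT(4096) = 0 + 1 = 1, so DESC_NEEDED evaluates to
 * 1 + MAX_SKB_FRAGS + 1, i.e. one descriptor for skb->data, one per
 * page-sized fragment, plus one for the context descriptor.  A single
 * 60000 byte chunk would instead need (60000 >> 14) + 1 = 4 descriptors.
 */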
180
181 static void ixgbevf_tx_timeout(struct net_device *netdev);
182
183 /**
184  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
185  * @adapter: board private structure
186  * @tx_ring: tx ring to clean
187  **/
188 static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
189                                  struct ixgbevf_ring *tx_ring)
190 {
191         struct net_device *netdev = adapter->netdev;
192         struct ixgbe_hw *hw = &adapter->hw;
193         union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
194         struct ixgbevf_tx_buffer *tx_buffer_info;
195         unsigned int i, eop, count = 0;
196         unsigned int total_bytes = 0, total_packets = 0;
197
198         i = tx_ring->next_to_clean;
199         eop = tx_ring->tx_buffer_info[i].next_to_watch;
200         eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
201
202         while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
203                (count < tx_ring->work_limit)) {
204                 bool cleaned = false;
205                 rmb(); /* read buffer_info after eop_desc */
206                 /* eop could change between read and DD-check */
207                 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
208                         goto cont_loop;
209                 for ( ; !cleaned; count++) {
210                         struct sk_buff *skb;
211                         tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
212                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
213                         cleaned = (i == eop);
214                         skb = tx_buffer_info->skb;
215
216                         if (cleaned && skb) {
217                                 unsigned int segs, bytecount;
218
219                                 /* gso_segs is currently only valid for tcp */
220                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
221                                 /* multiply data chunks by size of headers */
222                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
223                                             skb->len;
224                                 total_packets += segs;
225                                 total_bytes += bytecount;
226                         }
227
228                         ixgbevf_unmap_and_free_tx_resource(adapter,
229                                                            tx_buffer_info);
230
231                         tx_desc->wb.status = 0;
232
233                         i++;
234                         if (i == tx_ring->count)
235                                 i = 0;
236                 }
237
238 cont_loop:
239                 eop = tx_ring->tx_buffer_info[i].next_to_watch;
240                 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
241         }
242
243         tx_ring->next_to_clean = i;
244
245 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
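        /*
         * Only restart the queue once there is room for at least two
         * worst-case packets; this hysteresis keeps the queue from
         * bouncing between stopped and started on every few reclaimed
         * descriptors.
         */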
246         if (unlikely(count && netif_carrier_ok(netdev) &&
247                      (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
248                 /* Make sure that anybody stopping the queue after this
249                  * sees the new next_to_clean.
250                  */
251                 smp_mb();
252 #ifdef HAVE_TX_MQ
253                 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
254                     !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
255                         netif_wake_subqueue(netdev, tx_ring->queue_index);
256                         ++adapter->restart_queue;
257                 }
258 #else
259                 if (netif_queue_stopped(netdev) &&
260                     !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
261                         netif_wake_queue(netdev);
262                         ++adapter->restart_queue;
263                 }
264 #endif
265         }
266
267         /* re-arm the interrupt */
268         if ((count >= tx_ring->work_limit) &&
269             (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
270                 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
271         }
272
273         u64_stats_update_begin(&tx_ring->syncp);
274         tx_ring->total_bytes += total_bytes;
275         tx_ring->total_packets += total_packets;
276         u64_stats_update_end(&tx_ring->syncp);
277
278         return count < tx_ring->work_limit;
279 }
280
281 /**
282  * ixgbevf_receive_skb - Send a completed packet up the stack
283  * @q_vector: structure containing interrupt and ring information
284  * @skb: packet to send up
285  * @status: hardware indication of status of receive
286  * @rx_ring: rx descriptor ring (for a specific queue) to setup
287  * @rx_desc: rx descriptor
288  **/
289 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
290                                 struct sk_buff *skb, u8 status,
291                                 struct ixgbevf_ring *ring,
292                                 union ixgbe_adv_rx_desc *rx_desc)
293 {
294         struct ixgbevf_adapter *adapter = q_vector->adapter;
295         bool is_vlan = (status & IXGBE_RXD_STAT_VP);
296
297         if (is_vlan) {
298                 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
299
300                 __vlan_hwaccel_put_tag(skb, tag);
301         }
302
303         if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
304                         napi_gro_receive(&q_vector->napi, skb);
305         else
306                         netif_rx(skb);
307 }
308
309 /**
310  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
311  * @adapter: address of board private structure
312  * @status_err: hardware indication of status of receive
313  * @skb: skb currently being received and modified
314  **/
315 static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
316                                        u32 status_err, struct sk_buff *skb)
317 {
318         skb_checksum_none_assert(skb);
319
320         /* Rx csum disabled */
321         if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
322                 return;
323
324         /* if IP and error */
325         if ((status_err & IXGBE_RXD_STAT_IPCS) &&
326             (status_err & IXGBE_RXDADV_ERR_IPE)) {
327                 adapter->hw_csum_rx_error++;
328                 return;
329         }
330
331         if (!(status_err & IXGBE_RXD_STAT_L4CS))
332                 return;
333
334         if (status_err & IXGBE_RXDADV_ERR_TCPE) {
335                 adapter->hw_csum_rx_error++;
336                 return;
337         }
338
339         /* It must be a TCP or UDP packet with a valid checksum */
340         skb->ip_summed = CHECKSUM_UNNECESSARY;
341         adapter->hw_csum_rx_good++;
342 }
343
344 /**
345  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
346  * @adapter: address of board private structure
347  **/
348 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
349                                      struct ixgbevf_ring *rx_ring,
350                                      int cleaned_count)
351 {
352         struct pci_dev *pdev = adapter->pdev;
353         union ixgbe_adv_rx_desc *rx_desc;
354         struct ixgbevf_rx_buffer *bi;
355         struct sk_buff *skb;
356         unsigned int i;
357         unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
358
359         i = rx_ring->next_to_use;
360         bi = &rx_ring->rx_buffer_info[i];
361
362         while (cleaned_count--) {
363                 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
364
365                 if (!bi->page_dma &&
366                     (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
367                         if (!bi->page) {
368                                 bi->page = netdev_alloc_page(adapter->netdev);
369                                 if (!bi->page) {
370                                         adapter->alloc_rx_page_failed++;
371                                         goto no_buffers;
372                                 }
373                                 bi->page_offset = 0;
374                         } else {
375                                 /* use a half page if we're re-using */
376                                 bi->page_offset ^= (PAGE_SIZE / 2);
377                         }
378
379                         bi->page_dma = dma_map_page(&pdev->dev, bi->page,
380                                                     bi->page_offset,
381                                                     (PAGE_SIZE / 2),
382                                                     DMA_FROM_DEVICE);
383                 }
384
385                 skb = bi->skb;
386                 if (!skb) {
387                         skb = netdev_alloc_skb(adapter->netdev,
388                                                                bufsz);
389
390                         if (!skb) {
391                                 adapter->alloc_rx_buff_failed++;
392                                 goto no_buffers;
393                         }
394
395                         /*
396                          * Make buffer alignment 2 beyond a 16 byte boundary;
397                          * this will result in a 16 byte aligned IP header after
398                          * the 14 byte MAC header is removed.
399                          */
400                         skb_reserve(skb, NET_IP_ALIGN);
401
402                         bi->skb = skb;
403                 }
404                 if (!bi->dma) {
405                         bi->dma = dma_map_single(&pdev->dev, skb->data,
406                                                  rx_ring->rx_buf_len,
407                                                  DMA_FROM_DEVICE);
408                 }
409                 /* Refresh the desc even if buffer_addrs didn't change because
410                  * each write-back erases this info. */
411                 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
412                         rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
413                         rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
414                 } else {
415                         rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
416                 }
417
418                 i++;
419                 if (i == rx_ring->count)
420                         i = 0;
421                 bi = &rx_ring->rx_buffer_info[i];
422         }
423
424 no_buffers:
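        /*
         * The hardware tail (VFRDT) is written as the index of the last
         * descriptor that was actually initialized, i.e. one behind
         * next_to_use (with wrap), rather than next_to_use itself.
         */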
425         if (rx_ring->next_to_use != i) {
426                 rx_ring->next_to_use = i;
427                 if (i-- == 0)
428                         i = (rx_ring->count - 1);
429
430                 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
431         }
432 }
433
434 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
435                                              u64 qmask)
436 {
437         u32 mask;
438         struct ixgbe_hw *hw = &adapter->hw;
439
440         mask = (qmask & 0xFFFFFFFF);
441         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
442 }
443
444 static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
445 {
446         return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
447 }
448
449 static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
450 {
451         return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
452 }
453
454 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
455                                  struct ixgbevf_ring *rx_ring,
456                                  int *work_done, int work_to_do)
457 {
458         struct ixgbevf_adapter *adapter = q_vector->adapter;
459         struct pci_dev *pdev = adapter->pdev;
460         union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
461         struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
462         struct sk_buff *skb;
463         unsigned int i;
464         u32 len, staterr;
465         u16 hdr_info;
466         bool cleaned = false;
467         int cleaned_count = 0;
468         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
469
470         i = rx_ring->next_to_clean;
471         rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
472         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
473         rx_buffer_info = &rx_ring->rx_buffer_info[i];
474
475         while (staterr & IXGBE_RXD_STAT_DD) {
476                 u32 upper_len = 0;
477                 if (*work_done >= work_to_do)
478                         break;
479                 (*work_done)++;
480
481                 rmb(); /* read descriptor and rx_buffer_info after status DD */
482                 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
483                         hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
484                         len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
485                                IXGBE_RXDADV_HDRBUFLEN_SHIFT;
486                         if (hdr_info & IXGBE_RXDADV_SPH)
487                                 adapter->rx_hdr_split++;
488                         if (len > IXGBEVF_RX_HDR_SIZE)
489                                 len = IXGBEVF_RX_HDR_SIZE;
490                         upper_len = le16_to_cpu(rx_desc->wb.upper.length);
491                 } else {
492                         len = le16_to_cpu(rx_desc->wb.upper.length);
493                 }
494                 cleaned = true;
495                 skb = rx_buffer_info->skb;
496                 prefetch(skb->data - NET_IP_ALIGN);
497                 rx_buffer_info->skb = NULL;
498
499                 if (rx_buffer_info->dma) {
500                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
501                                          rx_ring->rx_buf_len,
502                                          DMA_FROM_DEVICE);
503                         rx_buffer_info->dma = 0;
504                         skb_put(skb, len);
505                 }
506
507                 if (upper_len) {
508                         dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
509                                        PAGE_SIZE / 2, DMA_FROM_DEVICE);
510                         rx_buffer_info->page_dma = 0;
511                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
512                                            rx_buffer_info->page,
513                                            rx_buffer_info->page_offset,
514                                            upper_len);
515
516                         if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
517                             (page_count(rx_buffer_info->page) != 1))
518                                 rx_buffer_info->page = NULL;
519                         else
520                                 get_page(rx_buffer_info->page);
521
522                         skb->len += upper_len;
523                         skb->data_len += upper_len;
524                         skb->truesize += upper_len;
525                 }
526
527                 i++;
528                 if (i == rx_ring->count)
529                         i = 0;
530
531                 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
532                 prefetch(next_rxd);
533                 cleaned_count++;
534
535                 next_buffer = &rx_ring->rx_buffer_info[i];
536
537                 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
538                         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
539                                 rx_buffer_info->skb = next_buffer->skb;
540                                 rx_buffer_info->dma = next_buffer->dma;
541                                 next_buffer->skb = skb;
542                                 next_buffer->dma = 0;
543                         } else {
544                                 skb->next = next_buffer->skb;
545                                 skb->next->prev = skb;
546                         }
547                         adapter->non_eop_descs++;
548                         goto next_desc;
549                 }
550
551                 /* ERR_MASK will only have valid bits if EOP set */
552                 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
553                         dev_kfree_skb_irq(skb);
554                         goto next_desc;
555                 }
556
557                 ixgbevf_rx_checksum(adapter, staterr, skb);
558
559                 /* probably a little skewed due to removing CRC */
560                 total_rx_bytes += skb->len;
561                 total_rx_packets++;
562
563                 /*
564                  * Work around issue of some types of VM to VM loop back
565                  * packets not getting split correctly
566                  */
567                 if (staterr & IXGBE_RXD_STAT_LB) {
568                         u32 header_fixup_len = skb_headlen(skb);
569                         if (header_fixup_len < 14)
570                                 skb_push(skb, header_fixup_len);
571                 }
572                 skb->protocol = eth_type_trans(skb, adapter->netdev);
573
574                 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
575
576 next_desc:
577                 rx_desc->wb.upper.status_error = 0;
578
579                 /* return some buffers to hardware, one at a time is too slow */
580                 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
581                         ixgbevf_alloc_rx_buffers(adapter, rx_ring,
582                                                  cleaned_count);
583                         cleaned_count = 0;
584                 }
585
586                 /* use prefetched values */
587                 rx_desc = next_rxd;
588                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
589
590                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
591         }
592
593         rx_ring->next_to_clean = i;
594         cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
595
596         if (cleaned_count)
597                 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
598
599         u64_stats_update_begin(&rx_ring->syncp);
600         rx_ring->total_packets += total_rx_packets;
601         rx_ring->total_bytes += total_rx_bytes;
602         u64_stats_update_end(&rx_ring->syncp);
603
604         return cleaned;
605 }
606
607 /**
608  * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
609  * @napi: napi struct with our devices info in it
610  * @budget: amount of work driver is allowed to do this pass, in packets
611  *
612  * This function is optimized for cleaning one queue only on a single
613  * q_vector!!!
614  **/
615 static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
616 {
617         struct ixgbevf_q_vector *q_vector =
618                 container_of(napi, struct ixgbevf_q_vector, napi);
619         struct ixgbevf_adapter *adapter = q_vector->adapter;
620         struct ixgbevf_ring *rx_ring = NULL;
621         int work_done = 0;
622         long r_idx;
623
624         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
625         rx_ring = &(adapter->rx_ring[r_idx]);
626
627         ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
628
629         /* If all Rx work done, exit the polling mode */
630         if (work_done < budget) {
631                 napi_complete(napi);
632                 if (adapter->itr_setting & 1)
633                         ixgbevf_set_itr_msix(q_vector);
634                 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
635                         ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
636         }
637
638         return work_done;
639 }
640
641 /**
642  * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
643  * @napi: napi struct with our devices info in it
644  * @budget: amount of work driver is allowed to do this pass, in packets
645  *
646  * This function will clean more than one rx queue associated with a
647  * q_vector.
648  **/
649 static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
650 {
651         struct ixgbevf_q_vector *q_vector =
652                 container_of(napi, struct ixgbevf_q_vector, napi);
653         struct ixgbevf_adapter *adapter = q_vector->adapter;
654         struct ixgbevf_ring *rx_ring = NULL;
655         int work_done = 0, i;
656         long r_idx;
657         u64 enable_mask = 0;
658
659         /* attempt to distribute budget to each queue fairly, but don't allow
660          * the budget to go below 1 because we'll exit polling */
661         budget /= (q_vector->rxr_count ?: 1);
662         budget = max(budget, 1);
663         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
664         for (i = 0; i < q_vector->rxr_count; i++) {
665                 rx_ring = &(adapter->rx_ring[r_idx]);
666                 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
667                 enable_mask |= rx_ring->v_idx;
668                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
669                                       r_idx + 1);
670         }
671
672 #ifndef HAVE_NETDEV_NAPI_LIST
673         if (!netif_running(adapter->netdev))
674                 work_done = 0;
675
676 #endif
677         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
678         rx_ring = &(adapter->rx_ring[r_idx]);
679
680         /* If all Rx work done, exit the polling mode */
681         if (work_done < budget) {
682                 napi_complete(napi);
683                 if (adapter->itr_setting & 1)
684                         ixgbevf_set_itr_msix(q_vector);
685                 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
686                         ixgbevf_irq_enable_queues(adapter, enable_mask);
687         }
688
689         return work_done;
690 }
691
692
693 /**
694  * ixgbevf_configure_msix - Configure MSI-X hardware
695  * @adapter: board private structure
696  *
697  * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
698  * interrupts.
699  **/
700 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
701 {
702         struct ixgbevf_q_vector *q_vector;
703         struct ixgbe_hw *hw = &adapter->hw;
704         int i, j, q_vectors, v_idx, r_idx;
705         u32 mask;
706
707         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
708
709         /*
710          * Populate the IVAR table and set the ITR values to the
711          * corresponding register.
712          */
713         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
714                 q_vector = adapter->q_vector[v_idx];
715                 /* XXX for_each_set_bit(...) */
716                 r_idx = find_first_bit(q_vector->rxr_idx,
717                                        adapter->num_rx_queues);
718
719                 for (i = 0; i < q_vector->rxr_count; i++) {
720                         j = adapter->rx_ring[r_idx].reg_idx;
721                         ixgbevf_set_ivar(adapter, 0, j, v_idx);
722                         r_idx = find_next_bit(q_vector->rxr_idx,
723                                               adapter->num_rx_queues,
724                                               r_idx + 1);
725                 }
726                 r_idx = find_first_bit(q_vector->txr_idx,
727                                        adapter->num_tx_queues);
728
729                 for (i = 0; i < q_vector->txr_count; i++) {
730                         j = adapter->tx_ring[r_idx].reg_idx;
731                         ixgbevf_set_ivar(adapter, 1, j, v_idx);
732                         r_idx = find_next_bit(q_vector->txr_idx,
733                                               adapter->num_tx_queues,
734                                               r_idx + 1);
735                 }
736
737                 /* if this is a tx only vector halve the interrupt rate */
738                 if (q_vector->txr_count && !q_vector->rxr_count)
739                         q_vector->eitr = (adapter->eitr_param >> 1);
740                 else if (q_vector->rxr_count)
741                         /* rx only */
742                         q_vector->eitr = adapter->eitr_param;
743
744                 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
745         }
746
747         ixgbevf_set_ivar(adapter, -1, 1, v_idx);
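        /*
         * At this point v_idx has walked past all of the queue vectors, so
         * the mailbox/"other causes" interrupt is steered to the final
         * MSI-X vector, the one ixgbevf_request_msix_irqs() later registers
         * with ixgbevf_msix_mbx().
         */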
748
749         /* set up to autoclear timer, and the vectors */
750         mask = IXGBE_EIMS_ENABLE_MASK;
751         mask &= ~IXGBE_EIMS_OTHER;
752         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
753 }
754
755 enum latency_range {
756         lowest_latency = 0,
757         low_latency = 1,
758         bulk_latency = 2,
759         latency_invalid = 255
760 };
761
762 /**
763  * ixgbevf_update_itr - update the dynamic ITR value based on statistics
764  * @adapter: pointer to adapter
765  * @eitr: eitr setting (ints per sec) to give last timeslice
766  * @itr_setting: current latency class (lowest/low/bulk) for these queues
767  * @packets: the number of packets during this measurement interval
768  * @bytes: the number of bytes during this measurement interval
769  *
770  *      Stores a new ITR value based on packets and byte
771  *      counts during the last interrupt.  The advantage of per interrupt
772  *      computation is faster updates and more accurate ITR for the current
773  *      traffic pattern.  Constants in this function were computed
774  *      based on theoretical maximum wire speed and thresholds were set based
775  *      on testing data as well as attempting to minimize response time
776  *      while increasing bulk throughput.
777  **/
778 static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
779                              u32 eitr, u8 itr_setting,
780                              int packets, int bytes)
781 {
782         unsigned int retval = itr_setting;
783         u32 timepassed_us;
784         u64 bytes_perint;
785
786         if (packets == 0)
787                 goto update_itr_done;
788
789
790         /* simple throttlerate management
791          *    0-20MB/s lowest (100000 ints/s)
792          *   20-100MB/s low   (20000 ints/s)
793          *  100-1249MB/s bulk (8000 ints/s)
794          */
795         /* what was last interrupt timeslice? */
796         timepassed_us = 1000000/eitr;
797         bytes_perint = bytes / timepassed_us; /* bytes/usec */
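        /*
         * bytes_perint is bytes per microsecond, which is numerically close
         * to MB/s, so the MB/s ranges in the comment above and the
         * eitr_low/eitr_high thresholds compared below are on the same
         * scale.  For example, at eitr = 20000 ints/s the timeslice is
         * 1000000 / 20000 = 50 usec.
         */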
798
799         switch (itr_setting) {
800         case lowest_latency:
801                 if (bytes_perint > adapter->eitr_low)
802                         retval = low_latency;
803                 break;
804         case low_latency:
805                 if (bytes_perint > adapter->eitr_high)
806                         retval = bulk_latency;
807                 else if (bytes_perint <= adapter->eitr_low)
808                         retval = lowest_latency;
809                 break;
810         case bulk_latency:
811                 if (bytes_perint <= adapter->eitr_high)
812                         retval = low_latency;
813                 break;
814         }
815
816 update_itr_done:
817         return retval;
818 }
819
820 /**
821  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
822  * @adapter: pointer to adapter struct
823  * @v_idx: vector index into q_vector array
824  * @itr_reg: new value in ints/s; converted to register format before writing
825  *
826  * This function is made to be called by ethtool and by the driver
827  * when it needs to update VTEITR registers at runtime.  Hardware
828  * specific quirks/differences are taken care of here.
829  */
830 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
831                                u32 itr_reg)
832 {
833         struct ixgbe_hw *hw = &adapter->hw;
834
835         itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
836
837         /*
838          * set the WDIS bit to not clear the timer bits and cause an
839          * immediate assertion of the interrupt
840          */
841         itr_reg |= IXGBE_EITR_CNT_WDIS;
842
843         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
844 }
845
846 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
847 {
848         struct ixgbevf_adapter *adapter = q_vector->adapter;
849         u32 new_itr;
850         u8 current_itr, ret_itr;
851         int i, r_idx, v_idx = q_vector->v_idx;
852         struct ixgbevf_ring *rx_ring, *tx_ring;
853
854         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
855         for (i = 0; i < q_vector->txr_count; i++) {
856                 tx_ring = &(adapter->tx_ring[r_idx]);
857                 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
858                                              q_vector->tx_itr,
859                                              tx_ring->total_packets,
860                                              tx_ring->total_bytes);
861                 /* if the result for this queue would decrease interrupt
862                  * rate for this vector then use that result */
863                 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
864                                     q_vector->tx_itr - 1 : ret_itr);
865                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
866                                       r_idx + 1);
867         }
868
869         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
870         for (i = 0; i < q_vector->rxr_count; i++) {
871                 rx_ring = &(adapter->rx_ring[r_idx]);
872                 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
873                                              q_vector->rx_itr,
874                                              rx_ring->total_packets,
875                                              rx_ring->total_bytes);
876                 /* if the result for this queue would decrease interrupt
877                  * rate for this vector then use that result */
878                 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
879                                     q_vector->rx_itr - 1 : ret_itr);
880                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
881                                       r_idx + 1);
882         }
883
884         current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
885
886         switch (current_itr) {
887         /* counts and packets in update_itr are dependent on these numbers */
888         case lowest_latency:
889                 new_itr = 100000;
890                 break;
891         case low_latency:
892                 new_itr = 20000; /* aka hwitr = ~200 */
893                 break;
894         case bulk_latency:
895         default:
896                 new_itr = 8000;
897                 break;
898         }
899
900         if (new_itr != q_vector->eitr) {
901                 u32 itr_reg;
902
903                 /* save the algorithm value here, not the smoothed one */
904                 q_vector->eitr = new_itr;
905                 /* do an exponential smoothing */
906                 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
907                 itr_reg = new_itr; /* ixgbevf_write_eitr converts to register format */
908                 ixgbevf_write_eitr(adapter, v_idx, itr_reg);
909         }
910 }
911
912 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
913 {
914         struct net_device *netdev = data;
915         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
916         struct ixgbe_hw *hw = &adapter->hw;
917         u32 eicr;
918         u32 msg;
919
920         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
921         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
922
923         if (!hw->mbx.ops.check_for_ack(hw)) {
924                 /*
925                  * checking for the ack clears the PFACK bit.  Place
926                  * it back in the v2p_mailbox cache so that anyone
927                  * polling for an ack will not miss it.  Also
928                  * avoid the read below because the code to read
929                  * the mailbox will also clear the ack bit.  This was
930                  * causing lost acks.  Just cache the bit and exit
931                  * the IRQ handler.
932                  */
933                 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
934                 goto out;
935         }
936
937         /* Not an ack interrupt, go ahead and read the message */
938         hw->mbx.ops.read(hw, &msg, 1);
939
940         if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
941                 mod_timer(&adapter->watchdog_timer,
942                           round_jiffies(jiffies + 1));
943
944 out:
945         return IRQ_HANDLED;
946 }
947
948 static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
949 {
950         struct ixgbevf_q_vector *q_vector = data;
951         struct ixgbevf_adapter  *adapter = q_vector->adapter;
952         struct ixgbevf_ring     *tx_ring;
953         int i, r_idx;
954
955         if (!q_vector->txr_count)
956                 return IRQ_HANDLED;
957
958         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
959         for (i = 0; i < q_vector->txr_count; i++) {
960                 tx_ring = &(adapter->tx_ring[r_idx]);
961                 tx_ring->total_bytes = 0;
962                 tx_ring->total_packets = 0;
963                 ixgbevf_clean_tx_irq(adapter, tx_ring);
964                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
965                                       r_idx + 1);
966         }
967
968         if (adapter->itr_setting & 1)
969                 ixgbevf_set_itr_msix(q_vector);
970
971         return IRQ_HANDLED;
972 }
973
974 /**
975  * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
976  * @irq: unused
977  * @data: pointer to our q_vector struct for this interrupt vector
978  **/
979 static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
980 {
981         struct ixgbevf_q_vector *q_vector = data;
982         struct ixgbevf_adapter  *adapter = q_vector->adapter;
983         struct ixgbe_hw *hw = &adapter->hw;
984         struct ixgbevf_ring  *rx_ring;
985         int r_idx;
986         int i;
987
988         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
989         for (i = 0; i < q_vector->rxr_count; i++) {
990                 rx_ring = &(adapter->rx_ring[r_idx]);
991                 rx_ring->total_bytes = 0;
992                 rx_ring->total_packets = 0;
993                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
994                                       r_idx + 1);
995         }
996
997         if (!q_vector->rxr_count)
998                 return IRQ_HANDLED;
999
1000         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1001         rx_ring = &(adapter->rx_ring[r_idx]);
1002         /* disable interrupts on this vector only */
1003         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
1004         napi_schedule(&q_vector->napi);
1005
1006
1007         return IRQ_HANDLED;
1008 }
1009
1010 static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
1011 {
1012         ixgbevf_msix_clean_rx(irq, data);
1013         ixgbevf_msix_clean_tx(irq, data);
1014
1015         return IRQ_HANDLED;
1016 }
1017
1018 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1019                                      int r_idx)
1020 {
1021         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1022
1023         set_bit(r_idx, q_vector->rxr_idx);
1024         q_vector->rxr_count++;
1025         a->rx_ring[r_idx].v_idx = 1 << v_idx;
1026 }
1027
1028 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1029                                      int t_idx)
1030 {
1031         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1032
1033         set_bit(t_idx, q_vector->txr_idx);
1034         q_vector->txr_count++;
1035         a->tx_ring[t_idx].v_idx = 1 << v_idx;
1036 }
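/*
 * Note that ring->v_idx holds a one-hot mask (1 << v_idx) rather than the
 * raw vector index, so it can be written directly into the bit-mask style
 * VTEIMS/VTEIMC/VTEICS registers (see ixgbevf_clean_tx_irq() and
 * ixgbevf_msix_clean_rx()).
 */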
1037
1038 /**
1039  * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1040  * @adapter: board private structure to initialize
1041  *
1042  * This function maps descriptor rings to the queue-specific vectors
1043  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
1044  * one vector per ring/queue, but on a constrained vector budget, we
1045  * group the rings as "efficiently" as possible.  You would add new
1046  * mapping configurations in here.
1047  **/
1048 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1049 {
1050         int q_vectors;
1051         int v_start = 0;
1052         int rxr_idx = 0, txr_idx = 0;
1053         int rxr_remaining = adapter->num_rx_queues;
1054         int txr_remaining = adapter->num_tx_queues;
1055         int i, j;
1056         int rqpv, tqpv;
1057         int err = 0;
1058
1059         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1060
1061         /*
1062          * The ideal configuration...
1063          * We have enough vectors to map one per queue.
1064          */
1065         if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1066                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1067                         map_vector_to_rxq(adapter, v_start, rxr_idx);
1068
1069                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1070                         map_vector_to_txq(adapter, v_start, txr_idx);
1071                 goto out;
1072         }
1073
1074         /*
1075          * If we don't have enough vectors for a 1-to-1
1076          * mapping, we'll have to group them so there are
1077          * multiple queues per vector.
1078          */
1079         /* Re-adjusting *qpv takes care of the remainder. */
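        /*
         * Illustrative example (queue counts assumed): with 4 Rx and 4 Tx
         * queues spread over 3 vectors, DIV_ROUND_UP yields Rx groups of
         * 2/1/1 and Tx groups of 2/1/1, so the earlier vectors absorb the
         * remainder.
         */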
1080         for (i = v_start; i < q_vectors; i++) {
1081                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1082                 for (j = 0; j < rqpv; j++) {
1083                         map_vector_to_rxq(adapter, i, rxr_idx);
1084                         rxr_idx++;
1085                         rxr_remaining--;
1086                 }
1087         }
1088         for (i = v_start; i < q_vectors; i++) {
1089                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1090                 for (j = 0; j < tqpv; j++) {
1091                         map_vector_to_txq(adapter, i, txr_idx);
1092                         txr_idx++;
1093                         txr_remaining--;
1094                 }
1095         }
1096
1097 out:
1098         return err;
1099 }
1100
1101 /**
1102  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1103  * @adapter: board private structure
1104  *
1105  * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1106  * interrupts from the kernel.
1107  **/
1108 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1109 {
1110         struct net_device *netdev = adapter->netdev;
1111         irqreturn_t (*handler)(int, void *);
1112         int i, vector, q_vectors, err;
1113         int ri = 0, ti = 0;
1114
1115         /* Decrement for the mailbox (other causes) vector */
1116         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1117
1118 #define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
1119                                           ? &ixgbevf_msix_clean_many : \
1120                           (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
1121                           (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
1122                           NULL)
1123         for (vector = 0; vector < q_vectors; vector++) {
1124                 handler = SET_HANDLER(adapter->q_vector[vector]);
1125
1126                 if (handler == &ixgbevf_msix_clean_rx) {
1127                         sprintf(adapter->name[vector], "%s-%s-%d",
1128                                 netdev->name, "rx", ri++);
1129                 } else if (handler == &ixgbevf_msix_clean_tx) {
1130                         sprintf(adapter->name[vector], "%s-%s-%d",
1131                                 netdev->name, "tx", ti++);
1132                 } else if (handler == &ixgbevf_msix_clean_many) {
1133                         sprintf(adapter->name[vector], "%s-%s-%d",
1134                                 netdev->name, "TxRx", vector);
1135                 } else {
1136                         /* skip this unused q_vector */
1137                         continue;
1138                 }
1139                 err = request_irq(adapter->msix_entries[vector].vector,
1140                                   handler, 0, adapter->name[vector],
1141                                   adapter->q_vector[vector]);
1142                 if (err) {
1143                         hw_dbg(&adapter->hw,
1144                                "request_irq failed for MSIX interrupt "
1145                                "Error: %d\n", err);
1146                         goto free_queue_irqs;
1147                 }
1148         }
1149
1150         sprintf(adapter->name[vector], "%s:mbx", netdev->name);
1151         err = request_irq(adapter->msix_entries[vector].vector,
1152                           &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
1153         if (err) {
1154                 hw_dbg(&adapter->hw,
1155                        "request_irq for msix_mbx failed: %d\n", err);
1156                 goto free_queue_irqs;
1157         }
1158
1159         return 0;
1160
1161 free_queue_irqs:
1162         for (i = vector - 1; i >= 0; i--)
1163                 free_irq(adapter->msix_entries[i].vector,
1164                          adapter->q_vector[i]);
1165         pci_disable_msix(adapter->pdev);
1166         kfree(adapter->msix_entries);
1167         adapter->msix_entries = NULL;
1168         return err;
1169 }
1170
1171 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1172 {
1173         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1174
1175         for (i = 0; i < q_vectors; i++) {
1176                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1177                 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1178                 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1179                 q_vector->rxr_count = 0;
1180                 q_vector->txr_count = 0;
1181                 q_vector->eitr = adapter->eitr_param;
1182         }
1183 }
1184
1185 /**
1186  * ixgbevf_request_irq - initialize interrupts
1187  * @adapter: board private structure
1188  *
1189  * Attempts to configure interrupts using the best available
1190  * capabilities of the hardware and kernel.
1191  **/
1192 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1193 {
1194         int err = 0;
1195
1196         err = ixgbevf_request_msix_irqs(adapter);
1197
1198         if (err)
1199                 hw_dbg(&adapter->hw,
1200                        "request_irq failed, Error %d\n", err);
1201
1202         return err;
1203 }
1204
1205 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1206 {
1207         struct net_device *netdev = adapter->netdev;
1208         int i, q_vectors;
1209
1210         q_vectors = adapter->num_msix_vectors;
1211
1212         i = q_vectors - 1;
1213
1214         free_irq(adapter->msix_entries[i].vector, netdev);
1215         i--;
1216
1217         for (; i >= 0; i--) {
1218                 free_irq(adapter->msix_entries[i].vector,
1219                          adapter->q_vector[i]);
1220         }
1221
1222         ixgbevf_reset_q_vectors(adapter);
1223 }
1224
1225 /**
1226  * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1227  * @adapter: board private structure
1228  **/
1229 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1230 {
1231         int i;
1232         struct ixgbe_hw *hw = &adapter->hw;
1233
1234         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1235
1236         IXGBE_WRITE_FLUSH(hw);
1237
1238         for (i = 0; i < adapter->num_msix_vectors; i++)
1239                 synchronize_irq(adapter->msix_entries[i].vector);
1240 }
1241
1242 /**
1243  * ixgbevf_irq_enable - Enable default interrupt generation settings
1244  * @adapter: board private structure
1245  **/
1246 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
1247                                       bool queues, bool flush)
1248 {
1249         struct ixgbe_hw *hw = &adapter->hw;
1250         u32 mask;
1251         u64 qmask;
1252
1253         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1254         qmask = ~0;
1255
1256         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1257
1258         if (queues)
1259                 ixgbevf_irq_enable_queues(adapter, qmask);
1260
1261         if (flush)
1262                 IXGBE_WRITE_FLUSH(hw);
1263 }
1264
1265 /**
1266  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1267  * @adapter: board private structure
1268  *
1269  * Configure the Tx unit of the MAC after a reset.
1270  **/
1271 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1272 {
1273         u64 tdba;
1274         struct ixgbe_hw *hw = &adapter->hw;
1275         u32 i, j, tdlen, txctrl;
1276
1277         /* Setup the HW Tx Head and Tail descriptor pointers */
1278         for (i = 0; i < adapter->num_tx_queues; i++) {
1279                 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1280                 j = ring->reg_idx;
1281                 tdba = ring->dma;
1282                 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1283                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1284                                 (tdba & DMA_BIT_MASK(32)));
1285                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1286                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1287                 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1288                 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1289                 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1290                 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1291                 /* Disable Tx Head Writeback RO bit, since this hoses
1292                  * bookkeeping if things aren't delivered in order.
1293                  */
1294                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1295                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1296                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1297         }
1298 }
1299
1300 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1301
1302 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1303 {
1304         struct ixgbevf_ring *rx_ring;
1305         struct ixgbe_hw *hw = &adapter->hw;
1306         u32 srrctl;
1307
1308         rx_ring = &adapter->rx_ring[index];
1309
1310         srrctl = IXGBE_SRRCTL_DROP_EN;
1311
1312         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1313                 u16 bufsz = IXGBEVF_RXBUFFER_2048;
1314                 /* grow the amount we can receive on large page machines */
1315                 if (bufsz < (PAGE_SIZE / 2))
1316                         bufsz = (PAGE_SIZE / 2);
1317                 /* cap the bufsz at our largest descriptor size */
1318                 bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
1319
1320                 srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1321                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1322                 srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
1323                            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1324                            IXGBE_SRRCTL_BSIZEHDR_MASK);
1325         } else {
1326                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1327
1328                 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1329                         srrctl |= IXGBEVF_RXBUFFER_2048 >>
1330                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1331                 else
1332                         srrctl |= rx_ring->rx_buf_len >>
1333                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1334         }
1335         IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1336 }
1337
1338 /**
1339  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1340  * @adapter: board private structure
1341  *
1342  * Configure the Rx unit of the MAC after a reset.
1343  **/
1344 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1345 {
1346         u64 rdba;
1347         struct ixgbe_hw *hw = &adapter->hw;
1348         struct net_device *netdev = adapter->netdev;
1349         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1350         int i, j;
1351         u32 rdlen;
1352         int rx_buf_len;
1353
1354         /* Decide whether to use packet split mode or not */
1355         if (netdev->mtu > ETH_DATA_LEN) {
1356                 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
1357                         adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1358                 else
1359                         adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1360         } else {
1361                 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
1362                         adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1363                 else
1364                         adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1365         }
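        /*
         * In packet-split mode each Rx descriptor carries two addresses:
         * hdr_addr points at the small skb data buffer and pkt_addr at a
         * half-page payload buffer (see ixgbevf_alloc_rx_buffers()); in
         * single-buffer mode only pkt_addr is used.
         */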
1366
1367         /* Set the RX buffer length according to the mode */
1368         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1369                 /* PSRTYPE must be initialized in 82599 */
1370                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1371                         IXGBE_PSRTYPE_UDPHDR |
1372                         IXGBE_PSRTYPE_IPV4HDR |
1373                         IXGBE_PSRTYPE_IPV6HDR |
1374                         IXGBE_PSRTYPE_L2HDR;
1375                 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1376                 rx_buf_len = IXGBEVF_RX_HDR_SIZE;
1377         } else {
1378                 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1379                 if (netdev->mtu <= ETH_DATA_LEN)
1380                         rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1381                 else
1382                         rx_buf_len = ALIGN(max_frame, 1024);
1383         }
1384
1385         rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1386         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1387          * the Base and Length of the Rx Descriptor Ring */
1388         for (i = 0; i < adapter->num_rx_queues; i++) {
1389                 rdba = adapter->rx_ring[i].dma;
1390                 j = adapter->rx_ring[i].reg_idx;
1391                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1392                                 (rdba & DMA_BIT_MASK(32)));
1393                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1394                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1395                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1396                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1397                 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1398                 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1399                 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1400
1401                 ixgbevf_configure_srrctl(adapter, j);
1402         }
1403 }
1404
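/**
 * ixgbevf_vlan_rx_add_vid - Add a VLAN ID to the VF filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to be added
 *
 * Asks the PF (through the mailbox set_vfta operation) to add the VLAN
 * filter and records the VID in the adapter's active_vlans bitmap.
 **/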
1405 static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1406 {
1407         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1408         struct ixgbe_hw *hw = &adapter->hw;
1409
1410         /* add VID to filter table */
1411         if (hw->mac.ops.set_vfta)
1412                 hw->mac.ops.set_vfta(hw, vid, 0, true);
1413         set_bit(vid, adapter->active_vlans);
1414 }
1415
1416 static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1417 {
1418         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1419         struct ixgbe_hw *hw = &adapter->hw;
1420
1421         /* remove VID from filter table */
1422         if (hw->mac.ops.set_vfta)
1423                 hw->mac.ops.set_vfta(hw, vid, 0, false);
1424         clear_bit(vid, adapter->active_vlans);
1425 }
1426
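/**
 * ixgbevf_restore_vlan - Re-add all active VLAN filters
 * @adapter: board private structure
 *
 * Walks the active_vlans bitmap and re-registers each VLAN ID with the
 * hardware, typically after a reset has cleared the filter table.
 **/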
1427 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1428 {
1429         u16 vid;
1430
1431         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1432                 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1433 }
1434
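/**
 * ixgbevf_write_uc_addr_list - Write unicast addresses to the PF
 * @netdev: network interface device structure
 *
 * Pushes the netdev's unicast address list to the PF one entry at a time,
 * or asks the PF to clear all macvlans for this VF if the list is empty.
 * The PF only accepts a small number of filters per VF, hence the hard
 * limit checked below.
 *
 * Returns the number of addresses written, or -ENOSPC if there are too many.
 **/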
1435 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1436 {
1437         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1438         struct ixgbe_hw *hw = &adapter->hw;
1439         int count = 0;
1440
1441         if ((netdev_uc_count(netdev)) > 10) {
1442                 printk(KERN_ERR "Too many unicast filters - No Space\n");
1443                 return -ENOSPC;
1444         }
1445
1446         if (!netdev_uc_empty(netdev)) {
1447                 struct netdev_hw_addr *ha;
1448                 netdev_for_each_uc_addr(ha, netdev) {
1449                         hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1450                         udelay(200);
1451                 }
1452         } else {
1453                 /*
1454                  * If the list is empty then send message to PF driver to
1455                  * clear all macvlans on this VF.
1456                  */
1457                 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1458         }
1459
1460         return count;
1461 }
1462
1463 /**
1464  * ixgbevf_set_rx_mode - Multicast and Unicast set
1465  * @netdev: network interface device structure
1466  *
1467  * The set_rx_mode entry point is called whenever the unicast or multicast
1468  * address lists or the network interface flags are updated.  This routine
1469  * is responsible for configuring the hardware for proper Rx filtering.
1470  **/
1471 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1472 {
1473         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1474         struct ixgbe_hw *hw = &adapter->hw;
1475
1476         /* reprogram multicast list */
1477         if (hw->mac.ops.update_mc_addr_list)
1478                 hw->mac.ops.update_mc_addr_list(hw, netdev);
1479
1480         ixgbevf_write_uc_addr_list(netdev);
1481 }
1482
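/**
 * ixgbevf_napi_enable_all - Enable NAPI on all Rx queue vectors
 * @adapter: board private structure
 *
 * Selects the multi-ring poll routine for vectors that service more than
 * one Rx ring and then enables NAPI polling on every Rx-capable q_vector.
 **/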
1483 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1484 {
1485         int q_idx;
1486         struct ixgbevf_q_vector *q_vector;
1487         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1488
1489         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1490                 struct napi_struct *napi;
1491                 q_vector = adapter->q_vector[q_idx];
1492                 if (!q_vector->rxr_count)
1493                         continue;
1494                 napi = &q_vector->napi;
1495                 if (q_vector->rxr_count > 1)
1496                         napi->poll = &ixgbevf_clean_rxonly_many;
1497
1498                 napi_enable(napi);
1499         }
1500 }
1501
1502 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1503 {
1504         int q_idx;
1505         struct ixgbevf_q_vector *q_vector;
1506         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1507
1508         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1509                 q_vector = adapter->q_vector[q_idx];
1510                 if (!q_vector->rxr_count)
1511                         continue;
1512                 napi_disable(&q_vector->napi);
1513         }
1514 }
1515
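/**
 * ixgbevf_configure - Configure the hardware for Tx and Rx
 * @adapter: board private structure
 *
 * Applies the Rx filter settings, restores VLAN filters, programs the
 * Tx and Rx rings and pre-fills each Rx ring with receive buffers.
 **/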
1516 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1517 {
1518         struct net_device *netdev = adapter->netdev;
1519         int i;
1520
1521         ixgbevf_set_rx_mode(netdev);
1522
1523         ixgbevf_restore_vlan(adapter);
1524
1525         ixgbevf_configure_tx(adapter);
1526         ixgbevf_configure_rx(adapter);
1527         for (i = 0; i < adapter->num_rx_queues; i++) {
1528                 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1529                 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
1530                 ring->next_to_use = ring->count - 1;
1531                 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
1532         }
1533 }
1534
1535 #define IXGBE_MAX_RX_DESC_POLL 10
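/**
 * ixgbevf_rx_desc_queue_enable - Wait for an Rx queue to become enabled
 * @adapter: board private structure
 * @rxr: index of the Rx ring being enabled
 *
 * Polls RXDCTL.ENABLE up to IXGBE_MAX_RX_DESC_POLL times (sleeping about
 * 1 ms between reads), then bumps the ring's tail pointer to hand the
 * descriptors to hardware.
 **/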
1536 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1537                                                 int rxr)
1538 {
1539         struct ixgbe_hw *hw = &adapter->hw;
1540         int j = adapter->rx_ring[rxr].reg_idx;
1541         int k;
1542
1543         for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1544                 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1545                         break;
1546                 else
1547                         msleep(1);
1548         }
1549         if (k >= IXGBE_MAX_RX_DESC_POLL) {
1550                 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1551                        "not set within the polling period\n", rxr);
1552         }
1553
1554         ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1555                                 (adapter->rx_ring[rxr].count - 1));
1556 }
1557
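/**
 * ixgbevf_save_reset_stats - Accumulate statistics prior to a reset
 * @adapter: board private structure
 *
 * Folds the deltas since the last reset into the saved_reset_* counters
 * so the running totals are not lost across a hardware reset.
 **/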
1558 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1559 {
1560         /* Only save pre-reset stats if there are some */
1561         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1562                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1563                         adapter->stats.base_vfgprc;
1564                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1565                         adapter->stats.base_vfgptc;
1566                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1567                         adapter->stats.base_vfgorc;
1568                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1569                         adapter->stats.base_vfgotc;
1570                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1571                         adapter->stats.base_vfmprc;
1572         }
1573 }
1574
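/**
 * ixgbevf_init_last_counter_stats - Snapshot the hardware counters
 * @adapter: board private structure
 *
 * Reads the current values of the VF statistics registers and records
 * them as both the "last" and "base" values used by ixgbevf_update_stats.
 **/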
1575 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1576 {
1577         struct ixgbe_hw *hw = &adapter->hw;
1578
1579         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1580         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1581         adapter->stats.last_vfgorc |=
1582                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1583         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1584         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1585         adapter->stats.last_vfgotc |=
1586                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1587         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1588
1589         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1590         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1591         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1592         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1593         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1594 }
1595
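/**
 * ixgbevf_up_complete - Finish bringing the interface up
 * @adapter: board private structure
 *
 * Enables the Tx and Rx queues, programs the MSI-X vectors, installs the
 * MAC address with the PF, enables NAPI and transmits, and kicks off the
 * watchdog timer that will report link state.
 **/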
1596 static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1597 {
1598         struct net_device *netdev = adapter->netdev;
1599         struct ixgbe_hw *hw = &adapter->hw;
1600         int i, j = 0;
1601         int num_rx_rings = adapter->num_rx_queues;
1602         u32 txdctl, rxdctl;
1603
1604         for (i = 0; i < adapter->num_tx_queues; i++) {
1605                 j = adapter->tx_ring[i].reg_idx;
1606                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1607                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1608                 txdctl |= (8 << 16);
1609                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1610         }
1611
1612         for (i = 0; i < adapter->num_tx_queues; i++) {
1613                 j = adapter->tx_ring[i].reg_idx;
1614                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1615                 txdctl |= IXGBE_TXDCTL_ENABLE;
1616                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1617         }
1618
1619         for (i = 0; i < num_rx_rings; i++) {
1620                 j = adapter->rx_ring[i].reg_idx;
1621                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1622                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1623                 if (hw->mac.type == ixgbe_mac_X540_vf) {
1624                         rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1625                         rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1626                                    IXGBE_RXDCTL_RLPML_EN);
1627                 }
1628                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1629                 ixgbevf_rx_desc_queue_enable(adapter, i);
1630         }
1631
1632         ixgbevf_configure_msix(adapter);
1633
1634         if (hw->mac.ops.set_rar) {
1635                 if (is_valid_ether_addr(hw->mac.addr))
1636                         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1637                 else
1638                         hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1639         }
1640
1641         clear_bit(__IXGBEVF_DOWN, &adapter->state);
1642         ixgbevf_napi_enable_all(adapter);
1643
1644         /* enable transmits */
1645         netif_tx_start_all_queues(netdev);
1646
1647         ixgbevf_save_reset_stats(adapter);
1648         ixgbevf_init_last_counter_stats(adapter);
1649
1650         /* bring the link up in the watchdog, this could race with our first
1651          * link up interrupt but shouldn't be a problem */
1652         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1653         adapter->link_check_timeout = jiffies;
1654         mod_timer(&adapter->watchdog_timer, jiffies);
1655         return 0;
1656 }
1657
1658 int ixgbevf_up(struct ixgbevf_adapter *adapter)
1659 {
1660         int err;
1661         struct ixgbe_hw *hw = &adapter->hw;
1662
1663         ixgbevf_configure(adapter);
1664
1665         err = ixgbevf_up_complete(adapter);
1666
1667         /* clear any pending interrupts, may auto mask */
1668         IXGBE_READ_REG(hw, IXGBE_VTEICR);
1669
1670         ixgbevf_irq_enable(adapter, true, true);
1671
1672         return err;
1673 }
1674
1675 /**
1676  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1677  * @adapter: board private structure
1678  * @rx_ring: ring to free buffers from
1679  **/
1680 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1681                                   struct ixgbevf_ring *rx_ring)
1682 {
1683         struct pci_dev *pdev = adapter->pdev;
1684         unsigned long size;
1685         unsigned int i;
1686
1687         if (!rx_ring->rx_buffer_info)
1688                 return;
1689
1690         /* Free all the Rx ring sk_buffs */
1691         for (i = 0; i < rx_ring->count; i++) {
1692                 struct ixgbevf_rx_buffer *rx_buffer_info;
1693
1694                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1695                 if (rx_buffer_info->dma) {
1696                         dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1697                                          rx_ring->rx_buf_len,
1698                                          DMA_FROM_DEVICE);
1699                         rx_buffer_info->dma = 0;
1700                 }
1701                 if (rx_buffer_info->skb) {
1702                         struct sk_buff *skb = rx_buffer_info->skb;
1703                         rx_buffer_info->skb = NULL;
1704                         do {
1705                                 struct sk_buff *this = skb;
1706                                 skb = skb->prev;
1707                                 dev_kfree_skb(this);
1708                         } while (skb);
1709                 }
1710                 if (!rx_buffer_info->page)
1711                         continue;
1712                 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
1713                                PAGE_SIZE / 2, DMA_FROM_DEVICE);
1714                 rx_buffer_info->page_dma = 0;
1715                 put_page(rx_buffer_info->page);
1716                 rx_buffer_info->page = NULL;
1717                 rx_buffer_info->page_offset = 0;
1718         }
1719
1720         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1721         memset(rx_ring->rx_buffer_info, 0, size);
1722
1723         /* Zero out the descriptor ring */
1724         memset(rx_ring->desc, 0, rx_ring->size);
1725
1726         rx_ring->next_to_clean = 0;
1727         rx_ring->next_to_use = 0;
1728
1729         if (rx_ring->head)
1730                 writel(0, adapter->hw.hw_addr + rx_ring->head);
1731         if (rx_ring->tail)
1732                 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1733 }
1734
1735 /**
1736  * ixgbevf_clean_tx_ring - Free Tx Buffers
1737  * @adapter: board private structure
1738  * @tx_ring: ring to be cleaned
1739  **/
1740 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1741                                   struct ixgbevf_ring *tx_ring)
1742 {
1743         struct ixgbevf_tx_buffer *tx_buffer_info;
1744         unsigned long size;
1745         unsigned int i;
1746
1747         if (!tx_ring->tx_buffer_info)
1748                 return;
1749
1750         /* Free all the Tx ring sk_buffs */
1751
1752         for (i = 0; i < tx_ring->count; i++) {
1753                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1754                 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1755         }
1756
1757         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1758         memset(tx_ring->tx_buffer_info, 0, size);
1759
1760         memset(tx_ring->desc, 0, tx_ring->size);
1761
1762         tx_ring->next_to_use = 0;
1763         tx_ring->next_to_clean = 0;
1764
1765         if (tx_ring->head)
1766                 writel(0, adapter->hw.hw_addr + tx_ring->head);
1767         if (tx_ring->tail)
1768                 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1769 }
1770
1771 /**
1772  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1773  * @adapter: board private structure
1774  **/
1775 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1776 {
1777         int i;
1778
1779         for (i = 0; i < adapter->num_rx_queues; i++)
1780                 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1781 }
1782
1783 /**
1784  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1785  * @adapter: board private structure
1786  **/
1787 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1788 {
1789         int i;
1790
1791         for (i = 0; i < adapter->num_tx_queues; i++)
1792                 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1793 }
1794
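/**
 * ixgbevf_down - Quiesce the device
 * @adapter: board private structure
 *
 * Marks the adapter as down, stops the transmit queues, disables
 * interrupts and NAPI, stops the watchdog, disables the Tx queues in
 * hardware, resets the VF and frees all Tx/Rx buffers.
 **/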
1795 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1796 {
1797         struct net_device *netdev = adapter->netdev;
1798         struct ixgbe_hw *hw = &adapter->hw;
1799         u32 txdctl;
1800         int i, j;
1801
1802         /* signal that we are down to the interrupt handler */
1803         set_bit(__IXGBEVF_DOWN, &adapter->state);
1804         /* disable receives */
1805
1806         netif_tx_disable(netdev);
1807
1808         msleep(10);
1809
1810         netif_tx_stop_all_queues(netdev);
1811
1812         ixgbevf_irq_disable(adapter);
1813
1814         ixgbevf_napi_disable_all(adapter);
1815
1816         del_timer_sync(&adapter->watchdog_timer);
1817         /* can't call flush scheduled work here because it can deadlock
1818          * if linkwatch_event tries to acquire the rtnl_lock which we are
1819          * holding */
1820         while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1821                 msleep(1);
1822
1823         /* disable transmits in the hardware now that interrupts are off */
1824         for (i = 0; i < adapter->num_tx_queues; i++) {
1825                 j = adapter->tx_ring[i].reg_idx;
1826                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1827                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1828                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1829         }
1830
1831         netif_carrier_off(netdev);
1832
1833         if (!pci_channel_offline(adapter->pdev))
1834                 ixgbevf_reset(adapter);
1835
1836         ixgbevf_clean_all_tx_rings(adapter);
1837         ixgbevf_clean_all_rx_rings(adapter);
1838 }
1839
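/**
 * ixgbevf_reinit_locked - Restart the interface
 * @adapter: board private structure
 *
 * Serializes against other resets via the __IXGBEVF_RESETTING bit and,
 * provided the PF responds to a reset request, brings the interface
 * down and back up again.
 **/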
1840 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1841 {
1842         struct ixgbe_hw *hw = &adapter->hw;
1843
1844         WARN_ON(in_interrupt());
1845
1846         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1847                 msleep(1);
1848
1849         /*
1850          * Check if PF is up before re-init.  If not then skip until
1851          * later when the PF is up and ready to service requests from
1852          * the VF via mailbox.  If the VF is up and running then the
1853          * watchdog task will continue to schedule reset tasks until
1854          * the PF is up and running.
1855          */
1856         if (!hw->mac.ops.reset_hw(hw)) {
1857                 ixgbevf_down(adapter);
1858                 ixgbevf_up(adapter);
1859         }
1860
1861         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1862 }
1863
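/**
 * ixgbevf_reset - Reset the VF hardware
 * @adapter: board private structure
 *
 * Requests a reset from the PF, re-initializes the hardware when the PF
 * is ready, and refreshes the netdev MAC address from the value the PF
 * assigned.
 **/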
1864 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1865 {
1866         struct ixgbe_hw *hw = &adapter->hw;
1867         struct net_device *netdev = adapter->netdev;
1868
1869         if (hw->mac.ops.reset_hw(hw))
1870                 hw_dbg(hw, "PF still resetting\n");
1871         else
1872                 hw->mac.ops.init_hw(hw);
1873
1874         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1875                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1876                        netdev->addr_len);
1877                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1878                        netdev->addr_len);
1879         }
1880 }
1881
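/**
 * ixgbevf_acquire_msix_vectors - Reserve MSI-X vectors from the kernel
 * @adapter: board private structure
 * @vectors: number of vectors to request initially
 *
 * Repeatedly calls pci_enable_msix(), trimming the request each time the
 * kernel offers fewer vectors, until the request succeeds or drops below
 * the minimum needed for one Tx, one Rx and one other vector.
 **/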
1882 static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1883                                          int vectors)
1884 {
1885         int err, vector_threshold;
1886
1887         /* We'll want at least 3 (vector_threshold):
1888          * 1) TxQ[0] Cleanup
1889          * 2) RxQ[0] Cleanup
1890          * 3) Other (Link Status Change, etc.)
1891          */
1892         vector_threshold = MIN_MSIX_COUNT;
1893
1894         /* The more we get, the more we will assign to Tx/Rx Cleanup
1895          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1896          * Right now, we simply care about how many we'll get; we'll
1897  * set them up later while requesting IRQs.
1898          */
1899         while (vectors >= vector_threshold) {
1900                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1901                                       vectors);
1902                 if (!err) /* Success in acquiring all requested vectors. */
1903                         break;
1904                 else if (err < 0)
1905                         vectors = 0; /* Nasty failure, quit now */
1906                 else /* err == number of vectors we should try again with */
1907                         vectors = err;
1908         }
1909
1910         if (vectors < vector_threshold) {
1911                 /* Can't allocate enough MSI-X interrupts?  Oh well.
1912                  * The VF driver has no MSI or legacy interrupt
1913                  * fallback, so just give up the MSI-X entries.
1914                  */
1915                 hw_dbg(&adapter->hw,
1916                        "Unable to allocate MSI-X interrupts\n");
1917                 kfree(adapter->msix_entries);
1918                 adapter->msix_entries = NULL;
1919         } else {
1920                 /*
1921                  * Adjust for only the vectors we'll use, which is minimum
1922                  * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1923                  * vectors we were allocated.
1924                  */
1925                 adapter->num_msix_vectors = vectors;
1926         }
1927 }
1928
1929 /**
1930  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1931  * @adapter: board private structure to initialize
1932  *
1933  * This is the top level queue allocation routine.  The order here is very
1934  * important, starting with the largest set of features enabled at once,
1935  * and ending with the smallest set of features.  This way large combinations
1936  * can be allocated if they're enabled, and smaller combinations are the
1937  * fallthrough conditions.
1938  *
1939  **/
1940 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1941 {
1942         /* Start with base case */
1943         adapter->num_rx_queues = 1;
1944         adapter->num_tx_queues = 1;
1945         adapter->num_rx_pools = adapter->num_rx_queues;
1946         adapter->num_rx_queues_per_pool = 1;
1947 }
1948
1949 /**
1950  * ixgbevf_alloc_queues - Allocate memory for all rings
1951  * @adapter: board private structure to initialize
1952  *
1953  * We allocate one ring per queue at run-time since we don't know the
1954  * number of queues at compile-time.  The allocation is sized for
1955  * multiqueue operation, but works fine with a single queue as well.
1956  **/
1957 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1958 {
1959         int i;
1960
1961         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1962                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
1963         if (!adapter->tx_ring)
1964                 goto err_tx_ring_allocation;
1965
1966         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1967                                    sizeof(struct ixgbevf_ring), GFP_KERNEL);
1968         if (!adapter->rx_ring)
1969                 goto err_rx_ring_allocation;
1970
1971         for (i = 0; i < adapter->num_tx_queues; i++) {
1972                 adapter->tx_ring[i].count = adapter->tx_ring_count;
1973                 adapter->tx_ring[i].queue_index = i;
1974                 adapter->tx_ring[i].reg_idx = i;
1975         }
1976
1977         for (i = 0; i < adapter->num_rx_queues; i++) {
1978                 adapter->rx_ring[i].count = adapter->rx_ring_count;
1979                 adapter->rx_ring[i].queue_index = i;
1980                 adapter->rx_ring[i].reg_idx = i;
1981         }
1982
1983         return 0;
1984
1985 err_rx_ring_allocation:
1986         kfree(adapter->tx_ring);
1987 err_tx_ring_allocation:
1988         return -ENOMEM;
1989 }
1990
1991 /**
1992  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1993  * @adapter: board private structure to initialize
1994  *
1995  * Attempt to configure the interrupts using the best available
1996  * capabilities of the hardware and the kernel.
1997  **/
1998 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1999 {
2000         int err = 0;
2001         int vector, v_budget;
2002
2003         /*
2004          * It's easy to be greedy for MSI-X vectors, but it really
2005          * doesn't do us much good if we have a lot more vectors
2006          * than CPUs.  So let's be conservative and only ask for
2007          * (roughly) twice the number of vectors as there are CPUs.
2008          */
2009         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2010                        (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2011
2012         /* A failure in MSI-X entry allocation isn't fatal, but it does
2013          * mean we disable MSI-X capabilities of the adapter. */
2014         adapter->msix_entries = kcalloc(v_budget,
2015                                         sizeof(struct msix_entry), GFP_KERNEL);
2016         if (!adapter->msix_entries) {
2017                 err = -ENOMEM;
2018                 goto out;
2019         }
2020
2021         for (vector = 0; vector < v_budget; vector++)
2022                 adapter->msix_entries[vector].entry = vector;
2023
2024         ixgbevf_acquire_msix_vectors(adapter, v_budget);
2025
2026 out:
2027         return err;
2028 }
2029
2030 /**
2031  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2032  * @adapter: board private structure to initialize
2033  *
2034  * We allocate one q_vector per queue interrupt.  If allocation fails we
2035  * return -ENOMEM.
2036  **/
2037 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2038 {
2039         int q_idx, num_q_vectors;
2040         struct ixgbevf_q_vector *q_vector;
2041         int napi_vectors;
2042         int (*poll)(struct napi_struct *, int);
2043
2044         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2045         napi_vectors = adapter->num_rx_queues;
2046         poll = &ixgbevf_clean_rxonly;
2047
2048         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2049                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2050                 if (!q_vector)
2051                         goto err_out;
2052                 q_vector->adapter = adapter;
2053                 q_vector->v_idx = q_idx;
2054                 q_vector->eitr = adapter->eitr_param;
2055                 if (q_idx < napi_vectors)
2056                         netif_napi_add(adapter->netdev, &q_vector->napi,
2057                                        (*poll), 64);
2058                 adapter->q_vector[q_idx] = q_vector;
2059         }
2060
2061         return 0;
2062
2063 err_out:
2064         while (q_idx) {
2065                 q_idx--;
2066                 q_vector = adapter->q_vector[q_idx];
2067                 netif_napi_del(&q_vector->napi);
2068                 kfree(q_vector);
2069                 adapter->q_vector[q_idx] = NULL;
2070         }
2071         return -ENOMEM;
2072 }
2073
2074 /**
2075  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2076  * @adapter: board private structure to initialize
2077  *
2078  * This function frees the memory allocated to the q_vectors.  In addition if
2079  * NAPI is enabled it will delete any references to the NAPI struct prior
2080  * to freeing the q_vector.
2081  **/
2082 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2083 {
2084         int q_idx, num_q_vectors;
2085         int napi_vectors;
2086
2087         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2088         napi_vectors = adapter->num_rx_queues;
2089
2090         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2091                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2092
2093                 adapter->q_vector[q_idx] = NULL;
2094                 if (q_idx < napi_vectors)
2095                         netif_napi_del(&q_vector->napi);
2096                 kfree(q_vector);
2097         }
2098 }
2099
2100 /**
2101  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2102  * @adapter: board private structure
2103  *
2104  **/
2105 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2106 {
2107         pci_disable_msix(adapter->pdev);
2108         kfree(adapter->msix_entries);
2109         adapter->msix_entries = NULL;
2110 }
2111
2112 /**
2113  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2114  * @adapter: board private structure to initialize
2115  *
2116  **/
2117 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2118 {
2119         int err;
2120
2121         /* Number of supported queues */
2122         ixgbevf_set_num_queues(adapter);
2123
2124         err = ixgbevf_set_interrupt_capability(adapter);
2125         if (err) {
2126                 hw_dbg(&adapter->hw,
2127                        "Unable to setup interrupt capabilities\n");
2128                 goto err_set_interrupt;
2129         }
2130
2131         err = ixgbevf_alloc_q_vectors(adapter);
2132         if (err) {
2133                 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2134                        "vectors\n");
2135                 goto err_alloc_q_vectors;
2136         }
2137
2138         err = ixgbevf_alloc_queues(adapter);
2139         if (err) {
2140                 printk(KERN_ERR "Unable to allocate memory for queues\n");
2141                 goto err_alloc_queues;
2142         }
2143
2144         hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2145                "Tx Queue count = %u\n",
2146                (adapter->num_rx_queues > 1) ? "Enabled" :
2147                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2148
2149         set_bit(__IXGBEVF_DOWN, &adapter->state);
2150
2151         return 0;
2152 err_alloc_queues:
2153         ixgbevf_free_q_vectors(adapter);
2154 err_alloc_q_vectors:
2155         ixgbevf_reset_interrupt_capability(adapter);
2156 err_set_interrupt:
2157         return err;
2158 }
2159
2160 /**
2161  * ixgbevf_sw_init - Initialize general software structures
2162  * (struct ixgbevf_adapter)
2163  * @adapter: board private structure to initialize
2164  *
2165  * ixgbevf_sw_init initializes the Adapter private data structure.
2166  * Fields are initialized based on PCI device information and
2167  * OS network device settings (MTU size).
2168  **/
2169 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2170 {
2171         struct ixgbe_hw *hw = &adapter->hw;
2172         struct pci_dev *pdev = adapter->pdev;
2173         int err;
2174
2175         /* PCI config space info */
2176
2177         hw->vendor_id = pdev->vendor;
2178         hw->device_id = pdev->device;
2179         hw->revision_id = pdev->revision;
2180         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2181         hw->subsystem_device_id = pdev->subsystem_device;
2182
2183         hw->mbx.ops.init_params(hw);
2184         hw->mac.max_tx_queues = MAX_TX_QUEUES;
2185         hw->mac.max_rx_queues = MAX_RX_QUEUES;
2186         err = hw->mac.ops.reset_hw(hw);
2187         if (err) {
2188                 dev_info(&pdev->dev,
2189                          "PF still in reset state, assigning new address\n");
2190                 dev_hw_addr_random(adapter->netdev, hw->mac.addr);
2191         } else {
2192                 err = hw->mac.ops.init_hw(hw);
2193                 if (err) {
2194                         printk(KERN_ERR "init_hw failed: %d\n", err);
2195                         goto out;
2196                 }
2197         }
2198
2199         /* Enable dynamic interrupt throttling rates */
2200         adapter->eitr_param = 20000;
2201         adapter->itr_setting = 1;
2202
2203         /* set defaults for eitr in MegaBytes */
2204         adapter->eitr_low = 10;
2205         adapter->eitr_high = 20;
2206
2207         /* set default ring sizes */
2208         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2209         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2210
2211         /* enable rx csum by default */
2212         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2213
2214         set_bit(__IXGBEVF_DOWN, &adapter->state);
2215
2216 out:
2217         return err;
2218 }
2219
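/*
 * The VF statistics registers are only 32 bits wide (36 bits for the octet
 * counters, which are split across two registers) and wrap frequently.
 * These macros detect a wrap by comparing against the previously read
 * value and fold the new reading into a monotonically increasing 64-bit
 * software counter.
 */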
2220 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2221         {                                                       \
2222                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2223                 if (current_counter < last_counter)             \
2224                         counter += 0x100000000LL;               \
2225                 last_counter = current_counter;                 \
2226                 counter &= 0xFFFFFFFF00000000LL;                \
2227                 counter |= current_counter;                     \
2228         }
2229
2230 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2231         {                                                                \
2232                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2233                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2234                 u64 current_counter = (current_counter_msb << 32) |      \
2235                         current_counter_lsb;                             \
2236                 if (current_counter < last_counter)                      \
2237                         counter += 0x1000000000LL;                       \
2238                 last_counter = current_counter;                          \
2239                 counter &= 0xFFFFFFF000000000LL;                         \
2240                 counter |= current_counter;                              \
2241         }
2242 /**
2243  * ixgbevf_update_stats - Update the board statistics counters.
2244  * @adapter: board private structure
2245  **/
2246 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2247 {
2248         struct ixgbe_hw *hw = &adapter->hw;
2249
2250         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2251                                 adapter->stats.vfgprc);
2252         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2253                                 adapter->stats.vfgptc);
2254         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2255                                 adapter->stats.last_vfgorc,
2256                                 adapter->stats.vfgorc);
2257         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2258                                 adapter->stats.last_vfgotc,
2259                                 adapter->stats.vfgotc);
2260         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2261                                 adapter->stats.vfmprc);
2262 }
2263
2264 /**
2265  * ixgbevf_watchdog - Timer Call-back
2266  * @data: pointer to adapter cast into an unsigned long
2267  **/
2268 static void ixgbevf_watchdog(unsigned long data)
2269 {
2270         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2271         struct ixgbe_hw *hw = &adapter->hw;
2272         u64 eics = 0;
2273         int i;
2274
2275         /*
2276          * Do the watchdog outside of interrupt context due to the lovely
2277          * delays that some of the newer hardware requires
2278          */
2279
2280         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2281                 goto watchdog_short_circuit;
2282
2283         /* get one bit for every active tx/rx interrupt vector */
2284         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2285                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2286                 if (qv->rxr_count || qv->txr_count)
2287                         eics |= (1 << i);
2288         }
2289
2290         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
2291
2292 watchdog_short_circuit:
2293         schedule_work(&adapter->watchdog_task);
2294 }
2295
2296 /**
2297  * ixgbevf_tx_timeout - Respond to a Tx Hang
2298  * @netdev: network interface device structure
2299  **/
2300 static void ixgbevf_tx_timeout(struct net_device *netdev)
2301 {
2302         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2303
2304         /* Do the reset outside of interrupt context */
2305         schedule_work(&adapter->reset_task);
2306 }
2307
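/**
 * ixgbevf_reset_task - Reset the adapter outside of interrupt context
 * @work: pointer to the adapter's reset_task work_struct
 **/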
2308 static void ixgbevf_reset_task(struct work_struct *work)
2309 {
2310         struct ixgbevf_adapter *adapter;
2311         adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2312
2313         /* If we're already down or resetting, just bail */
2314         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2315             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2316                 return;
2317
2318         adapter->tx_timeout_count++;
2319
2320         ixgbevf_reinit_locked(adapter);
2321 }
2322
2323 /**
2324  * ixgbevf_watchdog_task - worker thread to bring link up
2325  * @work: pointer to work_struct containing our data
2326  **/
2327 static void ixgbevf_watchdog_task(struct work_struct *work)
2328 {
2329         struct ixgbevf_adapter *adapter = container_of(work,
2330                                                        struct ixgbevf_adapter,
2331                                                        watchdog_task);
2332         struct net_device *netdev = adapter->netdev;
2333         struct ixgbe_hw *hw = &adapter->hw;
2334         u32 link_speed = adapter->link_speed;
2335         bool link_up = adapter->link_up;
2336
2337         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2338
2339         /*
2340          * Always check the link on the watchdog because we have
2341          * no LSC interrupt
2342          */
2343         if (hw->mac.ops.check_link) {
2344                 if ((hw->mac.ops.check_link(hw, &link_speed,
2345                                             &link_up, false)) != 0) {
2346                         adapter->link_up = link_up;
2347                         adapter->link_speed = link_speed;
2348                         netif_carrier_off(netdev);
2349                         netif_tx_stop_all_queues(netdev);
2350                         schedule_work(&adapter->reset_task);
2351                         goto pf_has_reset;
2352                 }
2353         } else {
2354                 /* always assume link is up if there is no
2355                  * check_link function */
2356                 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2357                 link_up = true;
2358         }
2359         adapter->link_up = link_up;
2360         adapter->link_speed = link_speed;
2361
2362         if (link_up) {
2363                 if (!netif_carrier_ok(netdev)) {
2364                         hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2365                                (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2366                                10 : 1);
2367                         netif_carrier_on(netdev);
2368                         netif_tx_wake_all_queues(netdev);
2369                 }
2370         } else {
2371                 adapter->link_up = false;
2372                 adapter->link_speed = 0;
2373                 if (netif_carrier_ok(netdev)) {
2374                         hw_dbg(&adapter->hw, "NIC Link is Down\n");
2375                         netif_carrier_off(netdev);
2376                         netif_tx_stop_all_queues(netdev);
2377                 }
2378         }
2379
2380         ixgbevf_update_stats(adapter);
2381
2382 pf_has_reset:
2383         /* Reset the timer */
2384         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2385                 mod_timer(&adapter->watchdog_timer,
2386                           round_jiffies(jiffies + (2 * HZ)));
2387
2388         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2389 }
2390
2391 /**
2392  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2393  * @adapter: board private structure
2394  * @tx_ring: Tx descriptor ring for a specific queue
2395  *
2396  * Free all transmit software resources
2397  **/
2398 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2399                                struct ixgbevf_ring *tx_ring)
2400 {
2401         struct pci_dev *pdev = adapter->pdev;
2402
2403         ixgbevf_clean_tx_ring(adapter, tx_ring);
2404
2405         vfree(tx_ring->tx_buffer_info);
2406         tx_ring->tx_buffer_info = NULL;
2407
2408         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2409                           tx_ring->dma);
2410
2411         tx_ring->desc = NULL;
2412 }
2413
2414 /**
2415  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2416  * @adapter: board private structure
2417  *
2418  * Free all transmit software resources
2419  **/
2420 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2421 {
2422         int i;
2423
2424         for (i = 0; i < adapter->num_tx_queues; i++)
2425                 if (adapter->tx_ring[i].desc)
2426                         ixgbevf_free_tx_resources(adapter,
2427                                                   &adapter->tx_ring[i]);
2428
2429 }
2430
2431 /**
2432  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2433  * @adapter: board private structure
2434  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2435  *
2436  * Return 0 on success, negative on failure
2437  **/
2438 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2439                                struct ixgbevf_ring *tx_ring)
2440 {
2441         struct pci_dev *pdev = adapter->pdev;
2442         int size;
2443
2444         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2445         tx_ring->tx_buffer_info = vzalloc(size);
2446         if (!tx_ring->tx_buffer_info)
2447                 goto err;
2448
2449         /* round up to nearest 4K */
2450         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2451         tx_ring->size = ALIGN(tx_ring->size, 4096);
2452
2453         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2454                                            &tx_ring->dma, GFP_KERNEL);
2455         if (!tx_ring->desc)
2456                 goto err;
2457
2458         tx_ring->next_to_use = 0;
2459         tx_ring->next_to_clean = 0;
2460         tx_ring->work_limit = tx_ring->count;
2461         return 0;
2462
2463 err:
2464         vfree(tx_ring->tx_buffer_info);
2465         tx_ring->tx_buffer_info = NULL;
2466         hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2467                "descriptor ring\n");
2468         return -ENOMEM;
2469 }
2470
2471 /**
2472  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2473  * @adapter: board private structure
2474  *
2475  * If this function returns with an error, then it's possible one or
2476  * more of the rings is populated (while the rest are not).  It is the
2477  * caller's duty to clean those orphaned rings.
2478  *
2479  * Return 0 on success, negative on failure
2480  **/
2481 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2482 {
2483         int i, err = 0;
2484
2485         for (i = 0; i < adapter->num_tx_queues; i++) {
2486                 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2487                 if (!err)
2488                         continue;
2489                 hw_dbg(&adapter->hw,
2490                        "Allocation for Tx Queue %u failed\n", i);
2491                 break;
2492         }
2493
2494         return err;
2495 }
2496
2497 /**
2498  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2499  * @adapter: board private structure
2500  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2501  *
2502  * Returns 0 on success, negative on failure
2503  **/
2504 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2505                                struct ixgbevf_ring *rx_ring)
2506 {
2507         struct pci_dev *pdev = adapter->pdev;
2508         int size;
2509
2510         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2511         rx_ring->rx_buffer_info = vzalloc(size);
2512         if (!rx_ring->rx_buffer_info) {
2513                 hw_dbg(&adapter->hw,
2514                        "Unable to vmalloc buffer memory for "
2515                        "the receive descriptor ring\n");
2516                 goto alloc_failed;
2517         }
2518
2519         /* Round up to nearest 4K */
2520         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2521         rx_ring->size = ALIGN(rx_ring->size, 4096);
2522
2523         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2524                                            &rx_ring->dma, GFP_KERNEL);
2525
2526         if (!rx_ring->desc) {
2527                 hw_dbg(&adapter->hw,
2528                        "Unable to allocate memory for "
2529                        "the receive descriptor ring\n");
2530                 vfree(rx_ring->rx_buffer_info);
2531                 rx_ring->rx_buffer_info = NULL;
2532                 goto alloc_failed;
2533         }
2534
2535         rx_ring->next_to_clean = 0;
2536         rx_ring->next_to_use = 0;
2537
2538         return 0;
2539 alloc_failed:
2540         return -ENOMEM;
2541 }
2542
2543 /**
2544  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2545  * @adapter: board private structure
2546  *
2547  * If this function returns with an error, then it's possible one or
2548  * more of the rings is populated (while the rest are not).  It is the
2549  * caller's duty to clean those orphaned rings.
2550  *
2551  * Return 0 on success, negative on failure
2552  **/
2553 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2554 {
2555         int i, err = 0;
2556
2557         for (i = 0; i < adapter->num_rx_queues; i++) {
2558                 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2559                 if (!err)
2560                         continue;
2561                 hw_dbg(&adapter->hw,
2562                        "Allocation for Rx Queue %u failed\n", i);
2563                 break;
2564         }
2565         return err;
2566 }
2567
2568 /**
2569  * ixgbevf_free_rx_resources - Free Rx Resources
2570  * @adapter: board private structure
2571  * @rx_ring: ring to clean the resources from
2572  *
2573  * Free all receive software resources
2574  **/
2575 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2576                                struct ixgbevf_ring *rx_ring)
2577 {
2578         struct pci_dev *pdev = adapter->pdev;
2579
2580         ixgbevf_clean_rx_ring(adapter, rx_ring);
2581
2582         vfree(rx_ring->rx_buffer_info);
2583         rx_ring->rx_buffer_info = NULL;
2584
2585         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2586                           rx_ring->dma);
2587
2588         rx_ring->desc = NULL;
2589 }
2590
2591 /**
2592  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2593  * @adapter: board private structure
2594  *
2595  * Free all receive software resources
2596  **/
2597 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2598 {
2599         int i;
2600
2601         for (i = 0; i < adapter->num_rx_queues; i++)
2602                 if (adapter->rx_ring[i].desc)
2603                         ixgbevf_free_rx_resources(adapter,
2604                                                   &adapter->rx_ring[i]);
2605 }
2606
2607 /**
2608  * ixgbevf_open - Called when a network interface is made active
2609  * @netdev: network interface device structure
2610  *
2611  * Returns 0 on success, negative value on failure
2612  *
2613  * The open entry point is called when a network interface is made
2614  * active by the system (IFF_UP).  At this point all resources needed
2615  * for transmit and receive operations are allocated, the interrupt
2616  * handler is registered with the OS, the watchdog timer is started,
2617  * and the stack is notified that the interface is ready.
2618  **/
2619 static int ixgbevf_open(struct net_device *netdev)
2620 {
2621         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2622         struct ixgbe_hw *hw = &adapter->hw;
2623         int err;
2624
2625         /* disallow open during test */
2626         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2627                 return -EBUSY;
2628
2629         if (hw->adapter_stopped) {
2630                 ixgbevf_reset(adapter);
2631                 /* if adapter is still stopped then PF isn't up and
2632                  * the vf can't start. */
2633                 if (hw->adapter_stopped) {
2634                         err = IXGBE_ERR_MBX;
2635                         printk(KERN_ERR "Unable to start - perhaps the PF"
2636                                " Driver isn't up yet\n");
2637                         goto err_setup_reset;
2638                 }
2639         }
2640
2641         /* allocate transmit descriptors */
2642         err = ixgbevf_setup_all_tx_resources(adapter);
2643         if (err)
2644                 goto err_setup_tx;
2645
2646         /* allocate receive descriptors */
2647         err = ixgbevf_setup_all_rx_resources(adapter);
2648         if (err)
2649                 goto err_setup_rx;
2650
2651         ixgbevf_configure(adapter);
2652
2653         /*
2654          * Map the Tx/Rx rings to the vectors we were allotted.
2655          * Since request_irq will be called in this function,
2656          * map_rings must be called *before* up_complete.
2657          */
2658         ixgbevf_map_rings_to_vectors(adapter);
2659
2660         err = ixgbevf_up_complete(adapter);
2661         if (err)
2662                 goto err_up;
2663
2664         /* clear any pending interrupts, may auto mask */
2665         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2666         err = ixgbevf_request_irq(adapter);
2667         if (err)
2668                 goto err_req_irq;
2669
2670         ixgbevf_irq_enable(adapter, true, true);
2671
2672         return 0;
2673
2674 err_req_irq:
2675         ixgbevf_down(adapter);
2676 err_up:
2677         ixgbevf_free_irq(adapter);
2678 err_setup_rx:
2679         ixgbevf_free_all_rx_resources(adapter);
2680 err_setup_tx:
2681         ixgbevf_free_all_tx_resources(adapter);
2682         ixgbevf_reset(adapter);
2683
2684 err_setup_reset:
2685
2686         return err;
2687 }
2688
2689 /**
2690  * ixgbevf_close - Disables a network interface
2691  * @netdev: network interface device structure
2692  *
2693  * Returns 0, this is not allowed to fail
2694  *
2695  * The close entry point is called when an interface is de-activated
2696  * by the OS.  The hardware is still under the drivers control, but
2697  * needs to be disabled.  A global MAC reset is issued to stop the
2698  * hardware, and all transmit and receive resources are freed.
2699  **/
2700 static int ixgbevf_close(struct net_device *netdev)
2701 {
2702         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2703
2704         ixgbevf_down(adapter);
2705         ixgbevf_free_irq(adapter);
2706
2707         ixgbevf_free_all_tx_resources(adapter);
2708         ixgbevf_free_all_rx_resources(adapter);
2709
2710         return 0;
2711 }
2712
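/**
 * ixgbevf_tso - Set up a TSO context descriptor
 * @adapter: board private structure
 * @tx_ring: Tx ring the frame will be posted to
 * @skb: frame to be segmented by hardware
 * @tx_flags: flags describing VLAN and offload state
 * @hdr_len: returns the total header length covered by the offload
 *
 * Builds an advanced context descriptor carrying the MAC/IP/TCP header
 * lengths and the MSS so the hardware can segment the frame.
 *
 * Returns true if a context descriptor was queued, false if the frame is
 * not GSO, or a negative errno if expanding a cloned header fails.
 **/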
2713 static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
2714                        struct ixgbevf_ring *tx_ring,
2715                        struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2716 {
2717         struct ixgbe_adv_tx_context_desc *context_desc;
2718         unsigned int i;
2719         int err;
2720         struct ixgbevf_tx_buffer *tx_buffer_info;
2721         u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2722         u32 mss_l4len_idx, l4len;
2723
2724         if (skb_is_gso(skb)) {
2725                 if (skb_header_cloned(skb)) {
2726                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2727                         if (err)
2728                                 return err;
2729                 }
2730                 l4len = tcp_hdrlen(skb);
2731                 *hdr_len += l4len;
2732
2733                 if (skb->protocol == htons(ETH_P_IP)) {
2734                         struct iphdr *iph = ip_hdr(skb);
2735                         iph->tot_len = 0;
2736                         iph->check = 0;
2737                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2738                                                                  iph->daddr, 0,
2739                                                                  IPPROTO_TCP,
2740                                                                  0);
2741                         adapter->hw_tso_ctxt++;
2742                 } else if (skb_is_gso_v6(skb)) {
2743                         ipv6_hdr(skb)->payload_len = 0;
2744                         tcp_hdr(skb)->check =
2745                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2746                                              &ipv6_hdr(skb)->daddr,
2747                                              0, IPPROTO_TCP, 0);
2748                         adapter->hw_tso6_ctxt++;
2749                 }
2750
2751                 i = tx_ring->next_to_use;
2752
2753                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2754                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2755
2756                 /* VLAN MACLEN IPLEN */
2757                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2758                         vlan_macip_lens |=
2759                                 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2760                 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2761                                     IXGBE_ADVTXD_MACLEN_SHIFT);
2762                 *hdr_len += skb_network_offset(skb);
2763                 vlan_macip_lens |=
2764                         (skb_transport_header(skb) - skb_network_header(skb));
2765                 *hdr_len +=
2766                         (skb_transport_header(skb) - skb_network_header(skb));
2767                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2768                 context_desc->seqnum_seed = 0;
2769
2770                 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2771                 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2772                                     IXGBE_ADVTXD_DTYP_CTXT);
2773
2774                 if (skb->protocol == htons(ETH_P_IP))
2775                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2776                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2777                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2778
2779                 /* MSS L4LEN IDX */
2780                 mss_l4len_idx =
2781                         (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2782                 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2783                 /* use index 1 for TSO */
2784                 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2785                 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2786
2787                 tx_buffer_info->time_stamp = jiffies;
2788                 tx_buffer_info->next_to_watch = i;
2789
2790                 i++;
2791                 if (i == tx_ring->count)
2792                         i = 0;
2793                 tx_ring->next_to_use = i;
2794
2795                 return true;
2796         }
2797
2798         return false;
2799 }
2800
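/**
 * ixgbevf_tx_csum - build a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet being offloaded
 * @tx_flags: IXGBE_TX_FLAGS_* collected so far for this packet
 *
 * Queues a context descriptor (context index 0) when the stack asked for
 * a partial checksum or a VLAN tag must be inserted.  Returns true if a
 * context descriptor was written, false otherwise.
 **/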
2801 static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
2802                             struct ixgbevf_ring *tx_ring,
2803                             struct sk_buff *skb, u32 tx_flags)
2804 {
2805         struct ixgbe_adv_tx_context_desc *context_desc;
2806         unsigned int i;
2807         struct ixgbevf_tx_buffer *tx_buffer_info;
2808         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2809
2810         if (skb->ip_summed == CHECKSUM_PARTIAL ||
2811             (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2812                 i = tx_ring->next_to_use;
2813                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2814                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2815
2816                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2817                         vlan_macip_lens |= (tx_flags &
2818                                             IXGBE_TX_FLAGS_VLAN_MASK);
2819                 vlan_macip_lens |= (skb_network_offset(skb) <<
2820                                     IXGBE_ADVTXD_MACLEN_SHIFT);
2821                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2822                         vlan_macip_lens |= (skb_transport_header(skb) -
2823                                             skb_network_header(skb));
2824
2825                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2826                 context_desc->seqnum_seed = 0;
2827
2828                 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2829                                     IXGBE_ADVTXD_DTYP_CTXT);
2830
2831                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2832                         switch (skb->protocol) {
2833                         case __constant_htons(ETH_P_IP):
2834                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2835                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2836                                         type_tucmd_mlhl |=
2837                                             IXGBE_ADVTXD_TUCMD_L4T_TCP;
2838                                 break;
2839                         case __constant_htons(ETH_P_IPV6):
2840                                 /* XXX what about other V6 headers?? */
2841                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2842                                         type_tucmd_mlhl |=
2843                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2844                                 break;
2845                         default:
2846                                 if (unlikely(net_ratelimit())) {
2847                                         printk(KERN_WARNING
2848                                                "partial checksum but "
2849                                                "proto=%x!\n",
2850                                                skb->protocol);
2851                                 }
2852                                 break;
2853                         }
2854                 }
2855
2856                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2857                 /* use index zero for tx checksum offload */
2858                 context_desc->mss_l4len_idx = 0;
2859
2860                 tx_buffer_info->time_stamp = jiffies;
2861                 tx_buffer_info->next_to_watch = i;
2862
2863                 adapter->hw_csum_tx_good++;
2864                 i++;
2865                 if (i == tx_ring->count)
2866                         i = 0;
2867                 tx_ring->next_to_use = i;
2868
2869                 return true;
2870         }
2871
2872         return false;
2873 }
2874
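/**
 * ixgbevf_tx_map - map the data buffers of an skb for DMA
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet whose linear data and page fragments are mapped
 * @tx_flags: IXGBE_TX_FLAGS_* for this packet
 * @first: index of the first descriptor used by this packet
 *
 * Maps the skb head and each page fragment in chunks of at most
 * IXGBE_MAX_DATA_PER_TXD bytes and records the mappings in
 * tx_buffer_info.  Returns the number of buffers mapped; on a DMA
 * mapping error the partial mapping is unwound and a negative count
 * is returned.
 **/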
2875 static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2876                           struct ixgbevf_ring *tx_ring,
2877                           struct sk_buff *skb, u32 tx_flags,
2878                           unsigned int first)
2879 {
2880         struct pci_dev *pdev = adapter->pdev;
2881         struct ixgbevf_tx_buffer *tx_buffer_info;
2882         unsigned int len;
2883         unsigned int total = skb->len;
2884         unsigned int offset = 0, size;
2885         int count = 0;
2886         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2887         unsigned int f;
2888         int i;
2889
2890         i = tx_ring->next_to_use;
2891
2892         len = min(skb_headlen(skb), total);
2893         while (len) {
2894                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2895                 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2896
2897                 tx_buffer_info->length = size;
2898                 tx_buffer_info->mapped_as_page = false;
2899                 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2900                                                      skb->data + offset,
2901                                                      size, DMA_TO_DEVICE);
2902                 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2903                         goto dma_error;
2904                 tx_buffer_info->time_stamp = jiffies;
2905                 tx_buffer_info->next_to_watch = i;
2906
2907                 len -= size;
2908                 total -= size;
2909                 offset += size;
2910                 count++;
2911                 i++;
2912                 if (i == tx_ring->count)
2913                         i = 0;
2914         }
2915
2916         for (f = 0; f < nr_frags; f++) {
2917                 struct skb_frag_struct *frag;
2918
2919                 frag = &skb_shinfo(skb)->frags[f];
2920                 len = min((unsigned int)frag->size, total);
2921                 offset = frag->page_offset;
2922
2923                 while (len) {
2924                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
2925                         size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2926
2927                         tx_buffer_info->length = size;
2928                         tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
2929                                                            frag->page,
2930                                                            offset,
2931                                                            size,
2932                                                            DMA_TO_DEVICE);
2933                         tx_buffer_info->mapped_as_page = true;
2934                         if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2935                                 goto dma_error;
2936                         tx_buffer_info->time_stamp = jiffies;
2937                         tx_buffer_info->next_to_watch = i;
2938
2939                         len -= size;
2940                         total -= size;
2941                         offset += size;
2942                         count++;
2943                         i++;
2944                         if (i == tx_ring->count)
2945                                 i = 0;
2946                 }
2947                 if (total == 0)
2948                         break;
2949         }
2950
2951         if (i == 0)
2952                 i = tx_ring->count - 1;
2953         else
2954                 i = i - 1;
2955         tx_ring->tx_buffer_info[i].skb = skb;
2956         tx_ring->tx_buffer_info[first].next_to_watch = i;
2957
2958         return count;
2959
2960 dma_error:
2961         dev_err(&pdev->dev, "TX DMA map failed\n");
2962
2963         /* clear timestamp and dma mappings for failed tx_buffer_info map */
2964         tx_buffer_info->dma = 0;
2965         tx_buffer_info->time_stamp = 0;
2966         tx_buffer_info->next_to_watch = 0;
2967         count--;
2968
2969         /* clear timestamp and dma mappings for remaining portion of packet */
2970         while (count >= 0) {
2971                 count--;
2972                 i--;
2973                 if (i < 0)
2974                         i += tx_ring->count;
2975                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2976                 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2977         }
2978
2979         return count;
2980 }
2981
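/**
 * ixgbevf_tx_queue - write data descriptors for a mapped packet and arm hw
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @tx_flags: IXGBE_TX_FLAGS_* for this packet
 * @count: number of buffers mapped by ixgbevf_tx_map
 * @paylen: total packet length
 * @hdr_len: TSO header length, 0 when TSO is not used
 *
 * Builds the advanced data descriptors, sets EOP/RS/IFCS on the last
 * descriptor and writes the new tail value to notify the hardware.
 **/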
2982 static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
2983                              struct ixgbevf_ring *tx_ring, int tx_flags,
2984                              int count, u32 paylen, u8 hdr_len)
2985 {
2986         union ixgbe_adv_tx_desc *tx_desc = NULL;
2987         struct ixgbevf_tx_buffer *tx_buffer_info;
2988         u32 olinfo_status = 0, cmd_type_len = 0;
2989         unsigned int i;
2990
2991         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2992
2993         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2994
2995         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2996
2997         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2998                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2999
3000         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3001                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3002
3003                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3004                         IXGBE_ADVTXD_POPTS_SHIFT;
3005
3006                 /* use index 1 context for tso */
3007                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3008                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3009                         olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3010                                 IXGBE_ADVTXD_POPTS_SHIFT;
3011
3012         } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3013                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3014                         IXGBE_ADVTXD_POPTS_SHIFT;
3015
3016         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3017
3018         i = tx_ring->next_to_use;
3019         while (count--) {
3020                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3021                 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3022                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3023                 tx_desc->read.cmd_type_len =
3024                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3025                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3026                 i++;
3027                 if (i == tx_ring->count)
3028                         i = 0;
3029         }
3030
3031         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3032
3033         /*
3034          * Force memory writes to complete before letting h/w
3035          * know there are new descriptors to fetch.  (Only
3036          * applicable for weak-ordered memory model archs,
3037          * such as IA-64).
3038          */
3039         wmb();
3040
3041         tx_ring->next_to_use = i;
3042         writel(i, adapter->hw.hw_addr + tx_ring->tail);
3043 }
3044
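/**
 * __ixgbevf_maybe_stop_tx - stop the queue and re-check for free descriptors
 * @netdev: network interface device structure
 * @tx_ring: ring that is running out of descriptors
 * @size: number of descriptors the next transmit needs
 *
 * Stops the subqueue, then re-checks the ring in case the clean-up path
 * freed descriptors in the meantime; if so, the subqueue is restarted.
 * Returns -EBUSY if the queue stays stopped, 0 if it was restarted.
 **/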
3045 static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3046                                    struct ixgbevf_ring *tx_ring, int size)
3047 {
3048         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3049
3050         netif_stop_subqueue(netdev, tx_ring->queue_index);
3051         /* Herbert's original patch had:
3052          *  smp_mb__after_netif_stop_queue();
3053          * but since that doesn't exist yet, just open code it. */
3054         smp_mb();
3055
3056         /* We need to check again in case another CPU has just
3057          * made room available. */
3058         if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3059                 return -EBUSY;
3060
3061         /* A reprieve! - use start_queue because it doesn't call schedule */
3062         netif_start_subqueue(netdev, tx_ring->queue_index);
3063         ++adapter->restart_queue;
3064         return 0;
3065 }
3066
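/**
 * ixgbevf_maybe_stop_tx - stop the queue if too few descriptors are free
 * @netdev: network interface device structure
 * @tx_ring: ring to check
 * @size: number of descriptors the next transmit needs
 **/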
3067 static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
3068                                  struct ixgbevf_ring *tx_ring, int size)
3069 {
3070         if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3071                 return 0;
3072         return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
3073 }
3074
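/**
 * ixgbevf_xmit_frame - transmit entry point called by the network stack
 * @skb: packet to send
 * @netdev: network interface device structure
 *
 * Checks for ring space, builds any needed context descriptor (TSO,
 * checksum offload or VLAN tag), maps the buffers and queues the data
 * descriptors.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 **/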
3075 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3076 {
3077         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3078         struct ixgbevf_ring *tx_ring;
3079         unsigned int first;
3080         unsigned int tx_flags = 0;
3081         u8 hdr_len = 0;
3082         int r_idx = 0, tso;
3083         int count = 0;
3084
3085         unsigned int f;
3086
3087         tx_ring = &adapter->tx_ring[r_idx];
3088
3089         if (vlan_tx_tag_present(skb)) {
3090                 tx_flags |= vlan_tx_tag_get(skb);
3091                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3092                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3093         }
3094
3095         /* three things can cause us to need a context descriptor */
3096         if (skb_is_gso(skb) ||
3097             (skb->ip_summed == CHECKSUM_PARTIAL) ||
3098             (tx_flags & IXGBE_TX_FLAGS_VLAN))
3099                 count++;
3100
3101         count += TXD_USE_COUNT(skb_headlen(skb));
3102         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3103                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3104
3105         if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
3106                 adapter->tx_busy++;
3107                 return NETDEV_TX_BUSY;
3108         }
3109
3110         first = tx_ring->next_to_use;
3111
3112         if (skb->protocol == htons(ETH_P_IP))
3113                 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3114         tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3115         if (tso < 0) {
3116                 dev_kfree_skb_any(skb);
3117                 return NETDEV_TX_OK;
3118         }
3119
3120         if (tso)
3121                 tx_flags |= IXGBE_TX_FLAGS_TSO;
3122         else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3123                  (skb->ip_summed == CHECKSUM_PARTIAL))
3124                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3125
3126         ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
3127                          ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
3128                          skb->len, hdr_len);
3129
3130         ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3131
3132         return NETDEV_TX_OK;
3133 }
3134
3135 /**
3136  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3137  * @netdev: network interface device structure
3138  * @p: pointer to an address structure
3139  *
3140  * Returns 0 on success, negative on failure
3141  **/
3142 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3143 {
3144         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3145         struct ixgbe_hw *hw = &adapter->hw;
3146         struct sockaddr *addr = p;
3147
3148         if (!is_valid_ether_addr(addr->sa_data))
3149                 return -EADDRNOTAVAIL;
3150
3151         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3152         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3153
3154         if (hw->mac.ops.set_rar)
3155                 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3156
3157         return 0;
3158 }
3159
3160 /**
3161  * ixgbevf_change_mtu - Change the Maximum Transmission Unit
3162  * @netdev: network interface device structure
3163  * @new_mtu: new value for maximum frame size
3164  *
3165  * Returns 0 on success, negative on failure
3166  **/
3167 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3168 {
3169         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3170         struct ixgbe_hw *hw = &adapter->hw;
3171         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3172         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3173         u32 msg[2];
3174
3175         if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3176                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3177
3178         /* MTU < 68 is an error and causes problems on some kernels */
3179         if ((new_mtu < 68) || (max_frame > max_possible_frame))
3180                 return -EINVAL;
3181
3182         hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3183                netdev->mtu, new_mtu);
3184         /* must set new MTU before calling down or up */
3185         netdev->mtu = new_mtu;
3186
3187         msg[0] = IXGBE_VF_SET_LPE;
3188         msg[1] = max_frame;
3189         hw->mbx.ops.write_posted(hw, msg, 2);
3190
3191         if (netif_running(netdev))
3192                 ixgbevf_reinit_locked(adapter);
3193
3194         return 0;
3195 }
3196
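/**
 * ixgbevf_shutdown - bring the device down for shutdown or reboot
 * @pdev: PCI device information struct
 *
 * Detaches the netdev, stops the interface if it is running, frees its
 * IRQs and descriptor resources, and disables the PCI device.
 **/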
3197 static void ixgbevf_shutdown(struct pci_dev *pdev)
3198 {
3199         struct net_device *netdev = pci_get_drvdata(pdev);
3200         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3201
3202         netif_device_detach(netdev);
3203
3204         if (netif_running(netdev)) {
3205                 ixgbevf_down(adapter);
3206                 ixgbevf_free_irq(adapter);
3207                 ixgbevf_free_all_tx_resources(adapter);
3208                 ixgbevf_free_all_rx_resources(adapter);
3209         }
3210
3211 #ifdef CONFIG_PM
3212         pci_save_state(pdev);
3213 #endif
3214
3215         pci_disable_device(pdev);
3216 }
3217
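/**
 * ixgbevf_get_stats - .ndo_get_stats64 entry point
 * @netdev: network interface device structure
 * @stats: storage for the 64 bit statistics
 *
 * Refreshes the adapter statistics and sums the per-ring byte and packet
 * counters under u64_stats_fetch_begin_bh/u64_stats_fetch_retry_bh
 * protection.
 **/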
3218 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3219                                                 struct rtnl_link_stats64 *stats)
3220 {
3221         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3222         unsigned int start;
3223         u64 bytes, packets;
3224         const struct ixgbevf_ring *ring;
3225         int i;
3226
3227         ixgbevf_update_stats(adapter);
3228
3229         stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3230
3231         for (i = 0; i < adapter->num_rx_queues; i++) {
3232                 ring = &adapter->rx_ring[i];
3233                 do {
3234                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3235                         bytes = ring->total_bytes;
3236                         packets = ring->total_packets;
3237                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3238                 stats->rx_bytes += bytes;
3239                 stats->rx_packets += packets;
3240         }
3241
3242         for (i = 0; i < adapter->num_tx_queues; i++) {
3243                 ring = &adapter->tx_ring[i];
3244                 do {
3245                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3246                         bytes = ring->total_bytes;
3247                         packets = ring->total_packets;
3248                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3249                 stats->tx_bytes += bytes;
3250                 stats->tx_packets += packets;
3251         }
3252
3253         return stats;
3254 }
3255
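/**
 * ixgbevf_set_features - .ndo_set_features entry point
 * @netdev: network interface device structure
 * @features: new feature set requested by the stack
 *
 * Only NETIF_F_RXCSUM is acted on here; it toggles the
 * IXGBE_FLAG_RX_CSUM_ENABLED adapter flag.
 **/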
3256 static int ixgbevf_set_features(struct net_device *netdev, u32 features)
3257 {
3258         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3259
3260         if (features & NETIF_F_RXCSUM)
3261                 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
3262         else
3263                 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
3264
3265         return 0;
3266 }
3267
3268 static const struct net_device_ops ixgbe_netdev_ops = {
3269         .ndo_open               = ixgbevf_open,
3270         .ndo_stop               = ixgbevf_close,
3271         .ndo_start_xmit         = ixgbevf_xmit_frame,
3272         .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
3273         .ndo_get_stats64        = ixgbevf_get_stats,
3274         .ndo_validate_addr      = eth_validate_addr,
3275         .ndo_set_mac_address    = ixgbevf_set_mac,
3276         .ndo_change_mtu         = ixgbevf_change_mtu,
3277         .ndo_tx_timeout         = ixgbevf_tx_timeout,
3278         .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
3279         .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
3280         .ndo_set_features       = ixgbevf_set_features,
3281 };
3282
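/**
 * ixgbevf_assign_netdev_ops - attach netdev and ethtool operations
 * @dev: network interface device structure
 *
 * Sets the net_device_ops and ethtool ops for the VF netdev and sets a
 * 5 second watchdog timeout.
 **/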
3283 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3284 {
3285         dev->netdev_ops = &ixgbe_netdev_ops;
3286         ixgbevf_set_ethtool_ops(dev);
3287         dev->watchdog_timeo = 5 * HZ;
3288 }
3289
3290 /**
3291  * ixgbevf_probe - Device Initialization Routine
3292  * @pdev: PCI device information struct
3293  * @ent: entry in ixgbevf_pci_tbl
3294  *
3295  * Returns 0 on success, negative on failure
3296  *
3297  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3298  * The OS initialization, configuration of the adapter private structure,
3299  * and a hardware reset occur.
3300  **/
3301 static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3302                                    const struct pci_device_id *ent)
3303 {
3304         struct net_device *netdev;
3305         struct ixgbevf_adapter *adapter = NULL;
3306         struct ixgbe_hw *hw = NULL;
3307         const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3308         static int cards_found;
3309         int err, pci_using_dac;
3310
3311         err = pci_enable_device(pdev);
3312         if (err)
3313                 return err;
3314
3315         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3316             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3317                 pci_using_dac = 1;
3318         } else {
3319                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3320                 if (err) {
3321                         err = dma_set_coherent_mask(&pdev->dev,
3322                                                     DMA_BIT_MASK(32));
3323                         if (err) {
3324                                 dev_err(&pdev->dev, "No usable DMA "
3325                                         "configuration, aborting\n");
3326                                 goto err_dma;
3327                         }
3328                 }
3329                 pci_using_dac = 0;
3330         }
3331
3332         err = pci_request_regions(pdev, ixgbevf_driver_name);
3333         if (err) {
3334                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3335                 goto err_pci_reg;
3336         }
3337
3338         pci_set_master(pdev);
3339
3340 #ifdef HAVE_TX_MQ
3341         netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3342                                    MAX_TX_QUEUES);
3343 #else
3344         netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3345 #endif
3346         if (!netdev) {
3347                 err = -ENOMEM;
3348                 goto err_alloc_etherdev;
3349         }
3350
3351         SET_NETDEV_DEV(netdev, &pdev->dev);
3352
3353         pci_set_drvdata(pdev, netdev);
3354         adapter = netdev_priv(netdev);
3355
3356         adapter->netdev = netdev;
3357         adapter->pdev = pdev;
3358         hw = &adapter->hw;
3359         hw->back = adapter;
3360         adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3361
3362         /*
3363          * call save state here in standalone driver because it relies on
3364          * adapter struct to exist, and needs to call netdev_priv
3365          */
3366         pci_save_state(pdev);
3367
3368         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3369                               pci_resource_len(pdev, 0));
3370         if (!hw->hw_addr) {
3371                 err = -EIO;
3372                 goto err_ioremap;
3373         }
3374
3375         ixgbevf_assign_netdev_ops(netdev);
3376
3377         adapter->bd_number = cards_found;
3378
3379         /* Setup hw api */
3380         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3381         hw->mac.type  = ii->mac;
3382
3383         memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3384                sizeof(struct ixgbe_mbx_operations));
3385
3386         adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3387         adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3388         adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3389
3390         /* setup the private structure */
3391         err = ixgbevf_sw_init(adapter);
3392
3393         netdev->hw_features = NETIF_F_SG |
3394                            NETIF_F_IP_CSUM |
3395                            NETIF_F_IPV6_CSUM |
3396                            NETIF_F_TSO |
3397                            NETIF_F_TSO6 |
3398                            NETIF_F_RXCSUM;
3399
3400         netdev->features = netdev->hw_features |
3401                            NETIF_F_HW_VLAN_TX |
3402                            NETIF_F_HW_VLAN_RX |
3403                            NETIF_F_HW_VLAN_FILTER;
3404
3405         netdev->vlan_features |= NETIF_F_TSO;
3406         netdev->vlan_features |= NETIF_F_TSO6;
3407         netdev->vlan_features |= NETIF_F_IP_CSUM;
3408         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3409         netdev->vlan_features |= NETIF_F_SG;
3410
3411         if (pci_using_dac)
3412                 netdev->features |= NETIF_F_HIGHDMA;
3413
3414         netdev->priv_flags |= IFF_UNICAST_FLT;
3415
3416         /* The HW MAC address was set and/or determined in sw_init */
3417         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3418         memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3419
3420         if (!is_valid_ether_addr(netdev->dev_addr)) {
3421                 printk(KERN_ERR "invalid MAC address\n");
3422                 err = -EIO;
3423                 goto err_sw_init;
3424         }
3425
3426         init_timer(&adapter->watchdog_timer);
3427         adapter->watchdog_timer.function = ixgbevf_watchdog;
3428         adapter->watchdog_timer.data = (unsigned long)adapter;
3429
3430         INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3431         INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3432
3433         err = ixgbevf_init_interrupt_scheme(adapter);
3434         if (err)
3435                 goto err_sw_init;
3436
3437         /* pick up the PCI bus settings for reporting later */
3438         if (hw->mac.ops.get_bus_info)
3439                 hw->mac.ops.get_bus_info(hw);
3440
3441         strcpy(netdev->name, "eth%d");
3442
3443         err = register_netdev(netdev);
3444         if (err)
3445                 goto err_register;
3446
3447         adapter->netdev_registered = true;
3448
3449         netif_carrier_off(netdev);
3450
3451         ixgbevf_init_last_counter_stats(adapter);
3452
3453         /* print the MAC address */
3454         hw_dbg(hw, "%pM\n", netdev->dev_addr);
3461
3462         hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3463
3464         hw_dbg(hw, "LRO is disabled\n");
3465
3466         hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3467         cards_found++;
3468         return 0;
3469
3470 err_register:
3471 err_sw_init:
3472         ixgbevf_reset_interrupt_capability(adapter);
3473         iounmap(hw->hw_addr);
3474 err_ioremap:
3475         free_netdev(netdev);
3476 err_alloc_etherdev:
3477         pci_release_regions(pdev);
3478 err_pci_reg:
3479 err_dma:
3480         pci_disable_device(pdev);
3481         return err;
3482 }
3483
3484 /**
3485  * ixgbevf_remove - Device Removal Routine
3486  * @pdev: PCI device information struct
3487  *
3488  * ixgbevf_remove is called by the PCI subsystem to alert the driver
3489  * that it should release a PCI device.  This could be caused by a
3490  * Hot-Plug event, or because the driver is going to be removed from
3491  * memory.
3492  **/
3493 static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3494 {
3495         struct net_device *netdev = pci_get_drvdata(pdev);
3496         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3497
3498         set_bit(__IXGBEVF_DOWN, &adapter->state);
3499
3500         del_timer_sync(&adapter->watchdog_timer);
3501
3502         cancel_work_sync(&adapter->reset_task);
3503         cancel_work_sync(&adapter->watchdog_task);
3504
3505         if (adapter->netdev_registered) {
3506                 unregister_netdev(netdev);
3507                 adapter->netdev_registered = false;
3508         }
3509
3510         ixgbevf_reset_interrupt_capability(adapter);
3511
3512         iounmap(adapter->hw.hw_addr);
3513         pci_release_regions(pdev);
3514
3515         hw_dbg(&adapter->hw, "Remove complete\n");
3516
3517         kfree(adapter->tx_ring);
3518         kfree(adapter->rx_ring);
3519
3520         free_netdev(netdev);
3521
3522         pci_disable_device(pdev);
3523 }
3524
3525 static struct pci_driver ixgbevf_driver = {
3526         .name     = ixgbevf_driver_name,
3527         .id_table = ixgbevf_pci_tbl,
3528         .probe    = ixgbevf_probe,
3529         .remove   = __devexit_p(ixgbevf_remove),
3530         .shutdown = ixgbevf_shutdown,
3531 };
3532
3533 /**
3534  * ixgbevf_init_module - Driver Registration Routine
3535  *
3536  * ixgbevf_init_module is the first routine called when the driver is
3537  * loaded. All it does is register with the PCI subsystem.
3538  **/
3539 static int __init ixgbevf_init_module(void)
3540 {
3541         int ret;
3542         printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
3543                ixgbevf_driver_version);
3544
3545         printk(KERN_INFO "%s\n", ixgbevf_copyright);
3546
3547         ret = pci_register_driver(&ixgbevf_driver);
3548         return ret;
3549 }
3550
3551 module_init(ixgbevf_init_module);
3552
3553 /**
3554  * ixgbevf_exit_module - Driver Exit Cleanup Routine
3555  *
3556  * ixgbevf_exit_module is called just before the driver is removed
3557  * from memory.
3558  **/
3559 static void __exit ixgbevf_exit_module(void)
3560 {
3561         pci_unregister_driver(&ixgbevf_driver);
3562 }
3563
3564 #ifdef DEBUG
3565 /**
3566  * ixgbevf_get_hw_dev_name - return device name string
3567  * used by hardware layer to print debugging information
3568  **/
3569 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3570 {
3571         struct ixgbevf_adapter *adapter = hw->back;
3572         return adapter->netdev->name;
3573 }
3574
3575 #endif
3576 module_exit(ixgbevf_exit_module);
3577
3578 /* ixgbevf_main.c */