/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
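
/* Describe the knobs above for modinfo(8); the description strings are
 * editorial summaries of how the parameters gate feature advertisement
 * in virtnet_probe() below. */
MODULE_PARM_DESC(napi_weight, "NAPI poll weight for the receive queue");
MODULE_PARM_DESC(csum, "Advertise checksum offload when the host supports it");
MODULE_PARM_DESC(gso, "Advertise GSO/TSO when the host supports it");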

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2

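/* Per-cpu transmit/receive counters.  Writers wrap updates in
 * u64_stats_update_begin()/end(); readers use the fetch/retry loop in
 * virtnet_stats() to get a consistent 64-bit snapshot on 32-bit hosts. */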
struct virtnet_stats {
	struct u64_stats_sync syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares a page with this header sg.
	 * This padding makes the next sg 16 byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};
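/* Layout note: struct virtio_net_hdr is 10 bytes (two u8 fields and
 * four u16s), so the 6 bytes of padding above round the data sg that
 * follows it up to a 16-byte boundary. */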

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}
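/* The cast above is safe: skb->cb gives us 48 bytes of scratch space,
 * comfortably more than sizeof(struct skb_vnet_hdr). */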

/*
 * page->private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *f;

	f = &skb_shinfo(skb)->frags[i];
	f->size = min((unsigned)PAGE_SIZE - offset, *len);
	f->page_offset = offset;
	f->page = page;

	skb->data_len += f->size;
	skb->len += f->size;
	skb_shinfo(skb)->nr_frags++;
	*len -= f->size;
}

static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	unsigned int len;
	int num_buf, i;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);
	skb->truesize += skb->data_len;

	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separate vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				    first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we enabled
	 * NAPI, we won't get another interrupt, so process any outstanding
	 * packets now.  virtnet_poll wants to re-enable the queue, so we
	 * disable it here.  We synchronize against interrupts via
	 * NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

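/* Reclaim skbs the host has finished transmitting, updating tx stats;
 * returns the total number of sg entries freed. */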
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

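/* Build the virtio header from skb metadata and queue the skb on the
 * send virtqueue; returns the remaining queue capacity, or a negative
 * errno on failure (see start_xmit below). */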
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 0, skb);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (net_ratelimit()) {
			if (likely(capacity == -ENOMEM)) {
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
			} else {
				dev->stats.tx_fifo_errors++;
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
			}
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats __percpu *stats
			= per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	virtnet_napi_enable(vi);
	return 0;
}

/*
 * Send a command via the control virtqueue and check its status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

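/* 68 is the minimum MTU an IPv4 host must accept (RFC 791); 65535 is
 * the largest possible IP datagram. */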
#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free_stats;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		virtnet_update_status(vi);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->svq);
		if (!buf)
			break;
		dev_kfree_skb(buf);
	}
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->rvq);
		if (!buf)
			break;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		--vi->num;
	}
	BUG_ON(vi->num != 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");