pandora-kernel.git: drivers/net/xen-netback/netback.c
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34
35 #include "common.h"
36
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40
41 #include <net/tcp.h>
42
43 #include <xen/xen.h>
44 #include <xen/events.h>
45 #include <xen/interface/memory.h>
46
47 #include <asm/xen/hypercall.h>
48 #include <asm/xen/page.h>
49
50 /* Provide an option to disable split event channels at load time as
51  * event channels are a limited resource. Split event channels are
52  * enabled by default.
53  */
54 bool separate_tx_rx_irq = true;
55 module_param(separate_tx_rx_irq, bool, 0644);
56
57 /*
58  * This is the maximum number of slots a skb can use. If a guest sends
59  * a skb which exceeds this limit it is considered malicious.
60  */
61 #define FATAL_SKB_SLOTS_DEFAULT 20
62 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
63 module_param(fatal_skb_slots, uint, 0444);
64
65 /*
66  * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX to indicate
67  * the maximum number of slots a valid packet can use. This value is
68  * defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
69  * supported by all backends.
70  */
71 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
72
73 typedef unsigned int pending_ring_idx_t;
74 #define INVALID_PENDING_RING_IDX (~0U)
75
76 struct pending_tx_info {
77         struct xen_netif_tx_request req; /* coalesced tx request */
78         struct xenvif *vif;
79         pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
80                                   * if it is head of one or more tx
81                                   * reqs
82                                   */
83 };
84
85 struct netbk_rx_meta {
86         int id;
87         int size;
88         int gso_size;
89 };
90
91 #define MAX_PENDING_REQS 256
92
93 /* Discriminate from any valid pending_idx value. */
94 #define INVALID_PENDING_IDX 0xFFFF
95
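/* Each frontend rx buffer is a single granted page, so a receive buffer is
 * full once MAX_BUFFER_OFFSET bytes have been copied into it.
 */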
96 #define MAX_BUFFER_OFFSET PAGE_SIZE
97
98 /* extra field used in struct page */
99 union page_ext {
100         struct {
101 #if BITS_PER_LONG < 64
102 #define IDX_WIDTH   8
103 #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
104                 unsigned int group:GROUP_WIDTH;
105                 unsigned int idx:IDX_WIDTH;
106 #else
107                 unsigned int group, idx;
108 #endif
109         } e;
110         void *mapping;
111 };
112
113 struct xen_netbk {
114         wait_queue_head_t wq;
115         struct task_struct *task;
116
117         struct sk_buff_head rx_queue;
118         struct sk_buff_head tx_queue;
119
120         struct timer_list net_timer;
121
122         struct page *mmap_pages[MAX_PENDING_REQS];
123
124         pending_ring_idx_t pending_prod;
125         pending_ring_idx_t pending_cons;
126         struct list_head net_schedule_list;
127
128         /* Protect the net_schedule_list in netif. */
129         spinlock_t net_schedule_list_lock;
130
131         atomic_t netfront_count;
132
133         struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
134         /* Coalescing tx requests before copying makes the number of grant
135          * copy ops greater than or equal to the number of slots required. In
136          * the worst case a tx request consumes 2 gnttab_copy operations.
137          */
138         struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
139
140         u16 pending_ring[MAX_PENDING_REQS];
141
142         /*
143          * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
144          * head/fragment page uses 2 copy operations because it
145          * straddles two buffers in the frontend.
146          */
147         struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
148         struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
149 };
150
151 static struct xen_netbk *xen_netbk;
152 static int xen_netbk_group_nr;
153
154 /*
155  * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
156  * of one or more merged tx requests; otherwise it is the continuation of
157  * the previous tx request.
158  */
159 static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
160 {
161         return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
162 }
163
164 void xen_netbk_add_xenvif(struct xenvif *vif)
165 {
166         int i;
167         int min_netfront_count;
168         int min_group = 0;
169         struct xen_netbk *netbk;
170
171         min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
172         for (i = 0; i < xen_netbk_group_nr; i++) {
173                 int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
174                 if (netfront_count < min_netfront_count) {
175                         min_group = i;
176                         min_netfront_count = netfront_count;
177                 }
178         }
179
180         netbk = &xen_netbk[min_group];
181
182         vif->netbk = netbk;
183         atomic_inc(&netbk->netfront_count);
184 }
185
186 void xen_netbk_remove_xenvif(struct xenvif *vif)
187 {
188         struct xen_netbk *netbk = vif->netbk;
189         vif->netbk = NULL;
190         atomic_dec(&netbk->netfront_count);
191 }
192
193 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
194                                   u8 status);
195 static void make_tx_response(struct xenvif *vif,
196                              struct xen_netif_tx_request *txp,
197                              s8       st);
198 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
199                                              u16      id,
200                                              s8       st,
201                                              u16      offset,
202                                              u16      size,
203                                              u16      flags);
204
205 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
206                                        u16 idx)
207 {
208         return page_to_pfn(netbk->mmap_pages[idx]);
209 }
210
211 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
212                                          u16 idx)
213 {
214         return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
215 }
216
217 /* extra field used in struct page */
218 static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
219                                 unsigned int idx)
220 {
221         unsigned int group = netbk - xen_netbk;
222         union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
223
224         BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
225         pg->mapping = ext.mapping;
226 }
227
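/* Decode the owning netback group and pending index from a page's mapping
 * field.  set_page_ext() stores the group biased by one, so a page with a
 * NULL mapping decodes to an out-of-range group; the mmap_pages[] check
 * below rejects any other stray mapping.
 */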
228 static int get_page_ext(struct page *pg,
229                         unsigned int *pgroup, unsigned int *pidx)
230 {
231         union page_ext ext = { .mapping = pg->mapping };
232         struct xen_netbk *netbk;
233         unsigned int group, idx;
234
235         group = ext.e.group - 1;
236
237         if (group < 0 || group >= xen_netbk_group_nr)
238                 return 0;
239
240         netbk = &xen_netbk[group];
241
242         idx = ext.e.idx;
243
244         if ((idx < 0) || (idx >= MAX_PENDING_REQS))
245                 return 0;
246
247         if (netbk->mmap_pages[idx] != pg)
248                 return 0;
249
250         *pgroup = group;
251         *pidx = idx;
252
253         return 1;
254 }
255
256 /*
257  * This is the amount of each packet we copy rather than map, so that the
258  * guest can't fiddle with the contents of the headers while we do
259  * packet processing on them (netfilter, routing, etc).
260  */
261 #define PKT_PROT_LEN    (ETH_HLEN + \
262                          VLAN_HLEN + \
263                          sizeof(struct iphdr) + MAX_IPOPTLEN + \
264                          sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
265
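/* While a tx skb is being assembled its frags do not yet reference real
 * pages; the frag's page_offset field temporarily carries the pending ring
 * index until xen_netbk_fill_frags() installs the page, offset and size.
 */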
266 static u16 frag_get_pending_idx(skb_frag_t *frag)
267 {
268         return (u16)frag->page_offset;
269 }
270
271 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
272 {
273         frag->page_offset = pending_idx;
274 }
275
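/* Pending producer/consumer indices are free-running; MAX_PENDING_REQS is a
 * power of two, so masking maps them onto the pending ring.
 */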
276 static inline pending_ring_idx_t pending_index(unsigned i)
277 {
278         return i & (MAX_PENDING_REQS-1);
279 }
280
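/* Number of tx slots currently handed out and not yet released back to the
 * pending ring.
 */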
281 static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
282 {
283         return MAX_PENDING_REQS -
284                 netbk->pending_prod + netbk->pending_cons;
285 }
286
287 static void xen_netbk_kick_thread(struct xen_netbk *netbk)
288 {
289         wake_up(&netbk->wq);
290 }
291
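/* Worst-case number of rx ring slots one skb can consume: one slot per page
 * of MTU-sized data, plus the frag slots and the extra-info slot when
 * scatter-gather or GSO is in use.
 */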
292 static int max_required_rx_slots(struct xenvif *vif)
293 {
294         int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
295
296         /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
297         if (vif->can_sg || vif->gso || vif->gso_prefix)
298                 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
299
300         return max;
301 }
302
303 int xen_netbk_rx_ring_full(struct xenvif *vif)
304 {
305         RING_IDX peek   = vif->rx_req_cons_peek;
306         RING_IDX needed = max_required_rx_slots(vif);
307
308         return ((vif->rx.sring->req_prod - peek) < needed) ||
309                ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
310 }
311
312 int xen_netbk_must_stop_queue(struct xenvif *vif)
313 {
314         if (!xen_netbk_rx_ring_full(vif))
315                 return 0;
316
317         vif->rx.sring->req_event = vif->rx_req_cons_peek +
318                 max_required_rx_slots(vif);
319         mb(); /* request notification /then/ check the queue */
320
321         return xen_netbk_rx_ring_full(vif);
322 }
323
324 /*
325  * Returns true if we should start a new receive buffer instead of
326  * adding 'size' bytes to a buffer which currently contains 'offset'
327  * bytes.
328  */
329 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
330 {
331         /* simple case: we have completely filled the current buffer. */
332         if (offset == MAX_BUFFER_OFFSET)
333                 return true;
334
335         /*
336          * complex case: start a fresh buffer if the current frag
337          * would overflow the current buffer but only if:
338          *     (i)   this frag would fit completely in the next buffer
339          * and (ii)  there is already some data in the current buffer
340          * and (iii) this is not the head buffer.
341          *
342          * Where:
343          * - (i) stops us splitting a frag into two copies
344          *   unless the frag is too large for a single buffer.
345          * - (ii) stops us from leaving a buffer pointlessly empty.
346          * - (iii) stops us leaving the first buffer
347          *   empty. Strictly speaking this is already covered
348          *   by (ii) but is explicitly checked because
349          *   netfront relies on the first buffer being
350          *   non-empty and can crash otherwise.
351          *
352          * This means we will effectively linearise small
353          * frags but do not needlessly split large buffers
354          * into multiple copies; large frags tend to get their
355          * own buffers as before.
356          */
357         if ((offset + size > MAX_BUFFER_OFFSET) &&
358             (size <= MAX_BUFFER_OFFSET) && offset && !head)
359                 return true;
360
361         return false;
362 }
363
364 /*
365  * Figure out how many ring slots we're going to need to send @skb to
366  * the guest. This function is essentially a dry run of
367  * netbk_gop_frag_copy.
368  */
369 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
370 {
371         unsigned int count;
372         int i, copy_off;
373
374         count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
375
376         copy_off = skb_headlen(skb) % PAGE_SIZE;
377
378         if (skb_shinfo(skb)->gso_size)
379                 count++;
380
381         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
382                 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
383                 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
384                 unsigned long bytes;
385
386                 offset &= ~PAGE_MASK;
387
388                 while (size > 0) {
389                         BUG_ON(offset >= PAGE_SIZE);
390                         BUG_ON(copy_off > MAX_BUFFER_OFFSET);
391
392                         bytes = PAGE_SIZE - offset;
393
394                         if (bytes > size)
395                                 bytes = size;
396
397                         if (start_new_rx_buffer(copy_off, bytes, 0)) {
398                                 count++;
399                                 copy_off = 0;
400                         }
401
402                         if (copy_off + bytes > MAX_BUFFER_OFFSET)
403                                 bytes = MAX_BUFFER_OFFSET - copy_off;
404
405                         copy_off += bytes;
406
407                         offset += bytes;
408                         size -= bytes;
409
410                         if (offset == PAGE_SIZE)
411                                 offset = 0;
412                 }
413         }
414         return count;
415 }
416
417 struct netrx_pending_operations {
418         unsigned copy_prod, copy_cons;
419         unsigned meta_prod, meta_cons;
420         struct gnttab_copy *copy;
421         struct netbk_rx_meta *meta;
422         int copy_off;
423         grant_ref_t copy_gref;
424 };
425
426 static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
427                                                 struct netrx_pending_operations *npo)
428 {
429         struct netbk_rx_meta *meta;
430         struct xen_netif_rx_request *req;
431
432         req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
433
434         meta = npo->meta + npo->meta_prod++;
435         meta->gso_size = 0;
436         meta->size = 0;
437         meta->id = req->id;
438
439         npo->copy_off = 0;
440         npo->copy_gref = req->gref;
441
442         return meta;
443 }
444
445 /*
446  * Set up the grant copy operations for this fragment of the skb being
447  * transmitted to the frontend.
448  */
449 static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
450                                 struct netrx_pending_operations *npo,
451                                 struct page *page, unsigned long size,
452                                 unsigned long offset, int *head)
453 {
454         struct gnttab_copy *copy_gop;
455         struct netbk_rx_meta *meta;
456         /*
457          * These variables are used iff get_page_ext returns true,
458          * in which case they are guaranteed to be initialized.
459          */
460         unsigned int uninitialized_var(group), uninitialized_var(idx);
461         int foreign = get_page_ext(page, &group, &idx);
462         unsigned long bytes;
463
464         /* Data must not cross a page boundary. */
465         BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
466
467         meta = npo->meta + npo->meta_prod - 1;
468
469         /* Skip unused frames from start of page */
470         page += offset >> PAGE_SHIFT;
471         offset &= ~PAGE_MASK;
472
473         while (size > 0) {
474                 BUG_ON(offset >= PAGE_SIZE);
475                 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
476
477                 bytes = PAGE_SIZE - offset;
478
479                 if (bytes > size)
480                         bytes = size;
481
482                 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
483                         /*
484                          * Netfront requires there to be some data in the head
485                          * buffer.
486                          */
487                         BUG_ON(*head);
488
489                         meta = get_next_rx_buffer(vif, npo);
490                 }
491
492                 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
493                         bytes = MAX_BUFFER_OFFSET - npo->copy_off;
494
495                 copy_gop = npo->copy + npo->copy_prod++;
496                 copy_gop->flags = GNTCOPY_dest_gref;
497                 if (foreign) {
498                         struct xen_netbk *netbk = &xen_netbk[group];
499                         struct pending_tx_info *src_pend;
500
501                         src_pend = &netbk->pending_tx_info[idx];
502
503                         copy_gop->source.domid = src_pend->vif->domid;
504                         copy_gop->source.u.ref = src_pend->req.gref;
505                         copy_gop->flags |= GNTCOPY_source_gref;
506                 } else {
507                         void *vaddr = page_address(page);
508                         copy_gop->source.domid = DOMID_SELF;
509                         copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
510                 }
511                 copy_gop->source.offset = offset;
512                 copy_gop->dest.domid = vif->domid;
513
514                 copy_gop->dest.offset = npo->copy_off;
515                 copy_gop->dest.u.ref = npo->copy_gref;
516                 copy_gop->len = bytes;
517
518                 npo->copy_off += bytes;
519                 meta->size += bytes;
520
521                 offset += bytes;
522                 size -= bytes;
523
524                 /* Next frame */
525                 if (offset == PAGE_SIZE && size) {
526                         BUG_ON(!PageCompound(page));
527                         page++;
528                         offset = 0;
529                 }
530
531                 /* Leave a gap for the GSO descriptor. */
532                 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
533                         vif->rx.req_cons++;
534
535                 *head = 0; /* There must be something in this buffer now. */
536
537         }
538 }
539
540 /*
541  * Prepare an SKB to be transmitted to the frontend.
542  *
543  * This function is responsible for allocating grant operations, meta
544  * structures, etc.
545  *
546  * It returns the number of meta structures consumed. The number of
547  * ring slots used is always equal to the number of meta slots used
548  * plus the number of GSO descriptors used. Currently, we use either
549  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
550  * frontend-side LRO).
551  */
552 static int netbk_gop_skb(struct sk_buff *skb,
553                          struct netrx_pending_operations *npo)
554 {
555         struct xenvif *vif = netdev_priv(skb->dev);
556         int nr_frags = skb_shinfo(skb)->nr_frags;
557         int i;
558         struct xen_netif_rx_request *req;
559         struct netbk_rx_meta *meta;
560         unsigned char *data;
561         int head = 1;
562         int old_meta_prod;
563
564         old_meta_prod = npo->meta_prod;
565
566         /* Set up a GSO prefix descriptor, if necessary */
567         if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
568                 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
569                 meta = npo->meta + npo->meta_prod++;
570                 meta->gso_size = skb_shinfo(skb)->gso_size;
571                 meta->size = 0;
572                 meta->id = req->id;
573         }
574
575         req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
576         meta = npo->meta + npo->meta_prod++;
577
578         if (!vif->gso_prefix)
579                 meta->gso_size = skb_shinfo(skb)->gso_size;
580         else
581                 meta->gso_size = 0;
582
583         meta->size = 0;
584         meta->id = req->id;
585         npo->copy_off = 0;
586         npo->copy_gref = req->gref;
587
588         data = skb->data;
589         while (data < skb_tail_pointer(skb)) {
590                 unsigned int offset = offset_in_page(data);
591                 unsigned int len = PAGE_SIZE - offset;
592
593                 if (data + len > skb_tail_pointer(skb))
594                         len = skb_tail_pointer(skb) - data;
595
596                 netbk_gop_frag_copy(vif, skb, npo,
597                                     virt_to_page(data), len, offset, &head);
598                 data += len;
599         }
600
601         for (i = 0; i < nr_frags; i++) {
602                 netbk_gop_frag_copy(vif, skb, npo,
603                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
604                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
605                                     skb_shinfo(skb)->frags[i].page_offset,
606                                     &head);
607         }
608
609         return npo->meta_prod - old_meta_prod;
610 }
611
612 /*
613  * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
614  * used to set up the operations on the top of
615  * netrx_pending_operations, which have since been done.  Check that
616  * they didn't give any errors and advance over them.
617  */
618 static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
619                            struct netrx_pending_operations *npo)
620 {
621         struct gnttab_copy     *copy_op;
622         int status = XEN_NETIF_RSP_OKAY;
623         int i;
624
625         for (i = 0; i < nr_meta_slots; i++) {
626                 copy_op = npo->copy + npo->copy_cons++;
627                 if (copy_op->status != GNTST_okay) {
628                         netdev_dbg(vif->dev,
629                                    "Bad status %d from copy to DOM%d.\n",
630                                    copy_op->status, vif->domid);
631                         status = XEN_NETIF_RSP_ERROR;
632                 }
633         }
634
635         return status;
636 }
637
638 static void netbk_add_frag_responses(struct xenvif *vif, int status,
639                                      struct netbk_rx_meta *meta,
640                                      int nr_meta_slots)
641 {
642         int i;
643         unsigned long offset;
644
645         /* No fragments used */
646         if (nr_meta_slots <= 1)
647                 return;
648
649         nr_meta_slots--;
650
651         for (i = 0; i < nr_meta_slots; i++) {
652                 int flags;
653                 if (i == nr_meta_slots - 1)
654                         flags = 0;
655                 else
656                         flags = XEN_NETRXF_more_data;
657
658                 offset = 0;
659                 make_rx_response(vif, meta[i].id, status, offset,
660                                  meta[i].size, flags);
661         }
662 }
663
664 struct skb_cb_overlay {
665         int meta_slots_used;
666 };
667
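/* Drain the rx_queue in two passes: first build grant-copy operations and
 * meta slots for each queued skb, then perform the whole batch with one
 * gnttab_batch_copy() call, write the rx responses back to the frontends and
 * notify them as needed.
 */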
668 static void xen_netbk_rx_action(struct xen_netbk *netbk)
669 {
670         struct xenvif *vif = NULL, *tmp;
671         s8 status;
672         u16 flags;
673         struct xen_netif_rx_response *resp;
674         struct sk_buff_head rxq;
675         struct sk_buff *skb;
676         LIST_HEAD(notify);
677         int ret;
678         int nr_frags;
679         int count;
680         unsigned long offset;
681         struct skb_cb_overlay *sco;
682
683         struct netrx_pending_operations npo = {
684                 .copy  = netbk->grant_copy_op,
685                 .meta  = netbk->meta,
686         };
687
688         skb_queue_head_init(&rxq);
689
690         count = 0;
691
692         while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
693                 vif = netdev_priv(skb->dev);
694                 nr_frags = skb_shinfo(skb)->nr_frags;
695
696                 sco = (struct skb_cb_overlay *)skb->cb;
697                 sco->meta_slots_used = netbk_gop_skb(skb, &npo);
698
699                 count += nr_frags + 1;
700
701                 __skb_queue_tail(&rxq, skb);
702
703                 /* Filled the batch queue? */
704                 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
705                 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
706                         break;
707         }
708
709         BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
710
711         if (!npo.copy_prod)
712                 return;
713
714         BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
715         gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
716
717         while ((skb = __skb_dequeue(&rxq)) != NULL) {
718                 sco = (struct skb_cb_overlay *)skb->cb;
719
720                 vif = netdev_priv(skb->dev);
721
722                 if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
723                         resp = RING_GET_RESPONSE(&vif->rx,
724                                                 vif->rx.rsp_prod_pvt++);
725
726                         resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
727
728                         resp->offset = netbk->meta[npo.meta_cons].gso_size;
729                         resp->id = netbk->meta[npo.meta_cons].id;
730                         resp->status = sco->meta_slots_used;
731
732                         npo.meta_cons++;
733                         sco->meta_slots_used--;
734                 }
735
736
737                 vif->dev->stats.tx_bytes += skb->len;
738                 vif->dev->stats.tx_packets++;
739
740                 status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
741
742                 if (sco->meta_slots_used == 1)
743                         flags = 0;
744                 else
745                         flags = XEN_NETRXF_more_data;
746
747                 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
748                         flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
749                 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
750                         /* remote but checksummed. */
751                         flags |= XEN_NETRXF_data_validated;
752
753                 offset = 0;
754                 resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
755                                         status, offset,
756                                         netbk->meta[npo.meta_cons].size,
757                                         flags);
758
759                 if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
760                         struct xen_netif_extra_info *gso =
761                                 (struct xen_netif_extra_info *)
762                                 RING_GET_RESPONSE(&vif->rx,
763                                                   vif->rx.rsp_prod_pvt++);
764
765                         resp->flags |= XEN_NETRXF_extra_info;
766
767                         gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
768                         gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
769                         gso->u.gso.pad = 0;
770                         gso->u.gso.features = 0;
771
772                         gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
773                         gso->flags = 0;
774                 }
775
776                 netbk_add_frag_responses(vif, status,
777                                          netbk->meta + npo.meta_cons + 1,
778                                          sco->meta_slots_used);
779
780                 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
781                 if (ret && list_empty(&vif->notify_list))
782                         list_add_tail(&vif->notify_list, &notify);
783
784                 xenvif_notify_tx_completion(vif);
785
786                 xenvif_put(vif);
787                 npo.meta_cons += sco->meta_slots_used;
788                 dev_kfree_skb(skb);
789         }
790
791         list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
792                 notify_remote_via_irq(vif->rx_irq);
793                 list_del_init(&vif->notify_list);
794         }
795
796         /* More work to do? */
797         if (!skb_queue_empty(&netbk->rx_queue) &&
798                         !timer_pending(&netbk->net_timer))
799                 xen_netbk_kick_thread(netbk);
800 }
801
802 void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
803 {
804         struct xen_netbk *netbk = vif->netbk;
805
806         skb_queue_tail(&netbk->rx_queue, skb);
807
808         xen_netbk_kick_thread(netbk);
809 }
810
811 static void xen_netbk_alarm(unsigned long data)
812 {
813         struct xen_netbk *netbk = (struct xen_netbk *)data;
814         xen_netbk_kick_thread(netbk);
815 }
816
817 static int __on_net_schedule_list(struct xenvif *vif)
818 {
819         return !list_empty(&vif->schedule_list);
820 }
821
822 /* Must be called with net_schedule_list_lock held */
823 static void remove_from_net_schedule_list(struct xenvif *vif)
824 {
825         if (likely(__on_net_schedule_list(vif))) {
826                 list_del_init(&vif->schedule_list);
827                 xenvif_put(vif);
828         }
829 }
830
831 static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
832 {
833         struct xenvif *vif = NULL;
834
835         spin_lock_irq(&netbk->net_schedule_list_lock);
836         if (list_empty(&netbk->net_schedule_list))
837                 goto out;
838
839         vif = list_first_entry(&netbk->net_schedule_list,
840                                struct xenvif, schedule_list);
841         if (!vif)
842                 goto out;
843
844         xenvif_get(vif);
845
846         remove_from_net_schedule_list(vif);
847 out:
848         spin_unlock_irq(&netbk->net_schedule_list_lock);
849         return vif;
850 }
851
852 void xen_netbk_schedule_xenvif(struct xenvif *vif)
853 {
854         unsigned long flags;
855         struct xen_netbk *netbk = vif->netbk;
856
857         if (__on_net_schedule_list(vif))
858                 goto kick;
859
860         spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
861         if (!__on_net_schedule_list(vif) &&
862             likely(xenvif_schedulable(vif))) {
863                 list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
864                 xenvif_get(vif);
865         }
866         spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
867
868 kick:
869         smp_mb();
870         if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
871             !list_empty(&netbk->net_schedule_list))
872                 xen_netbk_kick_thread(netbk);
873 }
874
875 void xen_netbk_deschedule_xenvif(struct xenvif *vif)
876 {
877         struct xen_netbk *netbk = vif->netbk;
878         spin_lock_irq(&netbk->net_schedule_list_lock);
879         remove_from_net_schedule_list(vif);
880         spin_unlock_irq(&netbk->net_schedule_list_lock);
881 }
882
883 void xen_netbk_check_rx_xenvif(struct xenvif *vif)
884 {
885         int more_to_do;
886
887         RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
888
889         if (more_to_do)
890                 xen_netbk_schedule_xenvif(vif);
891 }
892
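/* Credit-based tx scheduling: a vif may transmit up to credit_bytes every
 * credit_usec.  tx_add_credit() tops the allowance back up, capped so that a
 * single replenishment can still carry one large packet (up to 128kB).
 */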
893 static void tx_add_credit(struct xenvif *vif)
894 {
895         unsigned long max_burst, max_credit;
896
897         /*
898          * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
899          * Otherwise the interface can seize up due to insufficient credit.
900          */
901         max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
902         max_burst = min(max_burst, 131072UL);
903         max_burst = max(max_burst, vif->credit_bytes);
904
905         /* Take care that adding a new chunk of credit doesn't wrap to zero. */
906         max_credit = vif->remaining_credit + vif->credit_bytes;
907         if (max_credit < vif->remaining_credit)
908                 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
909
910         vif->remaining_credit = min(max_credit, max_burst);
911 }
912
913 static void tx_credit_callback(unsigned long data)
914 {
915         struct xenvif *vif = (struct xenvif *)data;
916         tx_add_credit(vif);
917         xen_netbk_check_rx_xenvif(vif);
918 }
919
920 static void netbk_tx_err(struct xenvif *vif,
921                          struct xen_netif_tx_request *txp, RING_IDX end)
922 {
923         RING_IDX cons = vif->tx.req_cons;
924
925         do {
926                 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
927                 if (cons == end)
928                         break;
929                 txp = RING_GET_REQUEST(&vif->tx, cons++);
930         } while (1);
931         vif->tx.req_cons = cons;
932         xen_netbk_check_rx_xenvif(vif);
933         xenvif_put(vif);
934 }
935
936 static void netbk_fatal_tx_err(struct xenvif *vif)
937 {
938         netdev_err(vif->dev, "fatal error; disabling device\n");
939         xenvif_carrier_off(vif);
940         xenvif_put(vif);
941 }
942
943 static int netbk_count_requests(struct xenvif *vif,
944                                 struct xen_netif_tx_request *first,
945                                 struct xen_netif_tx_request *txp,
946                                 int work_to_do)
947 {
948         RING_IDX cons = vif->tx.req_cons;
949         int slots = 0;
950         int drop_err = 0;
951         int more_data;
952
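        /* Walk the chain of tx requests that make up this packet.  Protocol
         * violations (overrunning work_to_do, using fatal_skb_slots or more
         * slots, or a slot crossing a page boundary) kill the vif; merely
         * exceeding the legacy 18-slot limit or reporting inconsistent sizes
         * only drops the packet.
         */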
953         if (!(first->flags & XEN_NETTXF_more_data))
954                 return 0;
955
956         do {
957                 struct xen_netif_tx_request dropped_tx = { 0 };
958
959                 if (slots >= work_to_do) {
960                         netdev_err(vif->dev,
961                                    "Asked for %d slots but exceeds this limit\n",
962                                    work_to_do);
963                         netbk_fatal_tx_err(vif);
964                         return -ENODATA;
965                 }
966
967                 /* This guest is really using too many slots and is
968                  * considered malicious.
969                  */
970                 if (unlikely(slots >= fatal_skb_slots)) {
971                         netdev_err(vif->dev,
972                                    "Malicious frontend using %d slots, threshold %u\n",
973                                    slots, fatal_skb_slots);
974                         netbk_fatal_tx_err(vif);
975                         return -E2BIG;
976                 }
977
978                 /* The Xen network protocol had an implicit dependency on
979                  * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
980                  * the historical MAX_SKB_FRAGS value 18 to honor the
981                  * same behavior as before. Any packet using more than
982                  * 18 slots but fewer than fatal_skb_slots slots is
983                  * dropped.
984                  */
985                 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
986                         if (net_ratelimit())
987                                 netdev_dbg(vif->dev,
988                                            "Too many slots (%d) exceeding limit (%d), dropping packet\n",
989                                            slots, XEN_NETBK_LEGACY_SLOTS_MAX);
990                         drop_err = -E2BIG;
991                 }
992
993                 if (drop_err)
994                         txp = &dropped_tx;
995
996                 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
997                        sizeof(*txp));
998
999                 /* If the guest submitted a frame >= 64 KiB then
1000                  * first->size overflowed and following slots will
1001                  * appear to be larger than the frame.
1002                  *
1003                  * This cannot be a fatal error as there are buggy
1004                  * frontends that do this.
1005                  *
1006                  * Consume all slots and drop the packet.
1007                  */
1008                 if (!drop_err && txp->size > first->size) {
1009                         if (net_ratelimit())
1010                                 netdev_dbg(vif->dev,
1011                                            "Invalid tx request, slot size %u > remaining size %u\n",
1012                                            txp->size, first->size);
1013                         drop_err = -EIO;
1014                 }
1015
1016                 first->size -= txp->size;
1017                 slots++;
1018
1019                 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
1020                         netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
1021                                  txp->offset, txp->size);
1022                         netbk_fatal_tx_err(vif);
1023                         return -EINVAL;
1024                 }
1025
1026                 more_data = txp->flags & XEN_NETTXF_more_data;
1027
1028                 if (!drop_err)
1029                         txp++;
1030
1031         } while (more_data);
1032
1033         if (drop_err) {
1034                 netbk_tx_err(vif, first, cons + slots);
1035                 return drop_err;
1036         }
1037
1038         return slots;
1039 }
1040
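/* Allocate a backing page for a pending tx slot and tag it with
 * set_page_ext() so that, should the data later be forwarded to another vif,
 * netbk_gop_frag_copy() can recover the owning group/slot and copy straight
 * from the original grant reference.
 */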
1041 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
1042                                          u16 pending_idx)
1043 {
1044         struct page *page;
1045         page = alloc_page(GFP_KERNEL|__GFP_COLD);
1046         if (!page)
1047                 return NULL;
1048         set_page_ext(page, netbk, pending_idx);
1049         netbk->mmap_pages[pending_idx] = page;
1050         return page;
1051 }
1052
1053 static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1054                                                   struct xenvif *vif,
1055                                                   struct sk_buff *skb,
1056                                                   struct xen_netif_tx_request *txp,
1057                                                   struct gnttab_copy *gop)
1058 {
1059         struct skb_shared_info *shinfo = skb_shinfo(skb);
1060         skb_frag_t *frags = shinfo->frags;
1061         u16 pending_idx = *((u16 *)skb->data);
1062         u16 head_idx = 0;
1063         int slot, start;
1064         struct page *page;
1065         pending_ring_idx_t index, start_idx = 0;
1066         uint16_t dst_offset;
1067         unsigned int nr_slots;
1068         struct pending_tx_info *first = NULL;
1069
1070         /* At this point shinfo->nr_frags is in fact the number of
1071          * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1072          */
1073         nr_slots = shinfo->nr_frags;
1074
1075         /* Skip first skb fragment if it is on same page as header fragment. */
1076         start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1077
1078         /* Coalesce tx requests, at this point the packet passed in
1079          * should be <= 64K. Any packets larger than 64K have been
1080          * handled in netbk_count_requests().
1081          */
1082         for (shinfo->nr_frags = slot = start; slot < nr_slots;
1083              shinfo->nr_frags++) {
1084                 struct pending_tx_info *pending_tx_info =
1085                         netbk->pending_tx_info;
1086
1087                 page = alloc_page(GFP_KERNEL|__GFP_COLD);
1088                 if (!page)
1089                         goto err;
1090
1091                 dst_offset = 0;
1092                 first = NULL;
1093                 while (dst_offset < PAGE_SIZE && slot < nr_slots) {
1094                         gop->flags = GNTCOPY_source_gref;
1095
1096                         gop->source.u.ref = txp->gref;
1097                         gop->source.domid = vif->domid;
1098                         gop->source.offset = txp->offset;
1099
1100                         gop->dest.domid = DOMID_SELF;
1101
1102                         gop->dest.offset = dst_offset;
1103                         gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1104
1105                         if (dst_offset + txp->size > PAGE_SIZE) {
1106                                 /* This page can only merge a portion
1107                                  * of the tx request. Do not increment any
1108                                  * pointer / counter here. The txp
1109                                  * will be dealt with in future
1110                                  * rounds, eventually hitting the
1111                                  * `else` branch.
1112                                  */
1113                                 gop->len = PAGE_SIZE - dst_offset;
1114                                 txp->offset += gop->len;
1115                                 txp->size -= gop->len;
1116                                 dst_offset += gop->len; /* quit loop */
1117                         } else {
1118                                 /* This tx request can be merged in the page */
1119                                 gop->len = txp->size;
1120                                 dst_offset += gop->len;
1121
1122                                 index = pending_index(netbk->pending_cons++);
1123
1124                                 pending_idx = netbk->pending_ring[index];
1125
1126                                 memcpy(&pending_tx_info[pending_idx].req, txp,
1127                                        sizeof(*txp));
1128                                 xenvif_get(vif);
1129
1130                                 pending_tx_info[pending_idx].vif = vif;
1131
1132                                 /* Poison these fields; the corresponding
1133                                  * fields for the head tx req will be set
1134                                  * to the correct values after the loop.
1135                                  */
1136                                 netbk->mmap_pages[pending_idx] = (void *)(~0UL);
1137                                 pending_tx_info[pending_idx].head =
1138                                         INVALID_PENDING_RING_IDX;
1139
1140                                 if (!first) {
1141                                         first = &pending_tx_info[pending_idx];
1142                                         start_idx = index;
1143                                         head_idx = pending_idx;
1144                                 }
1145
1146                                 txp++;
1147                                 slot++;
1148                         }
1149
1150                         gop++;
1151                 }
1152
1153                 first->req.offset = 0;
1154                 first->req.size = dst_offset;
1155                 first->head = start_idx;
1156                 set_page_ext(page, netbk, head_idx);
1157                 netbk->mmap_pages[head_idx] = page;
1158                 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
1159         }
1160
1161         BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
1162
1163         return gop;
1164 err:
1165         /* Unwind, freeing all pages and sending error responses. */
1166         while (shinfo->nr_frags-- > start) {
1167                 xen_netbk_idx_release(netbk,
1168                                 frag_get_pending_idx(&frags[shinfo->nr_frags]),
1169                                 XEN_NETIF_RSP_ERROR);
1170         }
1171         /* The head too, if necessary. */
1172         if (start)
1173                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1174
1175         return NULL;
1176 }
1177
1178 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1179                                   struct sk_buff *skb,
1180                                   struct gnttab_copy **gopp)
1181 {
1182         struct gnttab_copy *gop = *gopp;
1183         u16 pending_idx = *((u16 *)skb->data);
1184         struct skb_shared_info *shinfo = skb_shinfo(skb);
1185         struct pending_tx_info *tx_info;
1186         int nr_frags = shinfo->nr_frags;
1187         int i, err, start;
1188         u16 peek; /* peek into next tx request */
1189
1190         /* Check status of header. */
1191         err = gop->status;
1192         if (unlikely(err))
1193                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1194
1195         /* Skip first skb fragment if it is on same page as header fragment. */
1196         start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1197
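        /* Each frag may have been assembled from several coalesced tx
         * requests, each contributing one copy op; walk the pending ring from
         * this frag's head entry until the next head, checking the status of
         * every copy op on the way.
         */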
1198         for (i = start; i < nr_frags; i++) {
1199                 int j, newerr;
1200                 pending_ring_idx_t head;
1201
1202                 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1203                 tx_info = &netbk->pending_tx_info[pending_idx];
1204                 head = tx_info->head;
1205
1206                 /* Check the error status of every copy op that made up this frag. */
1207                 do {
1208                         newerr = (++gop)->status;
1209                         if (newerr)
1210                                 break;
1211                         peek = netbk->pending_ring[pending_index(++head)];
1212                 } while (!pending_tx_is_head(netbk, peek));
1213
1214                 if (likely(!newerr)) {
1215                         /* Had a previous error? Invalidate this fragment. */
1216                         if (unlikely(err))
1217                                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1218                         continue;
1219                 }
1220
1221                 /* Error on this fragment: respond to client with an error. */
1222                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1223
1224                 /* Not the first error? Preceding frags already invalidated. */
1225                 if (err)
1226                         continue;
1227
1228                 /* First error: invalidate header and preceding fragments. */
1229                 pending_idx = *((u16 *)skb->data);
1230                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1231                 for (j = start; j < i; j++) {
1232                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1233                         xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1234                 }
1235
1236                 /* Remember the error: invalidate all subsequent fragments. */
1237                 err = newerr;
1238         }
1239
1240         *gopp = gop + 1;
1241         return err;
1242 }
1243
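/* Now that the grant copies have completed, replace the pending-index
 * placeholders left by frag_set_pending_idx() with the real pages, offsets
 * and sizes.
 */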
1244 static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1245 {
1246         struct skb_shared_info *shinfo = skb_shinfo(skb);
1247         int nr_frags = shinfo->nr_frags;
1248         int i;
1249
1250         for (i = 0; i < nr_frags; i++) {
1251                 skb_frag_t *frag = shinfo->frags + i;
1252                 struct xen_netif_tx_request *txp;
1253                 struct page *page;
1254                 u16 pending_idx;
1255
1256                 pending_idx = frag_get_pending_idx(frag);
1257
1258                 txp = &netbk->pending_tx_info[pending_idx].req;
1259                 page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1260                 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1261                 skb->len += txp->size;
1262                 skb->data_len += txp->size;
1263                 skb->truesize += txp->size;
1264
1265                 /* Take an extra reference to offset xen_netbk_idx_release */
1266                 get_page(netbk->mmap_pages[pending_idx]);
1267                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1268         }
1269 }
1270
1271 static int xen_netbk_get_extras(struct xenvif *vif,
1272                                 struct xen_netif_extra_info *extras,
1273                                 int work_to_do)
1274 {
1275         struct xen_netif_extra_info extra;
1276         RING_IDX cons = vif->tx.req_cons;
1277
1278         do {
1279                 if (unlikely(work_to_do-- <= 0)) {
1280                         netdev_err(vif->dev, "Missing extra info\n");
1281                         netbk_fatal_tx_err(vif);
1282                         return -EBADR;
1283                 }
1284
1285                 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1286                        sizeof(extra));
1287                 if (unlikely(!extra.type ||
1288                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1289                         vif->tx.req_cons = ++cons;
1290                         netdev_err(vif->dev,
1291                                    "Invalid extra type: %d\n", extra.type);
1292                         netbk_fatal_tx_err(vif);
1293                         return -EINVAL;
1294                 }
1295
1296                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1297                 vif->tx.req_cons = ++cons;
1298         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1299
1300         return work_to_do;
1301 }
1302
1303 static int netbk_set_skb_gso(struct xenvif *vif,
1304                              struct sk_buff *skb,
1305                              struct xen_netif_extra_info *gso)
1306 {
1307         if (!gso->u.gso.size) {
1308                 netdev_err(vif->dev, "GSO size must not be zero.\n");
1309                 netbk_fatal_tx_err(vif);
1310                 return -EINVAL;
1311         }
1312
1313         /* Currently only TCPv4 S.O. is supported. */
1314         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1315                 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1316                 netbk_fatal_tx_err(vif);
1317                 return -EINVAL;
1318         }
1319
1320         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1321         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1322
1323         /* Header must be checked, and gso_segs computed. */
1324         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1325         skb_shinfo(skb)->gso_segs = 0;
1326
1327         return 0;
1328 }
1329
1330 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1331 {
1332         struct iphdr *iph;
1333         int err = -EPROTO;
1334         int recalculate_partial_csum = 0;
1335
1336         /*
1337          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1338          * peers can fail to set NETRXF_csum_blank when sending a GSO
1339          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1340          * recalculate the partial checksum.
1341          */
1342         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1343                 vif->rx_gso_checksum_fixup++;
1344                 skb->ip_summed = CHECKSUM_PARTIAL;
1345                 recalculate_partial_csum = 1;
1346         }
1347
1348         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1349         if (skb->ip_summed != CHECKSUM_PARTIAL)
1350                 return 0;
1351
1352         if (skb->protocol != htons(ETH_P_IP))
1353                 goto out;
1354
1355         iph = (void *)skb->data;
1356         switch (iph->protocol) {
1357         case IPPROTO_TCP:
1358                 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1359                                           offsetof(struct tcphdr, check)))
1360                         goto out;
1361
1362                 if (recalculate_partial_csum) {
1363                         struct tcphdr *tcph = tcp_hdr(skb);
1364                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1365                                                          skb->len - iph->ihl*4,
1366                                                          IPPROTO_TCP, 0);
1367                 }
1368                 break;
1369         case IPPROTO_UDP:
1370                 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1371                                           offsetof(struct udphdr, check)))
1372                         goto out;
1373
1374                 if (recalculate_partial_csum) {
1375                         struct udphdr *udph = udp_hdr(skb);
1376                         udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1377                                                          skb->len - iph->ihl*4,
1378                                                          IPPROTO_UDP, 0);
1379                 }
1380                 break;
1381         default:
1382                 if (net_ratelimit())
1383                         netdev_err(vif->dev,
1384                                    "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1385                                    iph->protocol);
1386                 goto out;
1387         }
1388
1389         err = 0;
1390
1391 out:
1392         return err;
1393 }
1394
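/* Returns true if this packet must wait for credit to be replenished; in
 * that case the credit timer is armed so transmission resumes later.
 */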
1395 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1396 {
1397         unsigned long now = jiffies;
1398         unsigned long next_credit =
1399                 vif->credit_timeout.expires +
1400                 msecs_to_jiffies(vif->credit_usec / 1000);
1401
1402         /* Timer could already be pending in rare cases. */
1403         if (timer_pending(&vif->credit_timeout))
1404                 return true;
1405
1406         /* Passed the point where we can replenish credit? */
1407         if (time_after_eq(now, next_credit)) {
1408                 vif->credit_timeout.expires = now;
1409                 tx_add_credit(vif);
1410         }
1411
1412         /* Still too big to send right now? Set a callback. */
1413         if (size > vif->remaining_credit) {
1414                 vif->credit_timeout.data     =
1415                         (unsigned long)vif;
1416                 vif->credit_timeout.function =
1417                         tx_credit_callback;
1418                 mod_timer(&vif->credit_timeout,
1419                           next_credit);
1420
1421                 return true;
1422         }
1423
1424         return false;
1425 }
1426
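/* Pull tx requests from the vifs on the schedule list, validate them and
 * turn them into grant-copy operations in netbk->tx_copy_ops.  The loop only
 * runs while at least XEN_NETBK_LEGACY_SLOTS_MAX + 1 pending slots are free,
 * guaranteeing room for a worst-case packet.
 */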
1427 static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1428 {
1429         struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1430         struct sk_buff *skb;
1431         int ret;
1432
1433         while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
1434                 < MAX_PENDING_REQS) &&
1435                 !list_empty(&netbk->net_schedule_list)) {
1436                 struct xenvif *vif;
1437                 struct xen_netif_tx_request txreq;
1438                 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1439                 struct page *page;
1440                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1441                 u16 pending_idx;
1442                 RING_IDX idx;
1443                 int work_to_do;
1444                 unsigned int data_len;
1445                 pending_ring_idx_t index;
1446
1447                 /* Get a netif from the list with work to do. */
1448                 vif = poll_net_schedule_list(netbk);
1449                 /* This can sometimes happen because the test of
1450                  * list_empty(net_schedule_list) at the top of the
1451                  * loop is unlocked.  Just go back and have another
1452                  * look.
1453                  */
1454                 if (!vif)
1455                         continue;
1456
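                /* More requests outstanding than the ring can hold can
                 * only mean a buggy or malicious frontend; report it
                 * via netbk_fatal_tx_err().
                 */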
1457                 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1458                     XEN_NETIF_TX_RING_SIZE) {
1459                         netdev_err(vif->dev,
1460                                    "Impossible number of requests. "
1461                                    "req_prod %d, req_cons %d, size %ld\n",
1462                                    vif->tx.sring->req_prod, vif->tx.req_cons,
1463                                    XEN_NETIF_TX_RING_SIZE);
1464                         netbk_fatal_tx_err(vif);
1465                         continue;
1466                 }
1467
1468                 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1469                 if (!work_to_do) {
1470                         xenvif_put(vif);
1471                         continue;
1472                 }
1473
1474                 idx = vif->tx.req_cons;
1475                 rmb(); /* Ensure that we see the request before we copy it. */
1476                 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1477
1478                 /* Credit-based scheduling. */
1479                 if (txreq.size > vif->remaining_credit &&
1480                     tx_credit_exceeded(vif, txreq.size)) {
1481                         xenvif_put(vif);
1482                         continue;
1483                 }
1484
1485                 vif->remaining_credit -= txreq.size;
1486
1487                 work_to_do--;
1488                 vif->tx.req_cons = ++idx;
1489
1490                 memset(extras, 0, sizeof(extras));
1491                 if (txreq.flags & XEN_NETTXF_extra_info) {
1492                         work_to_do = xen_netbk_get_extras(vif, extras,
1493                                                           work_to_do);
1494                         idx = vif->tx.req_cons;
1495                         if (unlikely(work_to_do < 0))
1496                                 continue;
1497                 }
1498
1499                 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1500                 if (unlikely(ret < 0))
1501                         continue;
1502
1503                 idx += ret;
1504
1505                 if (unlikely(txreq.size < ETH_HLEN)) {
1506                         netdev_dbg(vif->dev,
1507                                    "Bad packet size: %d\n", txreq.size);
1508                         netbk_tx_err(vif, &txreq, idx);
1509                         continue;
1510                 }
1511
1512                 /* The request data must not cross a page boundary, as the linear payload cannot be fragmented. */
1513                 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1514                         netdev_err(vif->dev,
1515                                    "txreq.offset: %x, size: %u, end: %lu\n",
1516                                    txreq.offset, txreq.size,
1517                                    (txreq.offset&~PAGE_MASK) + txreq.size);
1518                         netbk_fatal_tx_err(vif);
1519                         continue;
1520                 }
1521
1522                 index = pending_index(netbk->pending_cons);
1523                 pending_idx = netbk->pending_ring[index];
1524
1525                 data_len = (txreq.size > PKT_PROT_LEN &&
1526                             ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1527                         PKT_PROT_LEN : txreq.size;
1528
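                /* Decide how much of the first slot is copied into the
                 * skb's linear area (at most PKT_PROT_LEN bytes in the
                 * common case); any remainder is attached as a frag
                 * further down.
                 */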
1529                 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1530                                 GFP_ATOMIC | __GFP_NOWARN);
1531                 if (unlikely(skb == NULL)) {
1532                         netdev_dbg(vif->dev,
1533                                    "Can't allocate a skb in tx_build_gops.\n");
1534                         netbk_tx_err(vif, &txreq, idx);
1535                         break;
1536                 }
1537
1538                 /* Packets passed to netif_rx() must have some headroom. */
1539                 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1540
1541                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1542                         struct xen_netif_extra_info *gso;
1543                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1544
1545                         if (netbk_set_skb_gso(vif, skb, gso)) {
1546                                 /* Failure in netbk_set_skb_gso is fatal. */
1547                                 kfree_skb(skb);
1548                                 continue;
1549                         }
1550                 }
1551
1552                 /* XXX could copy straight to head */
1553                 page = xen_netbk_alloc_page(netbk, pending_idx);
1554                 if (!page) {
1555                         kfree_skb(skb);
1556                         netbk_tx_err(vif, &txreq, idx);
1557                         continue;
1558                 }
1559
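                /* Grant-copy the first slot from the frontend's granted
                 * page into the page just allocated for this pending
                 * slot.
                 */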
1560                 gop->source.u.ref = txreq.gref;
1561                 gop->source.domid = vif->domid;
1562                 gop->source.offset = txreq.offset;
1563
1564                 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1565                 gop->dest.domid = DOMID_SELF;
1566                 gop->dest.offset = txreq.offset;
1567
1568                 gop->len = txreq.size;
1569                 gop->flags = GNTCOPY_source_gref;
1570
1571                 gop++;
1572
1573                 memcpy(&netbk->pending_tx_info[pending_idx].req,
1574                        &txreq, sizeof(txreq));
1575                 netbk->pending_tx_info[pending_idx].vif = vif;
1576                 netbk->pending_tx_info[pending_idx].head = index;
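                /* Stash the pending index at the start of the skb data
                 * area; xen_netbk_tx_submit() reads it back before the
                 * real payload is copied in on top of it.
                 */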
1577                 *((u16 *)skb->data) = pending_idx;
1578
1579                 __skb_put(skb, data_len);
1580
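                /* The 'ret' extra slots become frags.  If the first
                 * slot did not fit entirely into the linear area, one
                 * more frag is added for its remainder and frag[0]
                 * remembers this slot's pending_idx; otherwise frag[0]
                 * is tagged invalid.
                 */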
1581                 skb_shinfo(skb)->nr_frags = ret;
1582                 if (data_len < txreq.size) {
1583                         skb_shinfo(skb)->nr_frags++;
1584                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1585                                              pending_idx);
1586                 } else {
1587                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1588                                              INVALID_PENDING_IDX);
1589                 }
1590
1591                 netbk->pending_cons++;
1592
1593                 request_gop = xen_netbk_get_requests(netbk, vif,
1594                                                      skb, txfrags, gop);
1595                 if (request_gop == NULL) {
1596                         kfree_skb(skb);
1597                         netbk_tx_err(vif, &txreq, idx);
1598                         continue;
1599                 }
1600                 gop = request_gop;
1601
1602                 __skb_queue_tail(&netbk->tx_queue, skb);
1603
1604                 vif->tx.req_cons = idx;
1605                 xen_netbk_check_rx_xenvif(vif);
1606
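                /* Stop once the copy-op array is full; whatever is left
                 * on the rings is picked up on the next pass.
                 */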
1607                 if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1608                         break;
1609         }
1610
1611         return gop - netbk->tx_copy_ops;
1612 }
1613
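/*
 * Second half of the TX path: once the grant copies have been carried
 * out, check each queued skb's copy status, pull the protocol headers
 * into the linear area, set up checksum state and hand the skb to
 * xenvif_receive_skb().
 */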
1614 static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1615 {
1616         struct gnttab_copy *gop = netbk->tx_copy_ops;
1617         struct sk_buff *skb;
1618
1619         while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1620                 struct xen_netif_tx_request *txp;
1621                 struct xenvif *vif;
1622                 u16 pending_idx;
1623                 unsigned data_len;
1624
1625                 pending_idx = *((u16 *)skb->data);
1626                 vif = netbk->pending_tx_info[pending_idx].vif;
1627                 txp = &netbk->pending_tx_info[pending_idx].req;
1628
1629                 /* Check the status of the grant copy operations. */
1630                 if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1631                         netdev_dbg(vif->dev, "netback grant failed.\n");
1632                         skb_shinfo(skb)->nr_frags = 0;
1633                         kfree_skb(skb);
1634                         continue;
1635                 }
1636
1637                 data_len = skb->len;
1638                 memcpy(skb->data,
1639                        (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1640                        data_len);
1641                 if (data_len < txp->size) {
1642                         /* Append the packet payload as a fragment. */
1643                         txp->offset += data_len;
1644                         txp->size -= data_len;
1645                 } else {
1646                         /* Schedule a response immediately. */
1647                         xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1648                 }
1649
1650                 if (txp->flags & XEN_NETTXF_csum_blank)
1651                         skb->ip_summed = CHECKSUM_PARTIAL;
1652                 else if (txp->flags & XEN_NETTXF_data_validated)
1653                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1654
1655                 xen_netbk_fill_frags(netbk, skb);
1656
1657                 /*
1658                  * If the initial fragment was < PKT_PROT_LEN then
1659                  * pull through some bytes from the other fragments to
1660                  * increase the linear region to PKT_PROT_LEN bytes.
1661                  */
1662                 if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1663                         int target = min_t(int, skb->len, PKT_PROT_LEN);
1664                         __pskb_pull_tail(skb, target - skb_headlen(skb));
1665                 }
1666
1667                 skb->dev      = vif->dev;
1668                 skb->protocol = eth_type_trans(skb, skb->dev);
1669                 skb_reset_network_header(skb);
1670
1671                 if (checksum_setup(vif, skb)) {
1672                         netdev_dbg(vif->dev,
1673                                    "Can't setup checksum in net_tx_action\n");
1674                         kfree_skb(skb);
1675                         continue;
1676                 }
1677
1678                 skb_probe_transport_header(skb, 0);
1679
1680                 vif->dev->stats.rx_bytes += skb->len;
1681                 vif->dev->stats.rx_packets++;
1682
1683                 xenvif_receive_skb(vif, skb);
1684         }
1685 }
1686
1687 /* Called after netfront has transmitted */
1688 static void xen_netbk_tx_action(struct xen_netbk *netbk)
1689 {
1690         unsigned nr_gops;
1691
1692         nr_gops = xen_netbk_tx_build_gops(netbk);
1693
1694         if (nr_gops == 0)
1695                 return;
1696
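        /* Carry out all of the queued grant copies via the hypervisor. */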
1697         gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
1698
1699         xen_netbk_tx_submit(netbk);
1700 }
1701
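/*
 * Release a pending TX slot and any slots coalesced behind the same
 * head: a TX response is sent for every request in the chain, the
 * pending ring entries are recycled, and finally the backing page is
 * freed.
 */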
1702 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1703                                   u8 status)
1704 {
1705         struct xenvif *vif;
1706         struct pending_tx_info *pending_tx_info;
1707         pending_ring_idx_t head;
1708         u16 peek; /* peek into next tx request */
1709
1710         BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
1711
1712         /* Already complete? */
1713         if (netbk->mmap_pages[pending_idx] == NULL)
1714                 return;
1715
1716         pending_tx_info = &netbk->pending_tx_info[pending_idx];
1717
1718         vif = pending_tx_info->vif;
1719         head = pending_tx_info->head;
1720
1721         BUG_ON(!pending_tx_is_head(netbk, head));
1722         BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
1723
1724         do {
1725                 pending_ring_idx_t index;
1726                 pending_ring_idx_t idx = pending_index(head);
1727                 u16 info_idx = netbk->pending_ring[idx];
1728
1729                 pending_tx_info = &netbk->pending_tx_info[info_idx];
1730                 make_tx_response(vif, &pending_tx_info->req, status);
1731
1732                 /* Setting any number other than
1733                  * INVALID_PENDING_RING_IDX indicates this slot is
1734                  * starting a new packet / ending a previous packet.
1735                  */
1736                 pending_tx_info->head = 0;
1737
1738                 index = pending_index(netbk->pending_prod++);
1739                 netbk->pending_ring[index] = netbk->pending_ring[info_idx];
1740
1741                 xenvif_put(vif);
1742
1743                 peek = netbk->pending_ring[pending_index(++head)];
1744
1745         } while (!pending_tx_is_head(netbk, peek));
1746
1747         netbk->mmap_pages[pending_idx]->mapping = 0;
1748         put_page(netbk->mmap_pages[pending_idx]);
1749         netbk->mmap_pages[pending_idx] = NULL;
1750 }
1751
1752
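/*
 * Post a completion status for a TX request on the shared ring and
 * notify the frontend over the TX event channel if the ring macros say
 * a notification is required.
 */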
1753 static void make_tx_response(struct xenvif *vif,
1754                              struct xen_netif_tx_request *txp,
1755                              s8       st)
1756 {
1757         RING_IDX i = vif->tx.rsp_prod_pvt;
1758         struct xen_netif_tx_response *resp;
1759         int notify;
1760
1761         resp = RING_GET_RESPONSE(&vif->tx, i);
1762         resp->id     = txp->id;
1763         resp->status = st;
1764
1765         if (txp->flags & XEN_NETTXF_extra_info)
1766                 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1767
1768         vif->tx.rsp_prod_pvt = ++i;
1769         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1770         if (notify)
1771                 notify_remote_via_irq(vif->tx_irq);
1772 }
1773
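/*
 * Fill in the next RX response slot.  A negative status overrides the
 * size field so the frontend sees the error code.  Only the private
 * producer (rsp_prod_pvt) is advanced here; pushing the responses and
 * notifying the frontend is left to the caller.
 */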
1774 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1775                                              u16      id,
1776                                              s8       st,
1777                                              u16      offset,
1778                                              u16      size,
1779                                              u16      flags)
1780 {
1781         RING_IDX i = vif->rx.rsp_prod_pvt;
1782         struct xen_netif_rx_response *resp;
1783
1784         resp = RING_GET_RESPONSE(&vif->rx, i);
1785         resp->offset     = offset;
1786         resp->flags      = flags;
1787         resp->id         = id;
1788         resp->status     = (s16)size;
1789         if (st < 0)
1790                 resp->status = (s16)st;
1791
1792         vif->rx.rsp_prod_pvt = ++i;
1793
1794         return resp;
1795 }
1796
1797 static inline int rx_work_todo(struct xen_netbk *netbk)
1798 {
1799         return !skb_queue_empty(&netbk->rx_queue);
1800 }
1801
1802 static inline int tx_work_todo(struct xen_netbk *netbk)
1803 {
1805         if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
1806              < MAX_PENDING_REQS) &&
1807              !list_empty(&netbk->net_schedule_list))
1808                 return 1;
1809
1810         return 0;
1811 }
1812
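/*
 * Main worker loop, one kernel thread per xen_netbk group: sleep until
 * there is RX or TX work (or the thread is asked to stop), then run
 * the corresponding action.
 */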
1813 static int xen_netbk_kthread(void *data)
1814 {
1815         struct xen_netbk *netbk = data;
1816         while (!kthread_should_stop()) {
1817                 wait_event_interruptible(netbk->wq,
1818                                 rx_work_todo(netbk) ||
1819                                 tx_work_todo(netbk) ||
1820                                 kthread_should_stop());
1821                 cond_resched();
1822
1823                 if (kthread_should_stop())
1824                         break;
1825
1826                 if (rx_work_todo(netbk))
1827                         xen_netbk_rx_action(netbk);
1828
1829                 if (tx_work_todo(netbk))
1830                         xen_netbk_tx_action(netbk);
1831         }
1832
1833         return 0;
1834 }
1835
1836 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1837 {
1838         if (vif->tx.sring)
1839                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1840                                         vif->tx.sring);
1841         if (vif->rx.sring)
1842                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1843                                         vif->rx.sring);
1844 }
1845
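/*
 * Map the TX and RX shared rings granted by the frontend into the
 * backend's address space and initialise the back-ring bookkeeping.
 */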
1846 int xen_netbk_map_frontend_rings(struct xenvif *vif,
1847                                  grant_ref_t tx_ring_ref,
1848                                  grant_ref_t rx_ring_ref)
1849 {
1850         void *addr;
1851         struct xen_netif_tx_sring *txs;
1852         struct xen_netif_rx_sring *rxs;
1853
1854         int err = -ENOMEM;
1855
1856         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1857                                      tx_ring_ref, &addr);
1858         if (err)
1859                 goto err;
1860
1861         txs = (struct xen_netif_tx_sring *)addr;
1862         BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1863
1864         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1865                                      rx_ring_ref, &addr);
1866         if (err)
1867                 goto err;
1868
1869         rxs = (struct xen_netif_rx_sring *)addr;
1870         BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1871
1872         vif->rx_req_cons_peek = 0;
1873
1874         return 0;
1875
1876 err:
1877         xen_netbk_unmap_frontend_rings(vif);
1878         return err;
1879 }
1880
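/*
 * Module init: validate module parameters, allocate one xen_netbk group
 * per online CPU, start a kernel thread bound to each CPU, and finally
 * register the backend with xenbus.
 */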
1881 static int __init netback_init(void)
1882 {
1883         int i;
1884         int rc = 0;
1885         int group;
1886
1887         if (!xen_domain())
1888                 return -ENODEV;
1889
1890         if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1891                 printk(KERN_INFO
1892                        "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1893                        fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1894                 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1895         }
1896
1897         xen_netbk_group_nr = num_online_cpus();
1898         xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1899         if (!xen_netbk)
1900                 return -ENOMEM;
1901
1902         for (group = 0; group < xen_netbk_group_nr; group++) {
1903                 struct xen_netbk *netbk = &xen_netbk[group];
1904                 skb_queue_head_init(&netbk->rx_queue);
1905                 skb_queue_head_init(&netbk->tx_queue);
1906
1907                 init_timer(&netbk->net_timer);
1908                 netbk->net_timer.data = (unsigned long)netbk;
1909                 netbk->net_timer.function = xen_netbk_alarm;
1910
1911                 netbk->pending_cons = 0;
1912                 netbk->pending_prod = MAX_PENDING_REQS;
1913                 for (i = 0; i < MAX_PENDING_REQS; i++)
1914                         netbk->pending_ring[i] = i;
1915
1916                 init_waitqueue_head(&netbk->wq);
1917                 netbk->task = kthread_create(xen_netbk_kthread,
1918                                              (void *)netbk,
1919                                              "netback/%u", group);
1920
1921                 if (IS_ERR(netbk->task)) {
1922                         printk(KERN_ALERT "kthread_create() failed in netback\n");
1923                         del_timer(&netbk->net_timer);
1924                         rc = PTR_ERR(netbk->task);
1925                         goto failed_init;
1926                 }
1927
1928                 kthread_bind(netbk->task, group);
1929
1930                 INIT_LIST_HEAD(&netbk->net_schedule_list);
1931
1932                 spin_lock_init(&netbk->net_schedule_list_lock);
1933
1934                 atomic_set(&netbk->netfront_count, 0);
1935
1936                 wake_up_process(netbk->task);
1937         }
1938
1939         rc = xenvif_xenbus_init();
1940         if (rc)
1941                 goto failed_init;
1942
1943         return 0;
1944
1945 failed_init:
1946         while (--group >= 0) {
1947                 struct xen_netbk *netbk = &xen_netbk[group];
1948                 del_timer(&netbk->net_timer);
1949                 kthread_stop(netbk->task);
1950         }
1951         vfree(xen_netbk);
1952         return rc;
1954 }
1955
1956 module_init(netback_init);
1957
1958 static void __exit netback_fini(void)
1959 {
1960         int i, j;
1961
1962         xenvif_xenbus_fini();
1963
1964         for (i = 0; i < xen_netbk_group_nr; i++) {
1965                 struct xen_netbk *netbk = &xen_netbk[i];
1966                 del_timer_sync(&netbk->net_timer);
1967                 kthread_stop(netbk->task);
1968                 for (j = 0; j < MAX_PENDING_REQS; j++) {
1969                         if (netbk->mmap_pages[j])
1970                                 __free_page(netbk->mmap_pages[j]);
1971                 }
1972         }
1973
1974         vfree(xen_netbk);
1975 }
1976 module_exit(netback_fini);
1977
1978 MODULE_LICENSE("Dual BSD/GPL");
1979 MODULE_ALIAS("xen-backend:vif");