arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "mmu.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36
37 /*
38  * Setting this variable to true enables Two-Dimensional Paging (TDP),
39  * where the hardware walks two page tables:
40  * 1. the guest-virtual to guest-physical translation
41  * 2. while doing 1., the guest-physical to host-physical translation
42  * If the hardware supports this, we don't need to do shadow paging.
43  */
44 bool tdp_enabled = false;
45
46 #undef MMU_DEBUG
47
48 #undef AUDIT
49
50 #ifdef AUDIT
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52 #else
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54 #endif
55
56 #ifdef MMU_DEBUG
57
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61 #else
62
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
65
66 #endif
67
68 #if defined(MMU_DEBUG) || defined(AUDIT)
69 static int dbg = 1;
70 #endif
71
72 #ifndef MMU_DEBUG
73 #define ASSERT(x) do { } while (0)
74 #else
75 #define ASSERT(x)                                                       \
76         if (!(x)) {                                                     \
77                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
78                        __FILE__, __LINE__, #x);                         \
79         }
80 #endif
81
82 #define PT_FIRST_AVAIL_BITS_SHIFT 9
83 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
84
85 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
86
87 #define PT64_LEVEL_BITS 9
88
89 #define PT64_LEVEL_SHIFT(level) \
90                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
91
92 #define PT64_LEVEL_MASK(level) \
93                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
94
95 #define PT64_INDEX(address, level)\
96         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
97
98
99 #define PT32_LEVEL_BITS 10
100
101 #define PT32_LEVEL_SHIFT(level) \
102                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
103
104 #define PT32_LEVEL_MASK(level) \
105                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
106
107 #define PT32_INDEX(address, level)\
108         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
109
110
111 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
112 #define PT64_DIR_BASE_ADDR_MASK \
113         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
114
115 #define PT32_BASE_ADDR_MASK PAGE_MASK
116 #define PT32_DIR_BASE_ADDR_MASK \
117         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
118
119 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
120                         | PT64_NX_MASK)
121
122 #define PFERR_PRESENT_MASK (1U << 0)
123 #define PFERR_WRITE_MASK (1U << 1)
124 #define PFERR_USER_MASK (1U << 2)
125 #define PFERR_FETCH_MASK (1U << 4)
126
127 #define PT_DIRECTORY_LEVEL 2
128 #define PT_PAGE_TABLE_LEVEL 1
129
130 #define RMAP_EXT 4
131
132 #define ACC_EXEC_MASK    1
133 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
134 #define ACC_USER_MASK    PT_USER_MASK
135 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
136
137 struct kvm_pv_mmu_op_buffer {
138         void *ptr;
139         unsigned len;
140         unsigned processed;
141         char buf[512] __aligned(sizeof(long));
142 };
143
144 struct kvm_rmap_desc {
145         u64 *shadow_ptes[RMAP_EXT];
146         struct kvm_rmap_desc *more;
147 };
148
149 static struct kmem_cache *pte_chain_cache;
150 static struct kmem_cache *rmap_desc_cache;
151 static struct kmem_cache *mmu_page_header_cache;
152
153 static u64 __read_mostly shadow_trap_nonpresent_pte;
154 static u64 __read_mostly shadow_notrap_nonpresent_pte;
155 static u64 __read_mostly shadow_base_present_pte;
156 static u64 __read_mostly shadow_nx_mask;
157 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
158 static u64 __read_mostly shadow_user_mask;
159 static u64 __read_mostly shadow_accessed_mask;
160 static u64 __read_mostly shadow_dirty_mask;
161
162 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
163 {
164         shadow_trap_nonpresent_pte = trap_pte;
165         shadow_notrap_nonpresent_pte = notrap_pte;
166 }
167 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
168
169 void kvm_mmu_set_base_ptes(u64 base_pte)
170 {
171         shadow_base_present_pte = base_pte;
172 }
173 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
174
175 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
176                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
177 {
178         shadow_user_mask = user_mask;
179         shadow_accessed_mask = accessed_mask;
180         shadow_dirty_mask = dirty_mask;
181         shadow_nx_mask = nx_mask;
182         shadow_x_mask = x_mask;
183 }
184 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
185
186 static int is_write_protection(struct kvm_vcpu *vcpu)
187 {
188         return vcpu->arch.cr0 & X86_CR0_WP;
189 }
190
191 static int is_cpuid_PSE36(void)
192 {
193         return 1;
194 }
195
196 static int is_nx(struct kvm_vcpu *vcpu)
197 {
198         return vcpu->arch.shadow_efer & EFER_NX;
199 }
200
201 static int is_present_pte(unsigned long pte)
202 {
203         return pte & PT_PRESENT_MASK;
204 }
205
206 static int is_shadow_present_pte(u64 pte)
207 {
208         return pte != shadow_trap_nonpresent_pte
209                 && pte != shadow_notrap_nonpresent_pte;
210 }
211
212 static int is_large_pte(u64 pte)
213 {
214         return pte & PT_PAGE_SIZE_MASK;
215 }
216
217 static int is_writeble_pte(unsigned long pte)
218 {
219         return pte & PT_WRITABLE_MASK;
220 }
221
222 static int is_dirty_pte(unsigned long pte)
223 {
224         return pte & shadow_dirty_mask;
225 }
226
227 static int is_rmap_pte(u64 pte)
228 {
229         return is_shadow_present_pte(pte);
230 }
231
232 static pfn_t spte_to_pfn(u64 pte)
233 {
234         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
235 }
236
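/*
 * A PSE-36 large pte stores the high bits of the frame number in its
 * middle bits (PT32_DIR_PSE36_MASK).  Pull them out and return them as a
 * gfn offset to be added to the gfn taken from the low bits.
 */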
237 static gfn_t pse36_gfn_delta(u32 gpte)
238 {
239         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
240
241         return (gpte & PT32_DIR_PSE36_MASK) << shift;
242 }
243
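/*
 * Update a shadow pte with a single atomic 64-bit store.  set_64bit() is a
 * plain write on 64-bit hosts and a cmpxchg8b loop on 32-bit hosts, so a
 * cpu walking the shadow page table concurrently never sees a torn spte.
 */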
244 static void set_shadow_pte(u64 *sptep, u64 spte)
245 {
246 #ifdef CONFIG_X86_64
247         set_64bit((unsigned long *)sptep, spte);
248 #else
249         set_64bit((unsigned long long *)sptep, spte);
250 #endif
251 }
252
253 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
254                                   struct kmem_cache *base_cache, int min)
255 {
256         void *obj;
257
258         if (cache->nobjs >= min)
259                 return 0;
260         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
261                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
262                 if (!obj)
263                         return -ENOMEM;
264                 cache->objects[cache->nobjs++] = obj;
265         }
266         return 0;
267 }
268
269 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
270 {
271         while (mc->nobjs)
272                 kfree(mc->objects[--mc->nobjs]);
273 }
274
275 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
276                                        int min)
277 {
278         struct page *page;
279
280         if (cache->nobjs >= min)
281                 return 0;
282         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
283                 page = alloc_page(GFP_KERNEL);
284                 if (!page)
285                         return -ENOMEM;
286                 set_page_private(page, 0);
287                 cache->objects[cache->nobjs++] = page_address(page);
288         }
289         return 0;
290 }
291
292 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
293 {
294         while (mc->nobjs)
295                 free_page((unsigned long)mc->objects[--mc->nobjs]);
296 }
297
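/*
 * Pre-fill the per-vcpu object caches before mmu_lock is taken, so the
 * fault and pte-write paths can allocate pte chains, rmap descriptors and
 * shadow pages without sleeping or failing under the spinlock.
 */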
298 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
299 {
300         int r;
301
302         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
303                                    pte_chain_cache, 4);
304         if (r)
305                 goto out;
306         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
307                                    rmap_desc_cache, 1);
308         if (r)
309                 goto out;
310         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
311         if (r)
312                 goto out;
313         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
314                                    mmu_page_header_cache, 4);
315 out:
316         return r;
317 }
318
319 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
320 {
321         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
322         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
323         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
324         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
325 }
326
327 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
328                                     size_t size)
329 {
330         void *p;
331
332         BUG_ON(!mc->nobjs);
333         p = mc->objects[--mc->nobjs];
334         memset(p, 0, size);
335         return p;
336 }
337
338 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
339 {
340         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
341                                       sizeof(struct kvm_pte_chain));
342 }
343
344 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
345 {
346         kfree(pc);
347 }
348
349 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
350 {
351         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
352                                       sizeof(struct kvm_rmap_desc));
353 }
354
355 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
356 {
357         kfree(rd);
358 }
359
360 /*
361  * Return the pointer to the largepage write count for a given
362  * gfn, handling slots that are not large page aligned.
363  */
364 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
365 {
366         unsigned long idx;
367
368         idx = (gfn / KVM_PAGES_PER_HPAGE) -
369               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
370         return &slot->lpage_info[idx].write_count;
371 }
372
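/*
 * write_count tracks, per large-page-sized region, how many pages in the
 * region have been write-protected because they are shadowed guest page
 * tables.  A non-zero count means the region must not be mapped by a large
 * spte; see has_wrprotected_page() and is_largepage_backed().
 */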
373 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
374 {
375         int *write_count;
376
377         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
378         *write_count += 1;
379 }
380
381 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
382 {
383         int *write_count;
384
385         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
386         *write_count -= 1;
387         WARN_ON(*write_count < 0);
388 }
389
390 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
391 {
392         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
393         int *largepage_idx;
394
395         if (slot) {
396                 largepage_idx = slot_largepage_idx(gfn, slot);
397                 return *largepage_idx;
398         }
399
400         return 1;
401 }
402
403 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
404 {
405         struct vm_area_struct *vma;
406         unsigned long addr;
407
408         addr = gfn_to_hva(kvm, gfn);
409         if (kvm_is_error_hva(addr))
410                 return 0;
411
412         vma = find_vma(current->mm, addr);
413         if (vma && is_vm_hugetlb_page(vma))
414                 return 1;
415
416         return 0;
417 }
418
419 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
420 {
421         struct kvm_memory_slot *slot;
422
423         if (has_wrprotected_page(vcpu->kvm, large_gfn))
424                 return 0;
425
426         if (!host_largepage_backed(vcpu->kvm, large_gfn))
427                 return 0;
428
429         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
430         if (slot && slot->dirty_bitmap)
431                 return 0;
432
433         return 1;
434 }
435
436 /*
437  * Take gfn and return the reverse mapping to it.
438  * Note: gfn must be unaliased before this function gets called
439  */
440
441 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
442 {
443         struct kvm_memory_slot *slot;
444         unsigned long idx;
445
446         slot = gfn_to_memslot(kvm, gfn);
447         if (!lpage)
448                 return &slot->rmap[gfn - slot->base_gfn];
449
450         idx = (gfn / KVM_PAGES_PER_HPAGE) -
451               (slot->base_gfn / KVM_PAGES_PER_HPAGE);
452
453         return &slot->lpage_info[idx].rmap_pde;
454 }
455
456 /*
457  * Reverse mapping data structures:
458  *
459  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
460  * that points to page_address(page).
461  *
462  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
463  * containing more mappings.
464  */
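/*
 * Add spte to the rmap chain of gfn: the first mapping is stored directly
 * in *rmapp, the second converts it into a kvm_rmap_desc, and later ones
 * fill RMAP_EXT-entry descriptors chained through ->more.
 */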
465 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
466 {
467         struct kvm_mmu_page *sp;
468         struct kvm_rmap_desc *desc;
469         unsigned long *rmapp;
470         int i;
471
472         if (!is_rmap_pte(*spte))
473                 return;
474         gfn = unalias_gfn(vcpu->kvm, gfn);
475         sp = page_header(__pa(spte));
476         sp->gfns[spte - sp->spt] = gfn;
477         rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
478         if (!*rmapp) {
479                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
480                 *rmapp = (unsigned long)spte;
481         } else if (!(*rmapp & 1)) {
482                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
483                 desc = mmu_alloc_rmap_desc(vcpu);
484                 desc->shadow_ptes[0] = (u64 *)*rmapp;
485                 desc->shadow_ptes[1] = spte;
486                 *rmapp = (unsigned long)desc | 1;
487         } else {
488                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
489                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
490                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
491                         desc = desc->more;
492                 if (desc->shadow_ptes[RMAP_EXT-1]) {
493                         desc->more = mmu_alloc_rmap_desc(vcpu);
494                         desc = desc->more;
495                 }
496                 for (i = 0; desc->shadow_ptes[i]; ++i)
497                         ;
498                 desc->shadow_ptes[i] = spte;
499         }
500 }
501
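/*
 * Remove slot i of desc by moving the descriptor's last used entry into it.
 * If that leaves the descriptor empty it is unlinked from the chain (or
 * *rmapp is cleared when it was the only descriptor) and freed.
 */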
502 static void rmap_desc_remove_entry(unsigned long *rmapp,
503                                    struct kvm_rmap_desc *desc,
504                                    int i,
505                                    struct kvm_rmap_desc *prev_desc)
506 {
507         int j;
508
509         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
510                 ;
511         desc->shadow_ptes[i] = desc->shadow_ptes[j];
512         desc->shadow_ptes[j] = NULL;
513         if (j != 0)
514                 return;
515         if (!prev_desc && !desc->more)
516                 *rmapp = (unsigned long)desc->shadow_ptes[0];
517         else
518                 if (prev_desc)
519                         prev_desc->more = desc->more;
520                 else
521                         *rmapp = (unsigned long)desc->more | 1;
522         mmu_free_rmap_desc(desc);
523 }
524
525 static void rmap_remove(struct kvm *kvm, u64 *spte)
526 {
527         struct kvm_rmap_desc *desc;
528         struct kvm_rmap_desc *prev_desc;
529         struct kvm_mmu_page *sp;
530         pfn_t pfn;
531         unsigned long *rmapp;
532         int i;
533
534         if (!is_rmap_pte(*spte))
535                 return;
536         sp = page_header(__pa(spte));
537         pfn = spte_to_pfn(*spte);
538         if (*spte & shadow_accessed_mask)
539                 kvm_set_pfn_accessed(pfn);
540         if (is_writeble_pte(*spte))
541                 kvm_release_pfn_dirty(pfn);
542         else
543                 kvm_release_pfn_clean(pfn);
544         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
545         if (!*rmapp) {
546                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
547                 BUG();
548         } else if (!(*rmapp & 1)) {
549                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
550                 if ((u64 *)*rmapp != spte) {
551                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
552                                spte, *spte);
553                         BUG();
554                 }
555                 *rmapp = 0;
556         } else {
557                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
558                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
559                 prev_desc = NULL;
560                 while (desc) {
561                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
562                                 if (desc->shadow_ptes[i] == spte) {
563                                         rmap_desc_remove_entry(rmapp,
564                                                                desc, i,
565                                                                prev_desc);
566                                         return;
567                                 }
568                         prev_desc = desc;
569                         desc = desc->more;
570                 }
571                 BUG();
572         }
573 }
574
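/*
 * Iterate over the sptes that map a gfn: pass spte == NULL to get the
 * first mapping, then pass the previous return value to get the next one.
 * Returns NULL when there are no more mappings.
 */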
575 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
576 {
577         struct kvm_rmap_desc *desc;
578         struct kvm_rmap_desc *prev_desc;
579         u64 *prev_spte;
580         int i;
581
582         if (!*rmapp)
583                 return NULL;
584         else if (!(*rmapp & 1)) {
585                 if (!spte)
586                         return (u64 *)*rmapp;
587                 return NULL;
588         }
589         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
590         prev_desc = NULL;
591         prev_spte = NULL;
592         while (desc) {
593                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
594                         if (prev_spte == spte)
595                                 return desc->shadow_ptes[i];
596                         prev_spte = desc->shadow_ptes[i];
597                 }
598                 desc = desc->more;
599         }
600         return NULL;
601 }
602
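/*
 * Remove write access from every spte that maps gfn.  If any mapping was
 * writable the page is marked dirty, since the guest may have written
 * through it.  A writable large spte covering gfn is zapped entirely, as
 * it cannot stay mapped while part of its range is write-protected.
 * Remote TLBs are flushed when something changed, and the shadowed-page
 * count of the region is raised via account_shadowed().
 */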
603 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
604 {
605         unsigned long *rmapp;
606         u64 *spte;
607         int write_protected = 0;
608
609         gfn = unalias_gfn(kvm, gfn);
610         rmapp = gfn_to_rmap(kvm, gfn, 0);
611
612         spte = rmap_next(kvm, rmapp, NULL);
613         while (spte) {
614                 BUG_ON(!spte);
615                 BUG_ON(!(*spte & PT_PRESENT_MASK));
616                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
617                 if (is_writeble_pte(*spte)) {
618                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
619                         write_protected = 1;
620                 }
621                 spte = rmap_next(kvm, rmapp, spte);
622         }
623         if (write_protected) {
624                 pfn_t pfn;
625
626                 spte = rmap_next(kvm, rmapp, NULL);
627                 pfn = spte_to_pfn(*spte);
628                 kvm_set_pfn_dirty(pfn);
629         }
630
631         /* check for huge page mappings */
632         rmapp = gfn_to_rmap(kvm, gfn, 1);
633         spte = rmap_next(kvm, rmapp, NULL);
634         while (spte) {
635                 BUG_ON(!spte);
636                 BUG_ON(!(*spte & PT_PRESENT_MASK));
637                 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
638                 pgprintk("rmap_write_protect(large): spte %p %llx %llx\n", spte, *spte, gfn);
639                 if (is_writeble_pte(*spte)) {
640                         rmap_remove(kvm, spte);
641                         --kvm->stat.lpages;
642                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
643                         write_protected = 1;
644                 }
645                 spte = rmap_next(kvm, rmapp, spte);
646         }
647
648         if (write_protected)
649                 kvm_flush_remote_tlbs(kvm);
650
651         account_shadowed(kvm, gfn);
652 }
653
654 #ifdef MMU_DEBUG
655 static int is_empty_shadow_page(u64 *spt)
656 {
657         u64 *pos;
658         u64 *end;
659
660         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
661                 if (is_shadow_present_pte(*pos)) {
662                         printk(KERN_ERR "%s: %p %llx\n", __func__,
663                                pos, *pos);
664                         return 0;
665                 }
666         return 1;
667 }
668 #endif
669
670 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
671 {
672         ASSERT(is_empty_shadow_page(sp->spt));
673         list_del(&sp->link);
674         __free_page(virt_to_page(sp->spt));
675         __free_page(virt_to_page(sp->gfns));
676         kfree(sp);
677         ++kvm->arch.n_free_mmu_pages;
678 }
679
680 static unsigned kvm_page_table_hashfn(gfn_t gfn)
681 {
682         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
683 }
684
685 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
686                                                u64 *parent_pte)
687 {
688         struct kvm_mmu_page *sp;
689
690         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
691         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
692         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
693         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
694         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
695         ASSERT(is_empty_shadow_page(sp->spt));
696         sp->slot_bitmap = 0;
697         sp->multimapped = 0;
698         sp->parent_pte = parent_pte;
699         --vcpu->kvm->arch.n_free_mmu_pages;
700         return sp;
701 }
702
703 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
704                                     struct kvm_mmu_page *sp, u64 *parent_pte)
705 {
706         struct kvm_pte_chain *pte_chain;
707         struct hlist_node *node;
708         int i;
709
710         if (!parent_pte)
711                 return;
712         if (!sp->multimapped) {
713                 u64 *old = sp->parent_pte;
714
715                 if (!old) {
716                         sp->parent_pte = parent_pte;
717                         return;
718                 }
719                 sp->multimapped = 1;
720                 pte_chain = mmu_alloc_pte_chain(vcpu);
721                 INIT_HLIST_HEAD(&sp->parent_ptes);
722                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
723                 pte_chain->parent_ptes[0] = old;
724         }
725         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
726                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
727                         continue;
728                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
729                         if (!pte_chain->parent_ptes[i]) {
730                                 pte_chain->parent_ptes[i] = parent_pte;
731                                 return;
732                         }
733         }
734         pte_chain = mmu_alloc_pte_chain(vcpu);
735         BUG_ON(!pte_chain);
736         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
737         pte_chain->parent_ptes[0] = parent_pte;
738 }
739
740 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
741                                        u64 *parent_pte)
742 {
743         struct kvm_pte_chain *pte_chain;
744         struct hlist_node *node;
745         int i;
746
747         if (!sp->multimapped) {
748                 BUG_ON(sp->parent_pte != parent_pte);
749                 sp->parent_pte = NULL;
750                 return;
751         }
752         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
753                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
754                         if (!pte_chain->parent_ptes[i])
755                                 break;
756                         if (pte_chain->parent_ptes[i] != parent_pte)
757                                 continue;
758                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
759                                 && pte_chain->parent_ptes[i + 1]) {
760                                 pte_chain->parent_ptes[i]
761                                         = pte_chain->parent_ptes[i + 1];
762                                 ++i;
763                         }
764                         pte_chain->parent_ptes[i] = NULL;
765                         if (i == 0) {
766                                 hlist_del(&pte_chain->link);
767                                 mmu_free_pte_chain(pte_chain);
768                                 if (hlist_empty(&sp->parent_ptes)) {
769                                         sp->multimapped = 0;
770                                         sp->parent_pte = NULL;
771                                 }
772                         }
773                         return;
774                 }
775         BUG();
776 }
777
778 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
779 {
780         unsigned index;
781         struct hlist_head *bucket;
782         struct kvm_mmu_page *sp;
783         struct hlist_node *node;
784
785         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
786         index = kvm_page_table_hashfn(gfn);
787         bucket = &kvm->arch.mmu_page_hash[index];
788         hlist_for_each_entry(sp, node, bucket, hash_link)
789                 if (sp->gfn == gfn && !sp->role.metaphysical
790                     && !sp->role.invalid) {
791                         pgprintk("%s: found role %x\n",
792                                  __func__, sp->role.word);
793                         return sp;
794                 }
795         return NULL;
796 }
797
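/*
 * Find or create the shadow page for a guest page table.  Shadow pages are
 * hashed by gfn and matched on the full role word (level, access, paging
 * mode, ...).  When a 32-bit guest is shadowed by 64-bit ptes one guest
 * table needs several shadow pages, and role.quadrant records which part
 * of the guest table this one covers.  A freshly created non-metaphysical
 * page is write-protected so that guest writes to the table are trapped.
 */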
798 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
799                                              gfn_t gfn,
800                                              gva_t gaddr,
801                                              unsigned level,
802                                              int metaphysical,
803                                              unsigned access,
804                                              u64 *parent_pte)
805 {
806         union kvm_mmu_page_role role;
807         unsigned index;
808         unsigned quadrant;
809         struct hlist_head *bucket;
810         struct kvm_mmu_page *sp;
811         struct hlist_node *node;
812
813         role.word = 0;
814         role.glevels = vcpu->arch.mmu.root_level;
815         role.level = level;
816         role.metaphysical = metaphysical;
817         role.access = access;
818         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
819                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
820                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
821                 role.quadrant = quadrant;
822         }
823         pgprintk("%s: looking gfn %lx role %x\n", __func__,
824                  gfn, role.word);
825         index = kvm_page_table_hashfn(gfn);
826         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
827         hlist_for_each_entry(sp, node, bucket, hash_link)
828                 if (sp->gfn == gfn && sp->role.word == role.word) {
829                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
830                         pgprintk("%s: found\n", __func__);
831                         return sp;
832                 }
833         ++vcpu->kvm->stat.mmu_cache_miss;
834         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
835         if (!sp)
836                 return sp;
837         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
838         sp->gfn = gfn;
839         sp->role = role;
840         hlist_add_head(&sp->hash_link, bucket);
841         if (!metaphysical)
842                 rmap_write_protect(vcpu->kvm, gfn);
843         vcpu->arch.mmu.prefetch_page(vcpu, sp);
844         return sp;
845 }
846
847 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
848                                          struct kvm_mmu_page *sp)
849 {
850         unsigned i;
851         u64 *pt;
852         u64 ent;
853
854         pt = sp->spt;
855
856         if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
857                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
858                         if (is_shadow_present_pte(pt[i]))
859                                 rmap_remove(kvm, &pt[i]);
860                         pt[i] = shadow_trap_nonpresent_pte;
861                 }
862                 kvm_flush_remote_tlbs(kvm);
863                 return;
864         }
865
866         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
867                 ent = pt[i];
868
869                 if (is_shadow_present_pte(ent)) {
870                         if (!is_large_pte(ent)) {
871                                 ent &= PT64_BASE_ADDR_MASK;
872                                 mmu_page_remove_parent_pte(page_header(ent),
873                                                            &pt[i]);
874                         } else {
875                                 --kvm->stat.lpages;
876                                 rmap_remove(kvm, &pt[i]);
877                         }
878                 }
879                 pt[i] = shadow_trap_nonpresent_pte;
880         }
881         kvm_flush_remote_tlbs(kvm);
882 }
883
884 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
885 {
886         mmu_page_remove_parent_pte(sp, parent_pte);
887 }
888
889 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
890 {
891         int i;
892
893         for (i = 0; i < KVM_MAX_VCPUS; ++i)
894                 if (kvm->vcpus[i])
895                         kvm->vcpus[i]->arch.last_pte_updated = NULL;
896 }
897
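/*
 * Tear down a shadow page: detach it from every parent spte, zap its
 * children, and free it.  If the page is still in use as a root it is only
 * marked invalid and remote vcpus are asked to reload their mmu, so it
 * gets freed once the last root reference is dropped.
 */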
898 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
899 {
900         u64 *parent_pte;
901
902         ++kvm->stat.mmu_shadow_zapped;
903         while (sp->multimapped || sp->parent_pte) {
904                 if (!sp->multimapped)
905                         parent_pte = sp->parent_pte;
906                 else {
907                         struct kvm_pte_chain *chain;
908
909                         chain = container_of(sp->parent_ptes.first,
910                                              struct kvm_pte_chain, link);
911                         parent_pte = chain->parent_ptes[0];
912                 }
913                 BUG_ON(!parent_pte);
914                 kvm_mmu_put_page(sp, parent_pte);
915                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
916         }
917         kvm_mmu_page_unlink_children(kvm, sp);
918         if (!sp->root_count) {
919                 if (!sp->role.metaphysical)
920                         unaccount_shadowed(kvm, sp->gfn);
921                 hlist_del(&sp->hash_link);
922                 kvm_mmu_free_page(kvm, sp);
923         } else {
924                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
925                 sp->role.invalid = 1;
926                 kvm_reload_remote_mmus(kvm);
927         }
928         kvm_mmu_reset_last_pte_updated(kvm);
929 }
930
931 /*
932  * Changing the number of mmu pages allocated to the vm
933  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
934  */
935 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
936 {
937         /*
938          * If we set the number of mmu pages to be smaller than the number
939          * of active pages, we must free some mmu pages before we change
940          * the value.
941          */
942
943         if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
944             kvm_nr_mmu_pages) {
945                 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
946                                        - kvm->arch.n_free_mmu_pages;
947
948                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
949                         struct kvm_mmu_page *page;
950
951                         page = container_of(kvm->arch.active_mmu_pages.prev,
952                                             struct kvm_mmu_page, link);
953                         kvm_mmu_zap_page(kvm, page);
954                         n_used_mmu_pages--;
955                 }
956                 kvm->arch.n_free_mmu_pages = 0;
957         }
958         else
959                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
960                                          - kvm->arch.n_alloc_mmu_pages;
961
962         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
963 }
964
965 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
966 {
967         unsigned index;
968         struct hlist_head *bucket;
969         struct kvm_mmu_page *sp;
970         struct hlist_node *node, *n;
971         int r;
972
973         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
974         r = 0;
975         index = kvm_page_table_hashfn(gfn);
976         bucket = &kvm->arch.mmu_page_hash[index];
977         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
978                 if (sp->gfn == gfn && !sp->role.metaphysical) {
979                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
980                                  sp->role.word);
981                         kvm_mmu_zap_page(kvm, sp);
982                         r = 1;
983                 }
984         return r;
985 }
986
987 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
988 {
989         struct kvm_mmu_page *sp;
990
991         while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
992                 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
993                 kvm_mmu_zap_page(kvm, sp);
994         }
995 }
996
997 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
998 {
999         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1000         struct kvm_mmu_page *sp = page_header(__pa(pte));
1001
1002         __set_bit(slot, &sp->slot_bitmap);
1003 }
1004
1005 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1006 {
1007         struct page *page;
1008
1009         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1010
1011         if (gpa == UNMAPPED_GVA)
1012                 return NULL;
1013
1014         down_read(&current->mm->mmap_sem);
1015         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1016         up_read(&current->mm->mmap_sem);
1017
1018         return page;
1019 }
1020
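/*
 * Install a shadow pte: build the spte from the guest access bits and the
 * host pfn, handle replacing an existing mapping, and drop write access if
 * the gfn is itself a shadowed page table (it must stay write-protected),
 * reporting that case through *ptwrite.  The rmap, dirty log and
 * large-page statistics are kept up to date as well.
 */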
1021 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1022                          unsigned pt_access, unsigned pte_access,
1023                          int user_fault, int write_fault, int dirty,
1024                          int *ptwrite, int largepage, gfn_t gfn,
1025                          pfn_t pfn, bool speculative)
1026 {
1027         u64 spte;
1028         int was_rmapped = 0;
1029         int was_writeble = is_writeble_pte(*shadow_pte);
1030
1031         pgprintk("%s: spte %llx access %x write_fault %d"
1032                  " user_fault %d gfn %lx\n",
1033                  __func__, *shadow_pte, pt_access,
1034                  write_fault, user_fault, gfn);
1035
1036         if (is_rmap_pte(*shadow_pte)) {
1037                 /*
1038                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1039                  * the parent of the now unreachable PTE.
1040                  */
1041                 if (largepage && !is_large_pte(*shadow_pte)) {
1042                         struct kvm_mmu_page *child;
1043                         u64 pte = *shadow_pte;
1044
1045                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1046                         mmu_page_remove_parent_pte(child, shadow_pte);
1047                 } else if (pfn != spte_to_pfn(*shadow_pte)) {
1048                         pgprintk("hfn old %lx new %lx\n",
1049                                  spte_to_pfn(*shadow_pte), pfn);
1050                         rmap_remove(vcpu->kvm, shadow_pte);
1051                 } else {
1052                         if (largepage)
1053                                 was_rmapped = is_large_pte(*shadow_pte);
1054                         else
1055                                 was_rmapped = 1;
1056                 }
1057         }
1058
1059         /*
1060          * We don't set the accessed bit, since we sometimes want to see
1061          * whether the guest actually used the pte (in order to detect
1062          * demand paging).
1063          */
1064         spte = shadow_base_present_pte | shadow_dirty_mask;
1065         if (!speculative)
1066                 pte_access |= PT_ACCESSED_MASK;
1067         if (!dirty)
1068                 pte_access &= ~ACC_WRITE_MASK;
1069         if (pte_access & ACC_EXEC_MASK)
1070                 spte |= shadow_x_mask;
1071         else
1072                 spte |= shadow_nx_mask;
1073         if (pte_access & ACC_USER_MASK)
1074                 spte |= shadow_user_mask;
1075         if (largepage)
1076                 spte |= PT_PAGE_SIZE_MASK;
1077
1078         spte |= (u64)pfn << PAGE_SHIFT;
1079
1080         if ((pte_access & ACC_WRITE_MASK)
1081             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1082                 struct kvm_mmu_page *shadow;
1083
1084                 spte |= PT_WRITABLE_MASK;
1085                 if (user_fault) {
1086                         mmu_unshadow(vcpu->kvm, gfn);
1087                         goto unshadowed;
1088                 }
1089
1090                 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1091                 if (shadow ||
1092                    (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
1093                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1094                                  __func__, gfn);
1095                         pte_access &= ~ACC_WRITE_MASK;
1096                         if (is_writeble_pte(spte)) {
1097                                 spte &= ~PT_WRITABLE_MASK;
1098                                 kvm_x86_ops->tlb_flush(vcpu);
1099                         }
1100                         if (write_fault)
1101                                 *ptwrite = 1;
1102                 }
1103         }
1104
1105 unshadowed:
1106
1107         if (pte_access & ACC_WRITE_MASK)
1108                 mark_page_dirty(vcpu->kvm, gfn);
1109
1110         pgprintk("%s: setting spte %llx\n", __func__, spte);
1111         pgprintk("instantiating %s PTE (%s) at %lx (%llx) addr %p\n",
1112                  (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
1113                  (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
1114         set_shadow_pte(shadow_pte, spte);
1115         if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
1116             && (spte & PT_PRESENT_MASK))
1117                 ++vcpu->kvm->stat.lpages;
1118
1119         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1120         if (!was_rmapped) {
1121                 rmap_add(vcpu, shadow_pte, gfn, largepage);
1122                 if (!is_rmap_pte(*shadow_pte))
1123                         kvm_release_pfn_clean(pfn);
1124         } else {
1125                 if (was_writeble)
1126                         kvm_release_pfn_dirty(pfn);
1127                 else
1128                         kvm_release_pfn_clean(pfn);
1129         }
1130         if (!ptwrite || !*ptwrite)
1131                 vcpu->arch.last_pte_updated = shadow_pte;
1132 }
1133
1134 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1135 {
1136 }
1137
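/*
 * Build a direct gpa -> pfn mapping by walking the shadow page table from
 * the root: a 4k leaf spte is installed at level 1, a large leaf at level
 * 2 for large pages, and missing intermediate tables are allocated as
 * metaphysical shadow pages (they shadow no guest table) along the way.
 */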
1138 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1139                            int largepage, gfn_t gfn, pfn_t pfn,
1140                            int level)
1141 {
1142         hpa_t table_addr = vcpu->arch.mmu.root_hpa;
1143         int pt_write = 0;
1144
1145         for (; ; level--) {
1146                 u32 index = PT64_INDEX(v, level);
1147                 u64 *table;
1148
1149                 ASSERT(VALID_PAGE(table_addr));
1150                 table = __va(table_addr);
1151
1152                 if (level == 1) {
1153                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1154                                      0, write, 1, &pt_write, 0, gfn, pfn, false);
1155                         return pt_write;
1156                 }
1157
1158                 if (largepage && level == 2) {
1159                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1160                                      0, write, 1, &pt_write, 1, gfn, pfn, false);
1161                         return pt_write;
1162                 }
1163
1164                 if (table[index] == shadow_trap_nonpresent_pte) {
1165                         struct kvm_mmu_page *new_table;
1166                         gfn_t pseudo_gfn;
1167
1168                         pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
1169                                 >> PAGE_SHIFT;
1170                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
1171                                                      v, level - 1,
1172                                                      1, ACC_ALL, &table[index]);
1173                         if (!new_table) {
1174                                 pgprintk("nonpaging_map: ENOMEM\n");
1175                                 kvm_release_pfn_clean(pfn);
1176                                 return -ENOMEM;
1177                         }
1178
1179                         table[index] = __pa(new_table->spt)
1180                                 | PT_PRESENT_MASK | PT_WRITABLE_MASK
1181                                 | shadow_user_mask | shadow_x_mask;
1182                 }
1183                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
1184         }
1185 }
1186
1187 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1188 {
1189         int r;
1190         int largepage = 0;
1191         pfn_t pfn;
1192
1193         down_read(&current->mm->mmap_sem);
1194         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1195                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1196                 largepage = 1;
1197         }
1198
1199         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1200         up_read(&current->mm->mmap_sem);
1201
1202         /* mmio */
1203         if (is_error_pfn(pfn)) {
1204                 kvm_release_pfn_clean(pfn);
1205                 return 1;
1206         }
1207
1208         spin_lock(&vcpu->kvm->mmu_lock);
1209         kvm_mmu_free_some_pages(vcpu);
1210         r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
1211                          PT32E_ROOT_LEVEL);
1212         spin_unlock(&vcpu->kvm->mmu_lock);
1213
1214
1215         return r;
1216 }
1217
1218
1219 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1220                                     struct kvm_mmu_page *sp)
1221 {
1222         int i;
1223
1224         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1225                 sp->spt[i] = shadow_trap_nonpresent_pte;
1226 }
1227
1228 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1229 {
1230         int i;
1231         struct kvm_mmu_page *sp;
1232
1233         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1234                 return;
1235         spin_lock(&vcpu->kvm->mmu_lock);
1236         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1237                 hpa_t root = vcpu->arch.mmu.root_hpa;
1238
1239                 sp = page_header(root);
1240                 --sp->root_count;
1241                 if (!sp->root_count && sp->role.invalid)
1242                         kvm_mmu_zap_page(vcpu->kvm, sp);
1243                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1244                 spin_unlock(&vcpu->kvm->mmu_lock);
1245                 return;
1246         }
1247         for (i = 0; i < 4; ++i) {
1248                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1249
1250                 if (root) {
1251                         root &= PT64_BASE_ADDR_MASK;
1252                         sp = page_header(root);
1253                         --sp->root_count;
1254                         if (!sp->root_count && sp->role.invalid)
1255                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1256                 }
1257                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1258         }
1259         spin_unlock(&vcpu->kvm->mmu_lock);
1260         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1261 }
1262
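/*
 * Allocate the shadow root(s) for the current mode.  A 64-bit shadow uses
 * a single PT64-level root page; otherwise each of the four pae_root
 * entries gets its own PT32-level shadow page, derived from the guest
 * pdptrs when the guest uses PAE.  With TDP enabled, or when the guest has
 * paging disabled, the roots are metaphysical (direct-mapped).
 */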
1263 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1264 {
1265         int i;
1266         gfn_t root_gfn;
1267         struct kvm_mmu_page *sp;
1268         int metaphysical = 0;
1269
1270         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1271
1272         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1273                 hpa_t root = vcpu->arch.mmu.root_hpa;
1274
1275                 ASSERT(!VALID_PAGE(root));
1276                 if (tdp_enabled)
1277                         metaphysical = 1;
1278                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1279                                       PT64_ROOT_LEVEL, metaphysical,
1280                                       ACC_ALL, NULL);
1281                 root = __pa(sp->spt);
1282                 ++sp->root_count;
1283                 vcpu->arch.mmu.root_hpa = root;
1284                 return;
1285         }
1286         metaphysical = !is_paging(vcpu);
1287         if (tdp_enabled)
1288                 metaphysical = 1;
1289         for (i = 0; i < 4; ++i) {
1290                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1291
1292                 ASSERT(!VALID_PAGE(root));
1293                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1294                         if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1295                                 vcpu->arch.mmu.pae_root[i] = 0;
1296                                 continue;
1297                         }
1298                         root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1299                 } else if (vcpu->arch.mmu.root_level == 0)
1300                         root_gfn = 0;
1301                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1302                                       PT32_ROOT_LEVEL, metaphysical,
1303                                       ACC_ALL, NULL);
1304                 root = __pa(sp->spt);
1305                 ++sp->root_count;
1306                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1307         }
1308         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1309 }
1310
1311 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1312 {
1313         return vaddr;
1314 }
1315
1316 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1317                                 u32 error_code)
1318 {
1319         gfn_t gfn;
1320         int r;
1321
1322         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
1323         r = mmu_topup_memory_caches(vcpu);
1324         if (r)
1325                 return r;
1326
1327         ASSERT(vcpu);
1328         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1329
1330         gfn = gva >> PAGE_SHIFT;
1331
1332         return nonpaging_map(vcpu, gva & PAGE_MASK,
1333                              error_code & PFERR_WRITE_MASK, gfn);
1334 }
1335
1336 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1337                                 u32 error_code)
1338 {
1339         pfn_t pfn;
1340         int r;
1341         int largepage = 0;
1342         gfn_t gfn = gpa >> PAGE_SHIFT;
1343
1344         ASSERT(vcpu);
1345         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1346
1347         r = mmu_topup_memory_caches(vcpu);
1348         if (r)
1349                 return r;
1350
1351         down_read(&current->mm->mmap_sem);
1352         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1353                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1354                 largepage = 1;
1355         }
1356         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1357         up_read(&current->mm->mmap_sem);
1358         if (is_error_pfn(pfn)) {
1359                 kvm_release_pfn_clean(pfn);
1360                 return 1;
1361         }
1362         spin_lock(&vcpu->kvm->mmu_lock);
1363         kvm_mmu_free_some_pages(vcpu);
1364         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1365                          largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
1366         spin_unlock(&vcpu->kvm->mmu_lock);
1367
1368         return r;
1369 }
1370
1371 static void nonpaging_free(struct kvm_vcpu *vcpu)
1372 {
1373         mmu_free_roots(vcpu);
1374 }
1375
1376 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1377 {
1378         struct kvm_mmu *context = &vcpu->arch.mmu;
1379
1380         context->new_cr3 = nonpaging_new_cr3;
1381         context->page_fault = nonpaging_page_fault;
1382         context->gva_to_gpa = nonpaging_gva_to_gpa;
1383         context->free = nonpaging_free;
1384         context->prefetch_page = nonpaging_prefetch_page;
1385         context->root_level = 0;
1386         context->shadow_root_level = PT32E_ROOT_LEVEL;
1387         context->root_hpa = INVALID_PAGE;
1388         return 0;
1389 }
1390
1391 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1392 {
1393         ++vcpu->stat.tlb_flush;
1394         kvm_x86_ops->tlb_flush(vcpu);
1395 }
1396
1397 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1398 {
1399         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
1400         mmu_free_roots(vcpu);
1401 }
1402
1403 static void inject_page_fault(struct kvm_vcpu *vcpu,
1404                               u64 addr,
1405                               u32 err_code)
1406 {
1407         kvm_inject_page_fault(vcpu, addr, err_code);
1408 }
1409
1410 static void paging_free(struct kvm_vcpu *vcpu)
1411 {
1412         nonpaging_free(vcpu);
1413 }
1414
1415 #define PTTYPE 64
1416 #include "paging_tmpl.h"
1417 #undef PTTYPE
1418
1419 #define PTTYPE 32
1420 #include "paging_tmpl.h"
1421 #undef PTTYPE
1422
1423 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1424 {
1425         struct kvm_mmu *context = &vcpu->arch.mmu;
1426
1427         ASSERT(is_pae(vcpu));
1428         context->new_cr3 = paging_new_cr3;
1429         context->page_fault = paging64_page_fault;
1430         context->gva_to_gpa = paging64_gva_to_gpa;
1431         context->prefetch_page = paging64_prefetch_page;
1432         context->free = paging_free;
1433         context->root_level = level;
1434         context->shadow_root_level = level;
1435         context->root_hpa = INVALID_PAGE;
1436         return 0;
1437 }
1438
1439 static int paging64_init_context(struct kvm_vcpu *vcpu)
1440 {
1441         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1442 }
1443
1444 static int paging32_init_context(struct kvm_vcpu *vcpu)
1445 {
1446         struct kvm_mmu *context = &vcpu->arch.mmu;
1447
1448         context->new_cr3 = paging_new_cr3;
1449         context->page_fault = paging32_page_fault;
1450         context->gva_to_gpa = paging32_gva_to_gpa;
1451         context->free = paging_free;
1452         context->prefetch_page = paging32_prefetch_page;
1453         context->root_level = PT32_ROOT_LEVEL;
1454         context->shadow_root_level = PT32E_ROOT_LEVEL;
1455         context->root_hpa = INVALID_PAGE;
1456         return 0;
1457 }
1458
1459 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1460 {
1461         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1462 }
1463
1464 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1465 {
1466         struct kvm_mmu *context = &vcpu->arch.mmu;
1467
1468         context->new_cr3 = nonpaging_new_cr3;
1469         context->page_fault = tdp_page_fault;
1470         context->free = nonpaging_free;
1471         context->prefetch_page = nonpaging_prefetch_page;
1472         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
1473         context->root_hpa = INVALID_PAGE;
1474
1475         if (!is_paging(vcpu)) {
1476                 context->gva_to_gpa = nonpaging_gva_to_gpa;
1477                 context->root_level = 0;
1478         } else if (is_long_mode(vcpu)) {
1479                 context->gva_to_gpa = paging64_gva_to_gpa;
1480                 context->root_level = PT64_ROOT_LEVEL;
1481         } else if (is_pae(vcpu)) {
1482                 context->gva_to_gpa = paging64_gva_to_gpa;
1483                 context->root_level = PT32E_ROOT_LEVEL;
1484         } else {
1485                 context->gva_to_gpa = paging32_gva_to_gpa;
1486                 context->root_level = PT32_ROOT_LEVEL;
1487         }
1488
1489         return 0;
1490 }
1491
1492 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1493 {
1494         ASSERT(vcpu);
1495         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1496
1497         if (!is_paging(vcpu))
1498                 return nonpaging_init_context(vcpu);
1499         else if (is_long_mode(vcpu))
1500                 return paging64_init_context(vcpu);
1501         else if (is_pae(vcpu))
1502                 return paging32E_init_context(vcpu);
1503         else
1504                 return paging32_init_context(vcpu);
1505 }
1506
1507 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1508 {
1509         vcpu->arch.update_pte.pfn = bad_pfn;
1510
1511         if (tdp_enabled)
1512                 return init_kvm_tdp_mmu(vcpu);
1513         else
1514                 return init_kvm_softmmu(vcpu);
1515 }
1516
1517 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1518 {
1519         ASSERT(vcpu);
1520         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1521                 vcpu->arch.mmu.free(vcpu);
1522                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1523         }
1524 }
1525
1526 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1527 {
1528         destroy_kvm_mmu(vcpu);
1529         return init_kvm_mmu(vcpu);
1530 }
1531 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1532
1533 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1534 {
1535         int r;
1536
1537         r = mmu_topup_memory_caches(vcpu);
1538         if (r)
1539                 goto out;
1540         spin_lock(&vcpu->kvm->mmu_lock);
1541         kvm_mmu_free_some_pages(vcpu);
1542         mmu_alloc_roots(vcpu);
1543         spin_unlock(&vcpu->kvm->mmu_lock);
1544         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
1545         kvm_mmu_flush_tlb(vcpu);
1546 out:
1547         return r;
1548 }
1549 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1550
1551 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1552 {
1553         mmu_free_roots(vcpu);
1554 }
1555
1556 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1557                                   struct kvm_mmu_page *sp,
1558                                   u64 *spte)
1559 {
1560         u64 pte;
1561         struct kvm_mmu_page *child;
1562
1563         pte = *spte;
1564         if (is_shadow_present_pte(pte)) {
1565                 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1566                     is_large_pte(pte))
1567                         rmap_remove(vcpu->kvm, spte);
1568                 else {
1569                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1570                         mmu_page_remove_parent_pte(child, spte);
1571                 }
1572         }
1573         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1574         if (is_large_pte(pte))
1575                 --vcpu->kvm->stat.lpages;
1576 }
1577
1578 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1579                                   struct kvm_mmu_page *sp,
1580                                   u64 *spte,
1581                                   const void *new)
1582 {
1583         if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
1584             && !vcpu->arch.update_pte.largepage) {
1585                 ++vcpu->kvm->stat.mmu_pde_zapped;
1586                 return;
1587         }
1588
1589         ++vcpu->kvm->stat.mmu_pte_updated;
1590         if (sp->role.glevels == PT32_ROOT_LEVEL)
1591                 paging32_update_pte(vcpu, sp, spte, new);
1592         else
1593                 paging64_update_pte(vcpu, sp, spte, new);
1594 }
1595
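     /*
      * A remote TLB flush is only needed if the old spte was present and
      * the new one drops it, changes the physical frame, or removes
      * permissions; otherwise flushing the local TLB is enough.
      */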
1596 static bool need_remote_flush(u64 old, u64 new)
1597 {
1598         if (!is_shadow_present_pte(old))
1599                 return false;
1600         if (!is_shadow_present_pte(new))
1601                 return true;
1602         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1603                 return true;
1604         old ^= PT64_NX_MASK;
1605         new ^= PT64_NX_MASK;
1606         return (old & ~new & PT64_PERM_MASK) != 0;
1607 }
1608
1609 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1610 {
1611         if (need_remote_flush(old, new))
1612                 kvm_flush_remote_tlbs(vcpu->kvm);
1613         else
1614                 kvm_mmu_flush_tlb(vcpu);
1615 }
1616
1617 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1618 {
1619         u64 *spte = vcpu->arch.last_pte_updated;
1620
1621         return !!(spte && (*spte & shadow_accessed_mask));
1622 }
1623
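     /*
      * Speculatively decode the guest pte being written and pin the pfn it
      * points at before mmu_lock is taken, so the spte update path in
      * kvm_mmu_pte_write() can use the pinned page directly.
      */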
1624 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1625                                           const u8 *new, int bytes)
1626 {
1627         gfn_t gfn;
1628         int r;
1629         u64 gpte = 0;
1630         pfn_t pfn;
1631
1632         vcpu->arch.update_pte.largepage = 0;
1633
1634         if (bytes != 4 && bytes != 8)
1635                 return;
1636
1637          /*
1638           * Assume that the pte write is to a page table of the same type
1639           * as the current vcpu paging mode.  This is nearly always true
1640           * (it might be false while the guest is changing modes), and it
1641           * is verified later by update_pte().
1642           */
1643         if (is_pae(vcpu)) {
1644                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1645                 if ((bytes == 4) && (gpa % 4 == 0)) {
1646                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1647                         if (r)
1648                                 return;
1649                         memcpy((void *)&gpte + (gpa % 8), new, 4);
1650                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1651                         memcpy((void *)&gpte, new, 8);
1652                 }
1653         } else {
1654                 if ((bytes == 4) && (gpa % 4 == 0))
1655                         memcpy((void *)&gpte, new, 4);
1656         }
1657         if (!is_present_pte(gpte))
1658                 return;
1659         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1660
1661         down_read(&current->mm->mmap_sem);
1662         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1663                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1664                 vcpu->arch.update_pte.largepage = 1;
1665         }
1666         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1667         up_read(&current->mm->mmap_sem);
1668
1669         if (is_error_pfn(pfn)) {
1670                 kvm_release_pfn_clean(pfn);
1671                 return;
1672         }
1673         vcpu->arch.update_pte.gfn = gfn;
1674         vcpu->arch.update_pte.pfn = pfn;
1675 }
1676
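     /*
      * Called when the guest writes to a page that is shadowed as a page
      * table.  Matching shadow pages are either updated in place or, if the
      * write is misaligned or the page is being written too often, zapped
      * so the page stops being shadowed.
      */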
1677 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1678                        const u8 *new, int bytes)
1679 {
1680         gfn_t gfn = gpa >> PAGE_SHIFT;
1681         struct kvm_mmu_page *sp;
1682         struct hlist_node *node, *n;
1683         struct hlist_head *bucket;
1684         unsigned index;
1685         u64 entry, gentry;
1686         u64 *spte;
1687         unsigned offset = offset_in_page(gpa);
1688         unsigned pte_size;
1689         unsigned page_offset;
1690         unsigned misaligned;
1691         unsigned quadrant;
1692         int level;
1693         int flooded = 0;
1694         int npte;
1695         int r;
1696
1697         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
1698         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1699         spin_lock(&vcpu->kvm->mmu_lock);
1700         kvm_mmu_free_some_pages(vcpu);
1701         ++vcpu->kvm->stat.mmu_pte_write;
1702         kvm_mmu_audit(vcpu, "pre pte write");
1703         if (gfn == vcpu->arch.last_pt_write_gfn
1704             && !last_updated_pte_accessed(vcpu)) {
1705                 ++vcpu->arch.last_pt_write_count;
1706                 if (vcpu->arch.last_pt_write_count >= 3)
1707                         flooded = 1;
1708         } else {
1709                 vcpu->arch.last_pt_write_gfn = gfn;
1710                 vcpu->arch.last_pt_write_count = 1;
1711                 vcpu->arch.last_pte_updated = NULL;
1712         }
1713         index = kvm_page_table_hashfn(gfn);
1714         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1715         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1716                 if (sp->gfn != gfn || sp->role.metaphysical)
1717                         continue;
1718                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1719                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1720                 misaligned |= bytes < 4;
1721                 if (misaligned || flooded) {
1722                         /*
1723                          * Misaligned accesses are too much trouble to fix
1724                          * up; also, they usually indicate a page is not used
1725                          * as a page table.
1726                          *
1727                          * If we're seeing too many writes to a page,
1728                          * it may no longer be a page table, or we may be
1729                          * forking, in which case it is better to unmap the
1730                          * page.
1731                          */
1732                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1733                                  gpa, bytes, sp->role.word);
1734                         kvm_mmu_zap_page(vcpu->kvm, sp);
1735                         ++vcpu->kvm->stat.mmu_flooded;
1736                         continue;
1737                 }
1738                 page_offset = offset;
1739                 level = sp->role.level;
1740                 npte = 1;
1741                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
1742                         page_offset <<= 1;      /* 32->64 */
1743                         /*
1744                          * A 32-bit pde maps 4MB while the shadow pdes map
1745                          * only 2MB.  So we need to double the offset again
1746                          * and zap two pdes instead of one.
1747                          */
1748                         if (level == PT32_ROOT_LEVEL) {
1749                                 page_offset &= ~7; /* kill rounding error */
1750                                 page_offset <<= 1;
1751                                 npte = 2;
1752                         }
1753                         quadrant = page_offset >> PAGE_SHIFT;
1754                         page_offset &= ~PAGE_MASK;
1755                         if (quadrant != sp->role.quadrant)
1756                                 continue;
1757                 }
1758                 spte = &sp->spt[page_offset / sizeof(*spte)];
1759                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1760                         gentry = 0;
1761                         r = kvm_read_guest_atomic(vcpu->kvm,
1762                                                   gpa & ~(u64)(pte_size - 1),
1763                                                   &gentry, pte_size);
1764                         new = (const void *)&gentry;
1765                         if (r < 0)
1766                                 new = NULL;
1767                 }
1768                 while (npte--) {
1769                         entry = *spte;
1770                         mmu_pte_write_zap_pte(vcpu, sp, spte);
1771                         if (new)
1772                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
1773                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1774                         ++spte;
1775                 }
1776         }
1777         kvm_mmu_audit(vcpu, "post pte write");
1778         spin_unlock(&vcpu->kvm->mmu_lock);
1779         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1780                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1781                 vcpu->arch.update_pte.pfn = bad_pfn;
1782         }
1783 }
1784
1785 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1786 {
1787         gpa_t gpa;
1788         int r;
1789
1790         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1791
1792         spin_lock(&vcpu->kvm->mmu_lock);
1793         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1794         spin_unlock(&vcpu->kvm->mmu_lock);
1795         return r;
1796 }
1797
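     /*
      * Recycle shadow pages, zapping pages from the tail of the active list
      * until at least KVM_REFILL_PAGES pages are free again.
      */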
1798 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1799 {
1800         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
1801                 struct kvm_mmu_page *sp;
1802
1803                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
1804                                   struct kvm_mmu_page, link);
1805                 kvm_mmu_zap_page(vcpu->kvm, sp);
1806                 ++vcpu->kvm->stat.mmu_recycled;
1807         }
1808 }
1809
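     /*
      * Top-level guest page fault handler: let the MMU try to resolve the
      * fault first; if the handler returns nonzero the fault needs
      * instruction emulation, usually because the guest wrote to a
      * write-protected page table.
      */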
1810 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1811 {
1812         int r;
1813         enum emulation_result er;
1814
1815         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
1816         if (r < 0)
1817                 goto out;
1818
1819         if (!r) {
1820                 r = 1;
1821                 goto out;
1822         }
1823
1824         r = mmu_topup_memory_caches(vcpu);
1825         if (r)
1826                 goto out;
1827
1828         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1829
1830         switch (er) {
1831         case EMULATE_DONE:
1832                 return 1;
1833         case EMULATE_DO_MMIO:
1834                 ++vcpu->stat.mmio_exits;
1835                 return 0;
1836         case EMULATE_FAIL:
1837                 kvm_report_emulation_failure(vcpu, "pagetable");
1838                 return 1;
1839         default:
1840                 BUG();
1841         }
1842 out:
1843         return r;
1844 }
1845 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1846
1847 void kvm_enable_tdp(void)
1848 {
1849         tdp_enabled = true;
1850 }
1851 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1852
1853 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1854 {
1855         struct kvm_mmu_page *sp;
1856
1857         while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1858                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
1859                                   struct kvm_mmu_page, link);
1860                 kvm_mmu_zap_page(vcpu->kvm, sp);
1861                 cond_resched();
1862         }
1863         free_page((unsigned long)vcpu->arch.mmu.pae_root);
1864 }
1865
1866 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1867 {
1868         struct page *page;
1869         int i;
1870
1871         ASSERT(vcpu);
1872
1873         if (vcpu->kvm->arch.n_requested_mmu_pages)
1874                 vcpu->kvm->arch.n_free_mmu_pages =
1875                                         vcpu->kvm->arch.n_requested_mmu_pages;
1876         else
1877                 vcpu->kvm->arch.n_free_mmu_pages =
1878                                         vcpu->kvm->arch.n_alloc_mmu_pages;
1879         /*
1880          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1881          * Therefore we need to allocate shadow page tables in the first
1882          * 4GB of memory, which happens to fit the DMA32 zone.
1883          */
1884         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1885         if (!page)
1886                 goto error_1;
1887         vcpu->arch.mmu.pae_root = page_address(page);
1888         for (i = 0; i < 4; ++i)
1889                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1890
1891         return 0;
1892
1893 error_1:
1894         free_mmu_pages(vcpu);
1895         return -ENOMEM;
1896 }
1897
1898 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1899 {
1900         ASSERT(vcpu);
1901         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1902
1903         return alloc_mmu_pages(vcpu);
1904 }
1905
1906 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1907 {
1908         ASSERT(vcpu);
1909         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1910
1911         return init_kvm_mmu(vcpu);
1912 }
1913
1914 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1915 {
1916         ASSERT(vcpu);
1917
1918         destroy_kvm_mmu(vcpu);
1919         free_mmu_pages(vcpu);
1920         mmu_free_memory_caches(vcpu);
1921 }
1922
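     /*
      * Strip the writable bit from every shadow pte that maps the given
      * memory slot, e.g. when dirty logging is enabled for it.
      */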
1923 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1924 {
1925         struct kvm_mmu_page *sp;
1926
1927         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
1928                 int i;
1929                 u64 *pt;
1930
1931                 if (!test_bit(slot, &sp->slot_bitmap))
1932                         continue;
1933
1934                 pt = sp->spt;
1935                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1936                         /* avoid RMW */
1937                         if (pt[i] & PT_WRITABLE_MASK)
1938                                 pt[i] &= ~PT_WRITABLE_MASK;
1939         }
1940 }
1941
1942 void kvm_mmu_zap_all(struct kvm *kvm)
1943 {
1944         struct kvm_mmu_page *sp, *node;
1945
1946         spin_lock(&kvm->mmu_lock);
1947         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
1948                 kvm_mmu_zap_page(kvm, sp);
1949         spin_unlock(&kvm->mmu_lock);
1950
1951         kvm_flush_remote_tlbs(kvm);
1952 }
1953
1954 void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
1955 {
1956         struct kvm_mmu_page *page;
1957
1958         page = container_of(kvm->arch.active_mmu_pages.prev,
1959                             struct kvm_mmu_page, link);
1960         kvm_mmu_zap_page(kvm, page);
1961 }
1962
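     /*
      * Memory shrinker callback: under host memory pressure, zap one shadow
      * page from one VM and move that VM to the end of the list so the cost
      * is spread across guests.
      */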
1963 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
1964 {
1965         struct kvm *kvm;
1966         struct kvm *kvm_freed = NULL;
1967         int cache_count = 0;
1968
1969         spin_lock(&kvm_lock);
1970
1971         list_for_each_entry(kvm, &vm_list, vm_list) {
1972                 int npages;
1973
1974                 spin_lock(&kvm->mmu_lock);
1975                 npages = kvm->arch.n_alloc_mmu_pages -
1976                          kvm->arch.n_free_mmu_pages;
1977                 cache_count += npages;
1978                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
1979                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
1980                         cache_count--;
1981                         kvm_freed = kvm;
1982                 }
1983                 nr_to_scan--;
1984
1985                 spin_unlock(&kvm->mmu_lock);
1986         }
1987         if (kvm_freed)
1988                 list_move_tail(&kvm_freed->vm_list, &vm_list);
1989
1990         spin_unlock(&kvm_lock);
1991
1992         return cache_count;
1993 }
1994
1995 static struct shrinker mmu_shrinker = {
1996         .shrink = mmu_shrink,
1997         .seeks = DEFAULT_SEEKS * 10,
1998 };
1999
2000 static void mmu_destroy_caches(void)
2001 {
2002         if (pte_chain_cache)
2003                 kmem_cache_destroy(pte_chain_cache);
2004         if (rmap_desc_cache)
2005                 kmem_cache_destroy(rmap_desc_cache);
2006         if (mmu_page_header_cache)
2007                 kmem_cache_destroy(mmu_page_header_cache);
2008 }
2009
2010 void kvm_mmu_module_exit(void)
2011 {
2012         mmu_destroy_caches();
2013         unregister_shrinker(&mmu_shrinker);
2014 }
2015
2016 int kvm_mmu_module_init(void)
2017 {
2018         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2019                                             sizeof(struct kvm_pte_chain),
2020                                             0, 0, NULL);
2021         if (!pte_chain_cache)
2022                 goto nomem;
2023         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2024                                             sizeof(struct kvm_rmap_desc),
2025                                             0, 0, NULL);
2026         if (!rmap_desc_cache)
2027                 goto nomem;
2028
2029         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2030                                                   sizeof(struct kvm_mmu_page),
2031                                                   0, 0, NULL);
2032         if (!mmu_page_header_cache)
2033                 goto nomem;
2034
2035         register_shrinker(&mmu_shrinker);
2036
2037         return 0;
2038
2039 nomem:
2040         mmu_destroy_caches();
2041         return -ENOMEM;
2042 }
2043
2044 /*
2045  * Calculate the number of mmu pages needed for kvm.
2046  */
2047 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2048 {
2049         int i;
2050         unsigned int nr_mmu_pages;
2051         unsigned int  nr_pages = 0;
2052
2053         for (i = 0; i < kvm->nmemslots; i++)
2054                 nr_pages += kvm->memslots[i].npages;
2055
2056         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2057         nr_mmu_pages = max(nr_mmu_pages,
2058                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2059
2060         return nr_mmu_pages;
2061 }
2062
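     /*
      * Helpers for the paravirtual MMU interface: the guest passes a buffer
      * of packed kvm_mmu_op_* records, which are consumed one at a time by
      * the functions below.
      */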
2063 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2064                                 unsigned len)
2065 {
2066         if (len > buffer->len)
2067                 return NULL;
2068         return buffer->ptr;
2069 }
2070
2071 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2072                                 unsigned len)
2073 {
2074         void *ret;
2075
2076         ret = pv_mmu_peek_buffer(buffer, len);
2077         if (!ret)
2078                 return ret;
2079         buffer->ptr += len;
2080         buffer->len -= len;
2081         buffer->processed += len;
2082         return ret;
2083 }
2084
2085 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2086                              gpa_t addr, gpa_t value)
2087 {
2088         int bytes = 8;
2089         int r;
2090
2091         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2092                 bytes = 4;
2093
2094         r = mmu_topup_memory_caches(vcpu);
2095         if (r)
2096                 return r;
2097
2098         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2099                 return -EFAULT;
2100
2101         return 1;
2102 }
2103
2104 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2105 {
2106         kvm_x86_ops->tlb_flush(vcpu);
2107         return 1;
2108 }
2109
2110 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2111 {
2112         spin_lock(&vcpu->kvm->mmu_lock);
2113         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2114         spin_unlock(&vcpu->kvm->mmu_lock);
2115         return 1;
2116 }
2117
2118 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2119                              struct kvm_pv_mmu_op_buffer *buffer)
2120 {
2121         struct kvm_mmu_op_header *header;
2122
2123         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2124         if (!header)
2125                 return 0;
2126         switch (header->op) {
2127         case KVM_MMU_OP_WRITE_PTE: {
2128                 struct kvm_mmu_op_write_pte *wpte;
2129
2130                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2131                 if (!wpte)
2132                         return 0;
2133                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2134                                         wpte->pte_val);
2135         }
2136         case KVM_MMU_OP_FLUSH_TLB: {
2137                 struct kvm_mmu_op_flush_tlb *ftlb;
2138
2139                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2140                 if (!ftlb)
2141                         return 0;
2142                 return kvm_pv_mmu_flush_tlb(vcpu);
2143         }
2144         case KVM_MMU_OP_RELEASE_PT: {
2145                 struct kvm_mmu_op_release_pt *rpt;
2146
2147                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2148                 if (!rpt)
2149                         return 0;
2150                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2151         }
2152         default: return 0;
2153         }
2154 }
2155
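     /*
      * Entry point for the paravirtual MMU hypercall: copy the guest's
      * request buffer and process the operations it contains until the
      * buffer is exhausted or an unknown or short record is hit.
      */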
2156 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2157                   gpa_t addr, unsigned long *ret)
2158 {
2159         int r;
2160         struct kvm_pv_mmu_op_buffer buffer;
2161
2162         buffer.ptr = buffer.buf;
2163         buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
2164         buffer.processed = 0;
2165
2166         r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
2167         if (r)
2168                 goto out;
2169
2170         while (buffer.len) {
2171                 r = kvm_pv_mmu_op_one(vcpu, &buffer);
2172                 if (r < 0)
2173                         goto out;
2174                 if (r == 0)
2175                         break;
2176         }
2177
2178         r = 1;
2179 out:
2180         *ret = buffer.processed;
2181         return r;
2182 }
2183
2184 #ifdef AUDIT
2185
2186 static const char *audit_msg;
2187
2188 static gva_t canonicalize(gva_t gva)
2189 {
2190 #ifdef CONFIG_X86_64
2191         gva = (long long)(gva << 16) >> 16;
2192 #endif
2193         return gva;
2194 }
2195
2196 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2197                                 gva_t va, int level)
2198 {
2199         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2200         int i;
2201         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2202
2203         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2204                 u64 ent = pt[i];
2205
2206                 if (ent == shadow_trap_nonpresent_pte)
2207                         continue;
2208
2209                 va = canonicalize(va);
2210                 if (level > 1) {
2211                         if (ent == shadow_notrap_nonpresent_pte)
2212                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
2213                                        " in nonleaf level: levels %d gva %lx"
2214                                        " level %d pte %llx\n", audit_msg,
2215                                        vcpu->arch.mmu.root_level, va, level, ent);
2216
2217                         audit_mappings_page(vcpu, ent, va, level - 1);
2218                 } else {
2219                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
                             pfn_t pfn = gpa_to_pfn(vcpu, gpa);
2220                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
2221
2222                         if (is_shadow_present_pte(ent)
2223                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
2224                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
2225                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
2226                                        audit_msg, vcpu->arch.mmu.root_level,
2227                                        va, gpa, hpa, ent,
2228                                        is_shadow_present_pte(ent));
2229                         else if (ent == shadow_notrap_nonpresent_pte
2230                                  && !is_error_hpa(hpa))
2231                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
2232                                        " valid guest gva %lx\n", audit_msg, va);
2233                         kvm_release_pfn_clean(pfn);
2234
2235                 }
2236         }
2237 }
2238
2239 static void audit_mappings(struct kvm_vcpu *vcpu)
2240 {
2241         unsigned i;
2242
2243         if (vcpu->arch.mmu.root_level == 4)
2244                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
2245         else
2246                 for (i = 0; i < 4; ++i)
2247                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
2248                                 audit_mappings_page(vcpu,
2249                                                     vcpu->arch.mmu.pae_root[i],
2250                                                     i << 30,
2251                                                     2);
2252 }
2253
2254 static int count_rmaps(struct kvm_vcpu *vcpu)
2255 {
2256         int nmaps = 0;
2257         int i, j, k;
2258
2259         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2260                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2261                 struct kvm_rmap_desc *d;
2262
2263                 for (j = 0; j < m->npages; ++j) {
2264                         unsigned long *rmapp = &m->rmap[j];
2265
2266                         if (!*rmapp)
2267                                 continue;
2268                         if (!(*rmapp & 1)) {
2269                                 ++nmaps;
2270                                 continue;
2271                         }
2272                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
2273                         while (d) {
2274                                 for (k = 0; k < RMAP_EXT; ++k)
2275                                         if (d->shadow_ptes[k])
2276                                                 ++nmaps;
2277                                         else
2278                                                 break;
2279                                 d = d->more;
2280                         }
2281                 }
2282         }
2283         return nmaps;
2284 }
2285
2286 static int count_writable_mappings(struct kvm_vcpu *vcpu)
2287 {
2288         int nmaps = 0;
2289         struct kvm_mmu_page *sp;
2290         int i;
2291
2292         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2293                 u64 *pt = sp->spt;
2294
2295                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
2296                         continue;
2297
2298                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2299                         u64 ent = pt[i];
2300
2301                         if (!(ent & PT_PRESENT_MASK))
2302                                 continue;
2303                         if (!(ent & PT_WRITABLE_MASK))
2304                                 continue;
2305                         ++nmaps;
2306                 }
2307         }
2308         return nmaps;
2309 }
2310
2311 static void audit_rmap(struct kvm_vcpu *vcpu)
2312 {
2313         int n_rmap = count_rmaps(vcpu);
2314         int n_actual = count_writable_mappings(vcpu);
2315
2316         if (n_rmap != n_actual)
2317                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
2318                        __func__, audit_msg, n_rmap, n_actual);
2319 }
2320
2321 static void audit_write_protection(struct kvm_vcpu *vcpu)
2322 {
2323         struct kvm_mmu_page *sp;
2324         struct kvm_memory_slot *slot;
2325         unsigned long *rmapp;
2326         gfn_t gfn;
2327
2328         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2329                 if (sp->role.metaphysical)
2330                         continue;
2331
2332                 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2333                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
2334                 rmapp = &slot->rmap[gfn - slot->base_gfn];
2335                 if (*rmapp)
2336                         printk(KERN_ERR "%s: (%s) shadow page has writable"
2337                                " mappings: gfn %lx role %x\n",
2338                                __func__, audit_msg, sp->gfn,
2339                                sp->role.word);
2340         }
2341 }
2342
2343 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2344 {
2345         int olddbg = dbg;
2346
2347         dbg = 0;
2348         audit_msg = msg;
2349         audit_rmap(vcpu);
2350         audit_write_protection(vcpu);
2351         audit_mappings(vcpu);
2352         dbg = olddbg;
2353 }
2354
2355 #endif