/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When set to true, this enables two-dimensional paging (TDP), where
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical translation
 * 2. while doing 1., the guest-physical to host-physical translation
 * If the hardware supports this, we do not need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
        AUDIT_PRE_PAGE_FAULT,
        AUDIT_POST_PAGE_FAULT,
        AUDIT_PRE_PTE_WRITE,
        AUDIT_POST_PTE_WRITE,
        AUDIT_PRE_SYNC,
        AUDIT_POST_SYNC
};

char *audit_point_name[] = {
        "pre page fault",
        "post page fault",
        "pre pte write",
        "post pte write",
        "pre sync",
        "post sync"
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);
#endif

static bool oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
#endif

#define PTE_PREFETCH_NUM                8

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

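/*
 * Illustration (added, not in the original source): with PAGE_SHIFT == 12
 * and PT64_LEVEL_BITS == 9, PT64_LEVEL_SHIFT(level) is 12, 21, 30 and 39
 * for levels 1-4, so PT64_INDEX(addr, 2) == (addr >> 21) & 511 picks the
 * PDE slot for addr, the usual 9-bits-per-level x86-64 layout.
 */
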
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
#define PT32_LVL_OFFSET_MASK(level) \
        (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
        (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                            * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
        u64 *sptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk_iterator {
        u64 addr;
        hpa_t shadow_addr;
        int level;
        u64 *sptep;
        unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))

typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}
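
/*
 * Example (added, illustrative only): rsvd_bits(52, 62) builds
 * 0x7ff0000000000000ULL, i.e. a mask covering bits 52..62 inclusive,
 * the kind of reserved physical-address mask used when checking guest
 * PTEs for reserved-bit violations.
 */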

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
        shadow_trap_nonpresent_pte = trap_pte;
        shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
        return pte & PT_PAGE_SIZE_MASK;
}

static int is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
        return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
        return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
        if (level == PT_PAGE_TABLE_LEVEL)
                return 1;
        if (is_large_pte(pte))
                return 1;
        return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
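
/*
 * Sketch of the PSE-36 arithmetic (added; assumes PT32_DIR_PSE36_SHIFT
 * is 13, as on x86): bits 13-16 of a 4MB PDE carry physical address
 * bits 32-35, so shifting left by 32 - 13 - PAGE_SHIFT == 7 moves them
 * from PDE bit 13 to gfn bit 20, which is physical bit 32.
 */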

static void __set_spte(u64 *sptep, u64 spte)
{
        set_64bit(sptep, spte);
}

static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
        return xchg(sptep, new_spte);
#else
        u64 old_spte;

        do {
                old_spte = *sptep;
        } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

        return old_spte;
#endif
}
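
/*
 * Added note: on 32-bit hosts a 64-bit spte cannot be swapped with a
 * plain xchg, so the loop above emulates an atomic 64-bit exchange by
 * retrying cmpxchg64 until no concurrent writer has raced with us.
 */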

static bool spte_has_volatile_bits(u64 spte)
{
        if (!shadow_accessed_mask)
                return false;

        if (!is_shadow_present_pte(spte))
                return false;

        if ((spte & shadow_accessed_mask) &&
              (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
                return false;

        return true;
}

static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
        return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

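/*
 * Added commentary: update_spte() must not lose accessed/dirty bits
 * that hardware may set concurrently.  When the old spte has volatile
 * bits that the new value does not already carry, it swaps atomically
 * via __xchg_spte() and then propagates any A/D bits it observed to
 * the backing pfn.
 */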
static void update_spte(u64 *sptep, u64 new_spte)
{
        u64 mask, old_spte = *sptep;

        WARN_ON(!is_rmap_spte(new_spte));

        new_spte |= old_spte & shadow_dirty_mask;

        mask = shadow_accessed_mask;
        if (is_writable_pte(old_spte))
                mask |= shadow_dirty_mask;

        if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
                __set_spte(sptep, new_spte);
        else
                old_spte = __xchg_spte(sptep, new_spte);

        if (!shadow_accessed_mask)
                return;

        if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
        if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}

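/*
 * Added commentary: the mmu_topup_*()/mmu_free_*() helpers below
 * preallocate objects with GFP_KERNEL while sleeping is still allowed;
 * the fault paths then take objects out of these per-vcpu caches under
 * mmu_lock, where a sleeping allocation would be illegal.
 */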
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                  struct kmem_cache *cache)
{
        while (mc->nobjs)
                kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
{
        struct page *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page_address(page);
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                                   rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
        mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
        kmem_cache_free(pte_chain_cache, pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
        kmem_cache_free(rmap_desc_cache, rd);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
        if (!sp->role.direct)
                return sp->gfns[index];

        return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
        if (sp->role.direct)
                BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
        else
                sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn,
                               struct kvm_memory_slot *slot,
                               int level)
{
        unsigned long idx;

        idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
              (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
        return &slot->lpage_info[level - 2][idx].write_count;
}
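
/*
 * Example (added, illustrative; assumes KVM_HPAGE_GFN_SHIFT(level) == 9
 * for the 2MB level): for a slot with base_gfn 0x1200, gfn 0x1434 gives
 * idx = (0x1434 >> 9) - (0x1200 >> 9) = 10 - 9 = 1, i.e. the second
 * 2MB region covered by the slot.
 */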

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        int *write_count;
        int i;

        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                write_count   = slot_largepage_idx(gfn, slot, i);
                *write_count += 1;
        }
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        int *write_count;
        int i;

        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                write_count   = slot_largepage_idx(gfn, slot, i);
                *write_count -= 1;
                WARN_ON(*write_count < 0);
        }
}

static int has_wrprotected_page(struct kvm *kvm,
                                gfn_t gfn,
                                int level)
{
        struct kvm_memory_slot *slot;
        int *largepage_idx;

        slot = gfn_to_memslot(kvm, gfn);
        if (slot) {
                largepage_idx = slot_largepage_idx(gfn, slot, level);
                return *largepage_idx;
        }

        return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
        unsigned long page_size;
        int i, ret = 0;

        page_size = kvm_host_page_size(kvm, gfn);

        for (i = PT_PAGE_TABLE_LEVEL;
             i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
                if (page_size >= KVM_HPAGE_SIZE(i))
                        ret = i;
                else
                        break;
        }

        return ret;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
        struct kvm_memory_slot *slot;
        int host_level, level, max_level;

        slot = gfn_to_memslot(vcpu->kvm, large_gfn);
        if (slot && slot->dirty_bitmap)
                return PT_PAGE_TABLE_LEVEL;

        host_level = host_mapping_level(vcpu->kvm, large_gfn);

        if (host_level == PT_PAGE_TABLE_LEVEL)
                return host_level;

        max_level = kvm_x86_ops->get_lpage_level() < host_level ?
                kvm_x86_ops->get_lpage_level() : host_level;

        for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
                if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
                        break;

        return level - 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
        struct kvm_memory_slot *slot;
        unsigned long idx;

        slot = gfn_to_memslot(kvm, gfn);
        if (likely(level == PT_PAGE_TABLE_LEVEL))
                return &slot->rmap[gfn - slot->base_gfn];

        idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));

        return &slot->lpage_info[level - 2][idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added or zero if
 * the spte was not added.
 *
 */
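
/*
 * Encoding sketch (added): *rmapp == 0 means no mappings; a value with
 * bit 0 clear is itself the single spte pointer; a value with bit 0 set
 * is a kvm_rmap_desc pointer tagged with 1, so
 * (struct kvm_rmap_desc *)(*rmapp & ~1ul) recovers the descriptor chain.
 */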
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
        int i, count = 0;

        if (!is_rmap_spte(*spte))
                return count;
        sp = page_header(__pa(spte));
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
        if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                *rmapp = (unsigned long)spte;
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->sptes[0] = (u64 *)*rmapp;
                desc->sptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
                ++count;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->sptes[RMAP_EXT-1] && desc->more) {
                        desc = desc->more;
                        count += RMAP_EXT;
                }
                if (desc->sptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->sptes[i]; ++i)
                        ++count;
                desc->sptes[i] = spte;
        }
        return count;
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
                ;
        desc->sptes[i] = desc->sptes[j];
        desc->sptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                *rmapp = (unsigned long)desc->sptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        unsigned long *rmapp;
        int i;

        sp = page_header(__pa(spte));
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
                BUG();
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove:  %p 1->0\n", spte);
                if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove:  %p 1->BUG\n", spte);
                        BUG();
                }
                *rmapp = 0;
        } else {
                rmap_printk("rmap_remove:  %p many->many\n", spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
                                if (desc->sptes[i] == spte) {
                                        rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                pr_err("rmap_remove: %p many->many\n", spte);
                BUG();
        }
}

static int set_spte_track_bits(u64 *sptep, u64 new_spte)
{
        pfn_t pfn;
        u64 old_spte = *sptep;

        if (!spte_has_volatile_bits(old_spte))
                __set_spte(sptep, new_spte);
        else
                old_spte = __xchg_spte(sptep, new_spte);

        if (!is_rmap_spte(old_spte))
                return 0;

        pfn = spte_to_pfn(old_spte);
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
                kvm_set_pfn_dirty(pfn);
        return 1;
}

static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
        if (set_spte_track_bits(sptep, new_spte))
                rmap_remove(kvm, sptep);
}

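/*
 * Added commentary: rmap_next() is a cursor-style iterator.  Passing
 * spte == NULL returns the first mapping for the gfn; passing the
 * previous return value yields the next one, so callers such as
 * rmap_write_protect() below walk the whole chain with repeated calls.
 */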
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        u64 *prev_spte;
        int i;

        if (!*rmapp)
                return NULL;
        else if (!(*rmapp & 1)) {
                if (!spte)
                        return (u64 *)*rmapp;
                return NULL;
        }
        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
        prev_spte = NULL;
        while (desc) {
                for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
                        if (prev_spte == spte)
                                return desc->sptes[i];
                        prev_spte = desc->sptes[i];
                }
                desc = desc->more;
        }
        return NULL;
}

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
        unsigned long *rmapp;
        u64 *spte;
        int i, write_protected = 0;

        rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writable_pte(*spte)) {
                        update_spte(spte, *spte & ~PT_WRITABLE_MASK);
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
        }

        /* check for huge page mappings */
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = gfn_to_rmap(kvm, gfn, i);
                spte = rmap_next(kvm, rmapp, NULL);
                while (spte) {
                        BUG_ON(!spte);
                        BUG_ON(!(*spte & PT_PRESENT_MASK));
                        BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                        if (is_writable_pte(*spte)) {
                                drop_spte(kvm, spte,
                                          shadow_trap_nonpresent_pte);
                                --kvm->stat.lpages;
                                spte = NULL;
                                write_protected = 1;
                        }
                        spte = rmap_next(kvm, rmapp, spte);
                }
        }

        return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long data)
{
        u64 *spte;
        int need_tlb_flush = 0;

        while ((spte = rmap_next(kvm, rmapp, NULL))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
                drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
}

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                             unsigned long data)
{
        int need_flush = 0;
        u64 *spte, new_spte;
        pte_t *ptep = (pte_t *)data;
        pfn_t new_pfn;

        WARN_ON(pte_huge(*ptep));
        new_pfn = pte_pfn(*ptep);
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!is_shadow_present_pte(*spte));
                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
                need_flush = 1;
                if (pte_write(*ptep)) {
                        drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
                        spte = rmap_next(kvm, rmapp, NULL);
                } else {
                        new_spte = *spte & ~PT64_BASE_ADDR_MASK;
                        new_spte |= (u64)new_pfn << PAGE_SHIFT;

                        new_spte &= ~PT_WRITABLE_MASK;
                        new_spte &= ~SPTE_HOST_WRITEABLE;
                        new_spte &= ~shadow_accessed_mask;
                        set_spte_track_bits(spte, new_spte);
                        spte = rmap_next(kvm, rmapp, spte);
                }
        }
        if (need_flush)
                kvm_flush_remote_tlbs(kvm);

        return 0;
}

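/*
 * Added commentary: kvm_handle_hva() translates a host virtual address
 * back to the memslots that contain it and calls the given handler on
 * the 4K rmap chain plus one rmap_pde per supported huge-page size,
 * OR-ing the results; it is the common driver for the MMU-notifier
 * unmap/age/change-pte hooks below.
 */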
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          unsigned long data,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long data))
{
        int i, j;
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

                        ret = handler(kvm, &memslot->rmap[gfn_offset], data);

                        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
                                unsigned long idx;
                                int sh;

                                sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
                                idx = ((memslot->base_gfn+gfn_offset) >> sh) -
                                        (memslot->base_gfn >> sh);
                                ret |= handler(kvm,
                                        &memslot->lpage_info[j][idx].rmap_pde,
                                        data);
                        }
                        trace_kvm_age_page(hva, memslot, ret);
                        retval |= ret;
                }
        }

        return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long data)
{
        u64 *spte;
        int young = 0;

        /*
         * Emulate the accessed bit for EPT, by checking if this page has
         * an EPT mapping, and clearing it if it does. On the next access,
         * a new EPT mapping will be established.
         * This has some overhead, but not as much as the cost of swapping
         * out actively used pages or breaking up actively used hugepages.
         */
        if (!shadow_accessed_mask)
                return kvm_unmap_rmapp(kvm, rmapp, data);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                int _young;
                u64 _spte = *spte;
                BUG_ON(!(_spte & PT_PRESENT_MASK));
                _young = _spte & PT_ACCESSED_MASK;
                if (_young) {
                        young = 1;
                        clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *sp;

        sp = page_header(__pa(spte));

        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

        kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
        kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
        u64 *pos;
        u64 *end;

        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (is_shadow_present_pte(*pos)) {
                        printk(KERN_ERR "%s: %p %llx\n", __func__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
{
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        ASSERT(is_empty_shadow_page(sp->spt));
        hlist_del(&sp->hash_link);
        list_del(&sp->link);
        __free_page(virt_to_page(sp->spt));
        if (!sp->role.direct)
                __free_page(virt_to_page(sp->gfns));
        kmem_cache_free(mmu_page_header_cache, sp);
        kvm_mod_used_mmu_pages(kvm, -1);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
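
/*
 * Example (added; assumes KVM_MMU_HASH_SHIFT == 10): the hash is just
 * gfn % 1024, so gfn 0x3ff and gfn 0x7ff both land in bucket 0x3ff and
 * are told apart by the gfn/role checks in the for_each_gfn_sp()
 * lookups further down.
 */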

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte, int direct)
{
        struct kvm_mmu_page *sp;

        sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
        sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        if (!direct)
                sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
                                                  PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
        sp->parent_pte = parent_pte;
        kvm_mod_used_mmu_pages(vcpu->kvm, +1);
        return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!sp->multimapped) {
                u64 *old = sp->parent_pte;

                if (!old) {
                        sp->parent_pte = parent_pte;
                        return;
                }
                sp->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&sp->parent_ptes);
                hlist_add_head(&pte_chain->link, &sp->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &sp->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!sp->multimapped) {
                BUG_ON(sp->parent_pte != parent_pte);
                sp->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                                && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&sp->parent_ptes)) {
                                        sp->multimapped = 0;
                                        sp->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}

static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        struct kvm_mmu_page *parent_sp;
        int i;

        if (!sp->multimapped && sp->parent_pte) {
                parent_sp = page_header(__pa(sp->parent_pte));
                fn(parent_sp, sp->parent_pte);
                return;
        }

        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        u64 *spte = pte_chain->parent_ptes[i];

                        if (!spte)
                                break;
                        parent_sp = page_header(__pa(spte));
                        fn(parent_sp, spte);
                }
}

static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
        mmu_parent_walk(sp, mark_unsync);
}

static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
{
        unsigned int index;

        index = spte - sp->spt;
        if (__test_and_set_bit(index, sp->unsync_child_bitmap))
                return;
        if (sp->unsync_children++)
                return;
        kvm_mmu_mark_parents_unsync(sp);
}
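
/*
 * Added commentary: unsync_child_bitmap records which entries of this
 * page lead to unsync pages and unsync_children counts them.  The
 * mutual recursion with kvm_mmu_mark_parents_unsync() stops as soon as
 * a page was already marked (the bitmap test or the counter being
 * non-zero), so each path up to the root is walked at most once.
 */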

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
                               struct kvm_mmu_page *sp, bool clear_unsync)
{
        return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
        struct mmu_page_and_offset {
                struct kvm_mmu_page *sp;
                unsigned int idx;
        } page[KVM_PAGE_ARRAY_NR];
        unsigned int nr;
};

#define for_each_unsync_children(bitmap, idx)           \
        for (idx = find_first_bit(bitmap, 512);         \
             idx < 512;                                 \
             idx = find_next_bit(bitmap, 512, idx+1))

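/*
 * Added note: the 512 above is PT64_ENT_PER_PAGE, the number of 8-byte
 * entries in one 4K shadow page, which is also why unsync_child_bitmap
 * is a 512-bit bitmap.
 */
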
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
                         int idx)
{
        int i;

        if (sp->unsync)
                for (i = 0; i < pvec->nr; i++)
                        if (pvec->page[i].sp == sp)
                                return 0;

        pvec->page[pvec->nr].sp = sp;
        pvec->page[pvec->nr].idx = idx;
        pvec->nr++;
        return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
                           struct kvm_mmu_pages *pvec)
{
        int i, ret, nr_unsync_leaf = 0;

        for_each_unsync_children(sp->unsync_child_bitmap, i) {
                struct kvm_mmu_page *child;
                u64 ent = sp->spt[i];

                if (!is_shadow_present_pte(ent) || is_large_pte(ent))
                        goto clear_child_bitmap;

                child = page_header(ent & PT64_BASE_ADDR_MASK);

                if (child->unsync_children) {
                        if (mmu_pages_add(pvec, child, i))
                                return -ENOSPC;

                        ret = __mmu_unsync_walk(child, pvec);
                        if (!ret)
                                goto clear_child_bitmap;
                        else if (ret > 0)
                                nr_unsync_leaf += ret;
                        else
                                return ret;
                } else if (child->unsync) {
                        nr_unsync_leaf++;
                        if (mmu_pages_add(pvec, child, i))
                                return -ENOSPC;
                } else
                        goto clear_child_bitmap;

                continue;

clear_child_bitmap:
                __clear_bit(i, sp->unsync_child_bitmap);
                sp->unsync_children--;
                WARN_ON((int)sp->unsync_children < 0);
        }

        return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
                           struct kvm_mmu_pages *pvec)
{
        if (!sp->unsync_children)
                return 0;

        mmu_pages_add(pvec, sp, 0);
        return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        WARN_ON(!sp->unsync);
        trace_kvm_mmu_sync_page(sp);
        sp->unsync = 0;
        --kvm->stat.mmu_unsync;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
                                    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);

#define for_each_gfn_sp(kvm, sp, gfn, pos)                              \
  hlist_for_each_entry(sp, pos,                                         \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
        if ((sp)->gfn != (gfn)) {} else

#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)               \
  hlist_for_each_entry(sp, pos,                                         \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
                if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
                        (sp)->role.invalid) {} else

/* @sp->gfn should be write-protected at the call site */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           struct list_head *invalid_list, bool clear_unsync)
{
        if (sp->role.cr4_pae != !!is_pae(vcpu)) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return 1;
        }

        if (clear_unsync)
                kvm_unlink_unsync_page(vcpu->kvm, sp);

        if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return 1;
        }

        kvm_mmu_flush_tlb(vcpu);
        return 0;
}

static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
                                   struct kvm_mmu_page *sp)
{
        LIST_HEAD(invalid_list);
        int ret;

        ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
        if (ret)
                kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

        return ret;
}

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
{
        return __kvm_sync_page(vcpu, sp, invalid_list, true);
}

/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm_mmu_page *s;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        bool flush = false;

        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
                if (!s->unsync)
                        continue;

                WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
                if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
                        (vcpu->arch.mmu.sync_page(vcpu, s, true))) {
                        kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
                        continue;
                }
                kvm_unlink_unsync_page(vcpu->kvm, s);
                flush = true;
        }

        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        if (flush)
                kvm_mmu_flush_tlb(vcpu);
}

struct mmu_page_path {
        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
        unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i)                       \
                for (i = mmu_pages_next(&pvec, &parents, -1),   \
                        sp = pvec.page[i].sp;                   \
                        i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
                        i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
                          struct mmu_page_path *parents,
                          int i)
{
        int n;

        for (n = i+1; n < pvec->nr; n++) {
                struct kvm_mmu_page *sp = pvec->page[n].sp;

                if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
                        parents->idx[0] = pvec->page[n].idx;
                        return n;
                }

                parents->parent[sp->role.level-2] = sp;
                parents->idx[sp->role.level-1] = pvec->page[n].idx;
        }

        return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
        struct kvm_mmu_page *sp;
        unsigned int level = 0;

        do {
                unsigned int idx = parents->idx[level];

                sp = parents->parent[level];
                if (!sp)
                        return;

                --sp->unsync_children;
                WARN_ON((int)sp->unsync_children < 0);
                __clear_bit(idx, sp->unsync_child_bitmap);
                level++;
        } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
                               struct mmu_page_path *parents,
                               struct kvm_mmu_pages *pvec)
{
        parents->parent[parent->role.level-1] = NULL;
        pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *parent)
{
        int i;
        struct kvm_mmu_page *sp;
        struct mmu_page_path parents;
        struct kvm_mmu_pages pages;
        LIST_HEAD(invalid_list);

        kvm_mmu_pages_init(parent, &parents, &pages);
        while (mmu_unsync_walk(parent, &pages)) {
                int protected = 0;

                for_each_sp(pages, sp, parents, i)
                        protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

                if (protected)
                        kvm_flush_remote_tlbs(vcpu->kvm);

                for_each_sp(pages, sp, parents, i) {
                        kvm_sync_page(vcpu, sp, &invalid_list);
                        mmu_pages_clear_parents(&parents);
                }
                kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                cond_resched_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_pages_init(parent, &parents, &pages);
        }
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int direct,
                                             unsigned access,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned quadrant;
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        bool need_sync = false;

        role = vcpu->arch.mmu.base_role;
        role.level = level;
        role.direct = direct;
        if (role.direct)
                role.cr4_pae = 0;
        role.access = access;
        if (!vcpu->arch.mmu.direct_map
            && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
                if (!need_sync && sp->unsync)
                        need_sync = true;

                if (sp->role.word != role.word)
                        continue;

                if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
                        break;

                mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                if (sp->unsync_children) {
                        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                        kvm_mmu_mark_parents_unsync(sp);
                } else if (sp->unsync)
                        kvm_mmu_mark_parents_unsync(sp);

                trace_kvm_mmu_get_page(sp, false);
                return sp;
        }
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
        if (!sp)
                return sp;
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link,
                &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        kvm_sync_pages(vcpu, gfn);

                account_shadowed(vcpu->kvm, gfn);
        }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
                vcpu->arch.mmu.prefetch_page(vcpu, sp);
        else
                nonpaging_prefetch_page(vcpu, sp);
        trace_kvm_mmu_get_page(sp, true);
        return sp;
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
                             struct kvm_vcpu *vcpu, u64 addr)
{
        iterator->addr = addr;
        iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
        iterator->level = vcpu->arch.mmu.shadow_root_level;

        if (iterator->level == PT64_ROOT_LEVEL &&
            vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
            !vcpu->arch.mmu.direct_map)
                --iterator->level;

        if (iterator->level == PT32E_ROOT_LEVEL) {
                iterator->shadow_addr
                        = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
                --iterator->level;
                if (!iterator->shadow_addr)
                        iterator->level = 0;
        }
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
        if (iterator->level < PT_PAGE_TABLE_LEVEL)
                return false;

        if (iterator->level == PT_PAGE_TABLE_LEVEL)
                if (is_large_pte(*iterator->sptep))
                        return false;

        iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
        iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
        return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
        iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
        --iterator->level;
}
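
/*
 * Usage sketch (added, illustrative): a typical caller walks the
 * shadow page table like
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, gpa, it) {
 *		if (it.level == PT_PAGE_TABLE_LEVEL)
 *			break;	/* it.sptep now points at the leaf slot *\/
 *	}
 *
 * starting from root_hpa and descending one level per iteration until
 * shadow_walk_okay() stops the loop.
 */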

static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
        u64 spte;

        spte = __pa(sp->spt)
                | PT_PRESENT_MASK | PT_ACCESSED_MASK
                | PT_WRITABLE_MASK | PT_USER_MASK;
        __set_spte(sptep, spte);
}
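
/*
 * Added note: non-leaf shadow entries are deliberately created fully
 * permissive (present, writable, user, accessed); the restrictive
 * permission bits only take effect in the leaf sptes.
 */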
1553
1554 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1555 {
1556         if (is_large_pte(*sptep)) {
1557                 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1558                 kvm_flush_remote_tlbs(vcpu->kvm);
1559         }
1560 }
1561
1562 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1563                                    unsigned direct_access)
1564 {
1565         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
1566                 struct kvm_mmu_page *child;
1567
1568                 /*
1569                  * For a direct sp, if the guest pte's dirty bit
1570                  * changed from clean to dirty, it would corrupt the
1571                  * sp's access: writes would be allowed through a
1572                  * read-only sp, so update the spte at this point to
1573                  * get a new sp with the correct access.
1574                  */
1575                 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
1576                 if (child->role.access == direct_access)
1577                         return;
1578
1579                 mmu_page_remove_parent_pte(child, sptep);
1580                 __set_spte(sptep, shadow_trap_nonpresent_pte);
1581                 kvm_flush_remote_tlbs(vcpu->kvm);
1582         }
1583 }
1584
1585 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1586                                          struct kvm_mmu_page *sp)
1587 {
1588         unsigned i;
1589         u64 *pt;
1590         u64 ent;
1591
1592         pt = sp->spt;
1593
1594         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1595                 ent = pt[i];
1596
1597                 if (is_shadow_present_pte(ent)) {
1598                         if (!is_last_spte(ent, sp->role.level)) {
1599                                 ent &= PT64_BASE_ADDR_MASK;
1600                                 mmu_page_remove_parent_pte(page_header(ent),
1601                                                            &pt[i]);
1602                         } else {
1603                                 if (is_large_pte(ent))
1604                                         --kvm->stat.lpages;
1605                                 drop_spte(kvm, &pt[i],
1606                                           shadow_trap_nonpresent_pte);
1607                         }
1608                 }
1609                 pt[i] = shadow_trap_nonpresent_pte;
1610         }
1611 }
1612
1613 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1614 {
1615         mmu_page_remove_parent_pte(sp, parent_pte);
1616 }
1617
1618 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1619 {
1620         int i;
1621         struct kvm_vcpu *vcpu;
1622
1623         kvm_for_each_vcpu(i, vcpu, kvm)
1624                 vcpu->arch.last_pte_updated = NULL;
1625 }
1626
1627 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1628 {
1629         u64 *parent_pte;
1630
1631         while (sp->multimapped || sp->parent_pte) {
1632                 if (!sp->multimapped)
1633                         parent_pte = sp->parent_pte;
1634                 else {
1635                         struct kvm_pte_chain *chain;
1636
1637                         chain = container_of(sp->parent_ptes.first,
1638                                              struct kvm_pte_chain, link);
1639                         parent_pte = chain->parent_ptes[0];
1640                 }
1641                 BUG_ON(!parent_pte);
1642                 kvm_mmu_put_page(sp, parent_pte);
1643                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1644         }
1645 }
1646
1647 static int mmu_zap_unsync_children(struct kvm *kvm,
1648                                    struct kvm_mmu_page *parent,
1649                                    struct list_head *invalid_list)
1650 {
1651         int i, zapped = 0;
1652         struct mmu_page_path parents;
1653         struct kvm_mmu_pages pages;
1654
1655         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1656                 return 0;
1657
1658         kvm_mmu_pages_init(parent, &parents, &pages);
1659         while (mmu_unsync_walk(parent, &pages)) {
1660                 struct kvm_mmu_page *sp;
1661
1662                 for_each_sp(pages, sp, parents, i) {
1663                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
1664                         mmu_pages_clear_parents(&parents);
1665                         zapped++;
1666                 }
1667                 kvm_mmu_pages_init(parent, &parents, &pages);
1668         }
1669
1670         return zapped;
1671 }
1672
1673 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1674                                     struct list_head *invalid_list)
1675 {
1676         int ret;
1677
1678         trace_kvm_mmu_prepare_zap_page(sp);
1679         ++kvm->stat.mmu_shadow_zapped;
1680         ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
1681         kvm_mmu_page_unlink_children(kvm, sp);
1682         kvm_mmu_unlink_parents(kvm, sp);
1683         if (!sp->role.invalid && !sp->role.direct)
1684                 unaccount_shadowed(kvm, sp->gfn);
1685         if (sp->unsync)
1686                 kvm_unlink_unsync_page(kvm, sp);
1687         if (!sp->root_count) {
1688                 /* Count self */
1689                 ret++;
1690                 list_move(&sp->link, invalid_list);
1691         } else {
1692                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1693                 kvm_reload_remote_mmus(kvm);
1694         }
1695
1696         sp->role.invalid = 1;
1697         kvm_mmu_reset_last_pte_updated(kvm);
1698         return ret;
1699 }
1700
1701 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1702                                     struct list_head *invalid_list)
1703 {
1704         struct kvm_mmu_page *sp;
1705
1706         if (list_empty(invalid_list))
1707                 return;
1708
1709         kvm_flush_remote_tlbs(kvm);
1710
1711         do {
1712                 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
1713                 WARN_ON(!sp->role.invalid || sp->root_count);
1714                 kvm_mmu_free_page(kvm, sp);
1715         } while (!list_empty(invalid_list));
1716
1717 }
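
/*
 * A note on the two-phase zap protocol above: kvm_mmu_prepare_zap_page()
 * only unlinks a page and collects it on invalid_list (with mmu_lock
 * held), while kvm_mmu_commit_zap_page() flushes remote TLBs once for
 * the whole batch before freeing anything, so no vcpu can still be
 * walking a shadow page by the time it is released.
 */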
1718
1719 /*
1720  * Change the number of mmu pages allocated to the vm.
1721  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
1722  */
1723 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1724 {
1725         LIST_HEAD(invalid_list);
1726         /*
1727          * If we set the number of mmu pages to be smaller than the
1728          * number of active pages, we must free some mmu pages before
1729          * changing the value.
1730          */
1731
1732         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1733                 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
1734                         !list_empty(&kvm->arch.active_mmu_pages)) {
1735                         struct kvm_mmu_page *page;
1736
1737                         page = container_of(kvm->arch.active_mmu_pages.prev,
1738                                             struct kvm_mmu_page, link);
1739                         kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
1740                         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1741                 }
1742                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
1743         }
1744
1745         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
1746 }
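
/*
 * Usage note (based on the callers in x86.c): this is driven by the
 * KVM_SET_NR_MMU_PAGES vm ioctl and by memslot updates, e.g.
 *
 *      kvm_mmu_change_mmu_pages(kvm, kvm_mmu_calculate_mmu_pages(kvm));
 */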
1747
1748 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1749 {
1750         struct kvm_mmu_page *sp;
1751         struct hlist_node *node;
1752         LIST_HEAD(invalid_list);
1753         int r;
1754
1755         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
1756         r = 0;
1757
1758         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1759                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
1760                          sp->role.word);
1761                 r = 1;
1762                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1763         }
1764         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1765         return r;
1766 }
1767
1768 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1769 {
1770         struct kvm_mmu_page *sp;
1771         struct hlist_node *node;
1772         LIST_HEAD(invalid_list);
1773
1774         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1775                 pgprintk("%s: zap %llx %x\n",
1776                          __func__, gfn, sp->role.word);
1777                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1778         }
1779         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1780 }
1781
1782 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1783 {
1784         int slot = memslot_id(kvm, gfn);
1785         struct kvm_mmu_page *sp = page_header(__pa(pte));
1786
1787         __set_bit(slot, sp->slot_bitmap);
1788 }
1789
1790 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1791 {
1792         int i;
1793         u64 *pt = sp->spt;
1794
1795         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1796                 return;
1797
1798         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1799                 if (pt[i] == shadow_notrap_nonpresent_pte)
1800                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1801         }
1802 }
1803
1804 /*
1805  * The function is based on mtrr_type_lookup() in
1806  * arch/x86/kernel/cpu/mtrr/generic.c
1807  */
1808 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1809                          u64 start, u64 end)
1810 {
1811         int i;
1812         u64 base, mask;
1813         u8 prev_match, curr_match;
1814         int num_var_ranges = KVM_NR_VAR_MTRR;
1815
1816         if (!mtrr_state->enabled)
1817                 return 0xFF;
1818
1819         /* Make end inclusive, instead of exclusive */
1820         end--;
1821
1822         /* Look in fixed ranges. Just return the type as per start */
1823         if (mtrr_state->have_fixed && (start < 0x100000)) {
1824                 int idx;
1825
1826                 if (start < 0x80000) {
1827                         idx = 0;
1828                         idx += (start >> 16);
1829                         return mtrr_state->fixed_ranges[idx];
1830                 } else if (start < 0xC0000) {
1831                         idx = 1 * 8;
1832                         idx += ((start - 0x80000) >> 14);
1833                         return mtrr_state->fixed_ranges[idx];
1834                 } else if (start < 0x1000000) {
1835                         idx = 3 * 8;
1836                         idx += ((start - 0xC0000) >> 12);
1837                         return mtrr_state->fixed_ranges[idx];
1838                 }
1839         }
1840
1841         /*
1842          * Look in variable ranges.
1843          * Look for multiple ranges matching this address and pick the
1844          * type as per MTRR precedence.
1845          */
1846         if (!(mtrr_state->enabled & 2))
1847                 return mtrr_state->def_type;
1848
1849         prev_match = 0xFF;
1850         for (i = 0; i < num_var_ranges; ++i) {
1851                 unsigned short start_state, end_state;
1852
1853                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1854                         continue;
1855
1856                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1857                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1858                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1859                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1860
1861                 start_state = ((start & mask) == (base & mask));
1862                 end_state = ((end & mask) == (base & mask));
1863                 if (start_state != end_state)
1864                         return 0xFE;
1865
1866                 if ((start & mask) != (base & mask))
1867                         continue;
1868
1869                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1870                 if (prev_match == 0xFF) {
1871                         prev_match = curr_match;
1872                         continue;
1873                 }
1874
1875                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1876                     curr_match == MTRR_TYPE_UNCACHABLE)
1877                         return MTRR_TYPE_UNCACHABLE;
1878
1879                 if ((prev_match == MTRR_TYPE_WRBACK &&
1880                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1881                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1882                      curr_match == MTRR_TYPE_WRBACK)) {
1883                         prev_match = MTRR_TYPE_WRTHROUGH;
1884                         curr_match = MTRR_TYPE_WRTHROUGH;
1885                 }
1886
1887                 if (prev_match != curr_match)
1888                         return MTRR_TYPE_UNCACHABLE;
1889         }
1890
1891         if (prev_match != 0xFF)
1892                 return prev_match;
1893
1894         return mtrr_state->def_type;
1895 }
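
/*
 * Worked example for the fixed-range lookup above (illustrative values):
 * start = 0xB8000 (the VGA text buffer) lands in the 16KB-granular
 * fixed ranges, so
 *
 *      idx = 1 * 8 + ((0xB8000 - 0x80000) >> 14) = 8 + 14 = 22
 *
 * and the returned type is mtrr_state->fixed_ranges[22].
 */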
1896
1897 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1898 {
1899         u8 mtrr;
1900
1901         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1902                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1903         if (mtrr == 0xfe || mtrr == 0xff)
1904                 mtrr = MTRR_TYPE_WRBACK;
1905         return mtrr;
1906 }
1907 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1908
1909 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1910 {
1911         trace_kvm_mmu_unsync_page(sp);
1912         ++vcpu->kvm->stat.mmu_unsync;
1913         sp->unsync = 1;
1914
1915         kvm_mmu_mark_parents_unsync(sp);
1916         mmu_convert_notrap(sp);
1917 }
1918
1919 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1920 {
1921         struct kvm_mmu_page *s;
1922         struct hlist_node *node;
1923
1924         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1925                 if (s->unsync)
1926                         continue;
1927                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1928                 __kvm_unsync_page(vcpu, s);
1929         }
1930 }
1931
1932 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1933                                   bool can_unsync)
1934 {
1935         struct kvm_mmu_page *s;
1936         struct hlist_node *node;
1937         bool need_unsync = false;
1938
1939         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1940                 if (!can_unsync)
1941                         return 1;
1942
1943                 if (s->role.level != PT_PAGE_TABLE_LEVEL)
1944                         return 1;
1945
1946                 if (!need_unsync && !s->unsync) {
1947                         if (!oos_shadow)
1948                                 return 1;
1949                         need_unsync = true;
1950                 }
1951         }
1952         if (need_unsync)
1953                 kvm_unsync_pages(vcpu, gfn);
1954         return 0;
1955 }
1956
1957 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1958                     unsigned pte_access, int user_fault,
1959                     int write_fault, int dirty, int level,
1960                     gfn_t gfn, pfn_t pfn, bool speculative,
1961                     bool can_unsync, bool reset_host_protection)
1962 {
1963         u64 spte;
1964         int ret = 0;
1965
1966         /*
1967          * We don't set the accessed bit, since we sometimes want to see
1968          * whether the guest actually used the pte (in order to detect
1969          * demand paging).
1970          */
1971         spte = PT_PRESENT_MASK;
1972         if (!speculative)
1973                 spte |= shadow_accessed_mask;
1974         if (!dirty)
1975                 pte_access &= ~ACC_WRITE_MASK;
1976         if (pte_access & ACC_EXEC_MASK)
1977                 spte |= shadow_x_mask;
1978         else
1979                 spte |= shadow_nx_mask;
1980         if (pte_access & ACC_USER_MASK)
1981                 spte |= shadow_user_mask;
1982         if (level > PT_PAGE_TABLE_LEVEL)
1983                 spte |= PT_PAGE_SIZE_MASK;
1984         if (tdp_enabled)
1985                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1986                         kvm_is_mmio_pfn(pfn));
1987
1988         if (reset_host_protection)
1989                 spte |= SPTE_HOST_WRITEABLE;
1990
1991         spte |= (u64)pfn << PAGE_SHIFT;
1992
1993         if ((pte_access & ACC_WRITE_MASK)
1994             || (!vcpu->arch.mmu.direct_map && write_fault
1995                 && !is_write_protection(vcpu) && !user_fault)) {
1996
1997                 if (level > PT_PAGE_TABLE_LEVEL &&
1998                     has_wrprotected_page(vcpu->kvm, gfn, level)) {
1999                         ret = 1;
2000                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2001                         goto done;
2002                 }
2003
2004                 spte |= PT_WRITABLE_MASK;
2005
2006                 if (!vcpu->arch.mmu.direct_map
2007                     && !(pte_access & ACC_WRITE_MASK))
2008                         spte &= ~PT_USER_MASK;
2009
2010                 /*
2011                  * Optimization: for pte sync, if the spte was writable the
2012                  * hash lookup is unnecessary (and expensive). Write protection
2013                  * is the responsibility of mmu_get_page / kvm_sync_page.
2014                  * The same reasoning applies to dirty page accounting.
2015                  */
2016                 if (!can_unsync && is_writable_pte(*sptep))
2017                         goto set_pte;
2018
2019                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2020                         pgprintk("%s: found shadow page for %llx, marking ro\n",
2021                                  __func__, gfn);
2022                         ret = 1;
2023                         pte_access &= ~ACC_WRITE_MASK;
2024                         if (is_writable_pte(spte))
2025                                 spte &= ~PT_WRITABLE_MASK;
2026                 }
2027         }
2028
2029         if (pte_access & ACC_WRITE_MASK)
2030                 mark_page_dirty(vcpu->kvm, gfn);
2031
2032 set_pte:
2033         update_spte(sptep, spte);
2034 done:
2035         return ret;
2036 }
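
/*
 * A note on the return value above, as consumed by mmu_set_spte()
 * below: set_spte() returns 1 when write access could not be granted
 * (the spte was dropped or forced read-only), in which case a guest
 * write fault has to be handled by emulation; 0 means the spte was
 * installed as requested.
 */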
2037
2038 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2039                          unsigned pt_access, unsigned pte_access,
2040                          int user_fault, int write_fault, int dirty,
2041                          int *ptwrite, int level, gfn_t gfn,
2042                          pfn_t pfn, bool speculative,
2043                          bool reset_host_protection)
2044 {
2045         int was_rmapped = 0;
2046         int rmap_count;
2047
2048         pgprintk("%s: spte %llx access %x write_fault %d"
2049                  " user_fault %d gfn %llx\n",
2050                  __func__, *sptep, pt_access,
2051                  write_fault, user_fault, gfn);
2052
2053         if (is_rmap_spte(*sptep)) {
2054                 /*
2055                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2056                  * the parent of the now unreachable PTE.
2057                  */
2058                 if (level > PT_PAGE_TABLE_LEVEL &&
2059                     !is_large_pte(*sptep)) {
2060                         struct kvm_mmu_page *child;
2061                         u64 pte = *sptep;
2062
2063                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2064                         mmu_page_remove_parent_pte(child, sptep);
2065                         __set_spte(sptep, shadow_trap_nonpresent_pte);
2066                         kvm_flush_remote_tlbs(vcpu->kvm);
2067                 } else if (pfn != spte_to_pfn(*sptep)) {
2068                         pgprintk("hfn old %llx new %llx\n",
2069                                  spte_to_pfn(*sptep), pfn);
2070                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2071                         kvm_flush_remote_tlbs(vcpu->kvm);
2072                 /*
2073                  * If we overwrite a writable spte with a read-only one,
2074                  * drop it and flush remote TLBs. Otherwise rmap_write_protect
2075                  * will find a read-only spte, even though the writable spte
2076                  * might be cached on a CPU's TLB.
2077                  */
2078                 } else if (is_writable_pte(*sptep) &&
2079                           (!(pte_access & ACC_WRITE_MASK) || !dirty)) {
2080                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2081                         kvm_flush_remote_tlbs(vcpu->kvm);
2082                 } else
2083                         was_rmapped = 1;
2084         }
2085
2086         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
2087                       dirty, level, gfn, pfn, speculative, true,
2088                       reset_host_protection)) {
2089                 if (write_fault)
2090                         *ptwrite = 1;
2091                 kvm_mmu_flush_tlb(vcpu);
2092         }
2093
2094         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2095         pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2096                  is_large_pte(*sptep) ? "2MB" : "4kB",
2097                  *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn,
2098                  *sptep, sptep);
2099         if (!was_rmapped && is_large_pte(*sptep))
2100                 ++vcpu->kvm->stat.lpages;
2101
2102         page_header_update_slot(vcpu->kvm, sptep, gfn);
2103         if (!was_rmapped) {
2104                 rmap_count = rmap_add(vcpu, sptep, gfn);
2105                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2106                         rmap_recycle(vcpu, sptep, gfn);
2107         }
2108         kvm_release_pfn_clean(pfn);
2109         if (speculative) {
2110                 vcpu->arch.last_pte_updated = sptep;
2111                 vcpu->arch.last_pte_gfn = gfn;
2112         }
2113 }
2114
2115 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2116 {
2117 }
2118
2119 static struct kvm_memory_slot *
2120 pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
2121 {
2122         struct kvm_memory_slot *slot;
2123
2124         slot = gfn_to_memslot(vcpu->kvm, gfn);
2125         if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
2126               (no_dirty_log && slot->dirty_bitmap))
2127                 slot = NULL;
2128
2129         return slot;
2130 }
2131
2132 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2133                                      bool no_dirty_log)
2134 {
2135         struct kvm_memory_slot *slot;
2136         unsigned long hva;
2137
2138         slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log);
2139         if (!slot) {
2140                 get_page(bad_page);
2141                 return page_to_pfn(bad_page);
2142         }
2143
2144         hva = gfn_to_hva_memslot(slot, gfn);
2145
2146         return hva_to_pfn_atomic(vcpu->kvm, hva);
2147 }
2148
2149 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2150                                     struct kvm_mmu_page *sp,
2151                                     u64 *start, u64 *end)
2152 {
2153         struct page *pages[PTE_PREFETCH_NUM];
2154         unsigned access = sp->role.access;
2155         int i, ret;
2156         gfn_t gfn;
2157
2158         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2159         if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK))
2160                 return -1;
2161
2162         ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2163         if (ret <= 0)
2164                 return -1;
2165
2166         for (i = 0; i < ret; i++, gfn++, start++)
2167                 mmu_set_spte(vcpu, start, ACC_ALL,
2168                              access, 0, 0, 1, NULL,
2169                              sp->role.level, gfn,
2170                              page_to_pfn(pages[i]), true, true);
2171
2172         return 0;
2173 }
2174
2175 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2176                                   struct kvm_mmu_page *sp, u64 *sptep)
2177 {
2178         u64 *spte, *start = NULL;
2179         int i;
2180
2181         WARN_ON(!sp->role.direct);
2182
2183         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2184         spte = sp->spt + i;
2185
2186         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2187                 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
2188                         if (!start)
2189                                 continue;
2190                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2191                                 break;
2192                         start = NULL;
2193                 } else if (!start)
2194                         start = spte;
2195         }
2196 }
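
/*
 * Illustration of the window math above: with PTE_PREFETCH_NUM == 8, a
 * fault on spte index 13 of a shadow page gives
 *
 *      i = 13 & ~(8 - 1) = 8,
 *
 * so the loop scans the aligned window [8, 16) and hands each run of
 * still-nonpresent sptes around the faulting one to
 * direct_pte_prefetch_many().
 */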
2197
2198 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2199 {
2200         struct kvm_mmu_page *sp;
2201
2202         /*
2203          * Since there is no accessed bit on EPT, there is no way
2204          * to distinguish between actually accessed translations
2205          * and prefetched ones, so disable pte prefetch if EPT is
2206          * enabled.
2207          */
2208         if (!shadow_accessed_mask)
2209                 return;
2210
2211         sp = page_header(__pa(sptep));
2212         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2213                 return;
2214
2215         __direct_pte_prefetch(vcpu, sp, sptep);
2216 }
2217
2218 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2219                         int map_writable, int level, gfn_t gfn, pfn_t pfn)
2220 {
2221         struct kvm_shadow_walk_iterator iterator;
2222         struct kvm_mmu_page *sp;
2223         int pt_write = 0;
2224         gfn_t pseudo_gfn;
2225
2226         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2227                 if (iterator.level == level) {
2228                         unsigned pte_access = ACC_ALL;
2229
2230                         if (!map_writable)
2231                                 pte_access &= ~ACC_WRITE_MASK;
2232                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
2233                                      0, write, 1, &pt_write,
2234                                      level, gfn, pfn, false, map_writable);
2235                         direct_pte_prefetch(vcpu, iterator.sptep);
2236                         ++vcpu->stat.pf_fixed;
2237                         break;
2238                 }
2239
2240                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
2241                         u64 base_addr = iterator.addr;
2242
2243                         base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2244                         pseudo_gfn = base_addr >> PAGE_SHIFT;
2245                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2246                                               iterator.level - 1,
2247                                               1, ACC_ALL, iterator.sptep);
2248                         if (!sp) {
2249                                 pgprintk("nonpaging_map: ENOMEM\n");
2250                                 kvm_release_pfn_clean(pfn);
2251                                 return -ENOMEM;
2252                         }
2253
2254                         __set_spte(iterator.sptep,
2255                                    __pa(sp->spt)
2256                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
2257                                    | shadow_user_mask | shadow_x_mask
2258                                    | shadow_accessed_mask);
2259                 }
2260         }
2261         return pt_write;
2262 }
2263
2264 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2265 {
2266         siginfo_t info;
2267
2268         info.si_signo   = SIGBUS;
2269         info.si_errno   = 0;
2270         info.si_code    = BUS_MCEERR_AR;
2271         info.si_addr    = (void __user *)address;
2272         info.si_addr_lsb = PAGE_SHIFT;
2273
2274         send_sig_info(SIGBUS, &info, tsk);
2275 }
2276
2277 static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2278 {
2279         kvm_release_pfn_clean(pfn);
2280         if (is_hwpoison_pfn(pfn)) {
2281                 kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
2282                 return 0;
2283         } else if (is_fault_pfn(pfn))
2284                 return -EFAULT;
2285
2286         return 1;
2287 }
2288
2289 static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
2290                          gva_t gva, pfn_t *pfn, bool write, bool *writable);
2291
2292 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
2293                          bool no_apf)
2294 {
2295         int r;
2296         int level;
2297         pfn_t pfn;
2298         unsigned long mmu_seq;
2299         bool map_writable;
2300
2301         level = mapping_level(vcpu, gfn);
2302
2303         /*
2304          * This path builds a PAE page table, so we can map 2MB pages at
2305          * most. Therefore, cap the level at PT_DIRECTORY_LEVEL.
2306          */
2307         if (level > PT_DIRECTORY_LEVEL)
2308                 level = PT_DIRECTORY_LEVEL;
2309
2310         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2311
2312         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2313         smp_rmb();
2314
2315         if (try_async_pf(vcpu, no_apf, gfn, v, &pfn, write, &map_writable))
2316                 return 0;
2317
2318         /* mmio */
2319         if (is_error_pfn(pfn))
2320                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2321
2322         spin_lock(&vcpu->kvm->mmu_lock);
2323         if (mmu_notifier_retry(vcpu, mmu_seq))
2324                 goto out_unlock;
2325         kvm_mmu_free_some_pages(vcpu);
2326         r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn);
2327         spin_unlock(&vcpu->kvm->mmu_lock);
2328
2329
2330         return r;
2331
2332 out_unlock:
2333         spin_unlock(&vcpu->kvm->mmu_lock);
2334         kvm_release_pfn_clean(pfn);
2335         return 0;
2336 }
2337
2338
2339 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2340 {
2341         int i;
2342         struct kvm_mmu_page *sp;
2343         LIST_HEAD(invalid_list);
2344
2345         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2346                 return;
2347         spin_lock(&vcpu->kvm->mmu_lock);
2348         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2349             (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2350              vcpu->arch.mmu.direct_map)) {
2351                 hpa_t root = vcpu->arch.mmu.root_hpa;
2352
2353                 sp = page_header(root);
2354                 --sp->root_count;
2355                 if (!sp->root_count && sp->role.invalid) {
2356                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2357                         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2358                 }
2359                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2360                 spin_unlock(&vcpu->kvm->mmu_lock);
2361                 return;
2362         }
2363         for (i = 0; i < 4; ++i) {
2364                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2365
2366                 if (root) {
2367                         root &= PT64_BASE_ADDR_MASK;
2368                         sp = page_header(root);
2369                         --sp->root_count;
2370                         if (!sp->root_count && sp->role.invalid)
2371                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2372                                                          &invalid_list);
2373                 }
2374                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2375         }
2376         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2377         spin_unlock(&vcpu->kvm->mmu_lock);
2378         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2379 }
2380
2381 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2382 {
2383         int ret = 0;
2384
2385         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2386                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2387                 ret = 1;
2388         }
2389
2390         return ret;
2391 }
2392
2393 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
2394 {
2395         struct kvm_mmu_page *sp;
2396         unsigned i;
2397
2398         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2399                 spin_lock(&vcpu->kvm->mmu_lock);
2400                 kvm_mmu_free_some_pages(vcpu);
2401                 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
2402                                       1, ACC_ALL, NULL);
2403                 ++sp->root_count;
2404                 spin_unlock(&vcpu->kvm->mmu_lock);
2405                 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
2406         } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
2407                 for (i = 0; i < 4; ++i) {
2408                         hpa_t root = vcpu->arch.mmu.pae_root[i];
2409
2410                         ASSERT(!VALID_PAGE(root));
2411                         spin_lock(&vcpu->kvm->mmu_lock);
2412                         kvm_mmu_free_some_pages(vcpu);
2413                         sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
2414                                               i << 30,
2415                                               PT32_ROOT_LEVEL, 1, ACC_ALL,
2416                                               NULL);
2417                         root = __pa(sp->spt);
2418                         ++sp->root_count;
2419                         spin_unlock(&vcpu->kvm->mmu_lock);
2420                         vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2421                 }
2422                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2423         } else
2424                 BUG();
2425
2426         return 0;
2427 }
2428
2429 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2430 {
2431         struct kvm_mmu_page *sp;
2432         u64 pdptr, pm_mask;
2433         gfn_t root_gfn;
2434         int i;
2435
2436         root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
2437
2438         if (mmu_check_root(vcpu, root_gfn))
2439                 return 1;
2440
2441         /*
2442          * Do we shadow a long mode page table? If so we need to
2443          * write-protect the guest's page table root.
2444          */
2445         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2446                 hpa_t root = vcpu->arch.mmu.root_hpa;
2447
2448                 ASSERT(!VALID_PAGE(root));
2449
2450                 spin_lock(&vcpu->kvm->mmu_lock);
2451                 kvm_mmu_free_some_pages(vcpu);
2452                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
2453                                       0, ACC_ALL, NULL);
2454                 root = __pa(sp->spt);
2455                 ++sp->root_count;
2456                 spin_unlock(&vcpu->kvm->mmu_lock);
2457                 vcpu->arch.mmu.root_hpa = root;
2458                 return 0;
2459         }
2460
2461         /*
2462          * We shadow a 32 bit page table. This may be a legacy 2-level
2463          * or a PAE 3-level page table. In either case we need to be aware that
2464          * the shadow page table may be a PAE or a long mode page table.
2465          */
2466         pm_mask = PT_PRESENT_MASK;
2467         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
2468                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2469
2470         for (i = 0; i < 4; ++i) {
2471                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2472
2473                 ASSERT(!VALID_PAGE(root));
2474                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2475                         pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
2476                         if (!is_present_gpte(pdptr)) {
2477                                 vcpu->arch.mmu.pae_root[i] = 0;
2478                                 continue;
2479                         }
2480                         root_gfn = pdptr >> PAGE_SHIFT;
2481                         if (mmu_check_root(vcpu, root_gfn))
2482                                 return 1;
2483                 }
2484                 spin_lock(&vcpu->kvm->mmu_lock);
2485                 kvm_mmu_free_some_pages(vcpu);
2486                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2487                                       PT32_ROOT_LEVEL, 0,
2488                                       ACC_ALL, NULL);
2489                 root = __pa(sp->spt);
2490                 ++sp->root_count;
2491                 spin_unlock(&vcpu->kvm->mmu_lock);
2492
2493                 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
2494         }
2495         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2496
2497         /*
2498          * If we shadow a 32 bit page table with a long mode page
2499          * table we enter this path.
2500          */
2501         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2502                 if (vcpu->arch.mmu.lm_root == NULL) {
2503                         /*
2504                          * The additional page necessary for this is only
2505                          * allocated on demand.
2506                          */
2507
2508                         u64 *lm_root;
2509
2510                         lm_root = (void*)get_zeroed_page(GFP_KERNEL);
2511                         if (lm_root == NULL)
2512                                 return 1;
2513
2514                         lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
2515
2516                         vcpu->arch.mmu.lm_root = lm_root;
2517                 }
2518
2519                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
2520         }
2521
2522         return 0;
2523 }
2524
2525 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2526 {
2527         if (vcpu->arch.mmu.direct_map)
2528                 return mmu_alloc_direct_roots(vcpu);
2529         else
2530                 return mmu_alloc_shadow_roots(vcpu);
2531 }
2532
2533 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2534 {
2535         int i;
2536         struct kvm_mmu_page *sp;
2537
2538         if (vcpu->arch.mmu.direct_map)
2539                 return;
2540
2541         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2542                 return;
2543
2544         trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
2545         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2546                 hpa_t root = vcpu->arch.mmu.root_hpa;
2547                 sp = page_header(root);
2548                 mmu_sync_children(vcpu, sp);
2549                 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2550                 return;
2551         }
2552         for (i = 0; i < 4; ++i) {
2553                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2554
2555                 if (root && VALID_PAGE(root)) {
2556                         root &= PT64_BASE_ADDR_MASK;
2557                         sp = page_header(root);
2558                         mmu_sync_children(vcpu, sp);
2559                 }
2560         }
2561         trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2562 }
2563
2564 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2565 {
2566         spin_lock(&vcpu->kvm->mmu_lock);
2567         mmu_sync_roots(vcpu);
2568         spin_unlock(&vcpu->kvm->mmu_lock);
2569 }
2570
2571 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2572                                   u32 access, u32 *error)
2573 {
2574         if (error)
2575                 *error = 0;
2576         return vaddr;
2577 }
2578
2579 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2580                                          u32 access, u32 *error)
2581 {
2582         if (error)
2583                 *error = 0;
2584         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2585 }
2586
2587 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2588                                 u32 error_code, bool no_apf)
2589 {
2590         gfn_t gfn;
2591         int r;
2592
2593         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2594         r = mmu_topup_memory_caches(vcpu);
2595         if (r)
2596                 return r;
2597
2598         ASSERT(vcpu);
2599         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2600
2601         gfn = gva >> PAGE_SHIFT;
2602
2603         return nonpaging_map(vcpu, gva & PAGE_MASK,
2604                              error_code & PFERR_WRITE_MASK, gfn, no_apf);
2605 }
2606
2607 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
2608 {
2609         struct kvm_arch_async_pf arch;
2610         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
2611         arch.gfn = gfn;
2612         arch.direct_map = vcpu->arch.mmu.direct_map;
2613
2614         return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
2615 }
2616
2617 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
2618 {
2619         if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
2620                      kvm_event_needs_reinjection(vcpu)))
2621                 return false;
2622
2623         return kvm_x86_ops->interrupt_allowed(vcpu);
2624 }
2625
2626 static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
2627                          gva_t gva, pfn_t *pfn, bool write, bool *writable)
2628 {
2629         bool async;
2630
2631         *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
2632
2633         if (!async)
2634                 return false; /* *pfn has correct page already */
2635
2636         put_page(pfn_to_page(*pfn));
2637
2638         if (!no_apf && can_do_async_pf(vcpu)) {
2639                 trace_kvm_try_async_get_page(gva, gfn);
2640                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
2641                         trace_kvm_async_pf_doublefault(gva, gfn);
2642                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
2643                         return true;
2644                 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
2645                         return true;
2646         }
2647
2648         *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
2649
2650         return false;
2651 }
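
/*
 * A note on the contract above: try_async_pf() returns true when the
 * fault has been turned into an async page fault (or the vcpu was asked
 * to halt via KVM_REQ_APF_HALT), in which case the caller bails out;
 * it returns false once *pfn holds a result, which may still be an
 * error pfn that the caller checks with is_error_pfn().
 */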
2652
2653 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
2654                           bool no_apf)
2655 {
2656         pfn_t pfn;
2657         int r;
2658         int level;
2659         gfn_t gfn = gpa >> PAGE_SHIFT;
2660         unsigned long mmu_seq;
2661         int write = error_code & PFERR_WRITE_MASK;
2662         bool map_writable;
2663
2664         ASSERT(vcpu);
2665         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2666
2667         r = mmu_topup_memory_caches(vcpu);
2668         if (r)
2669                 return r;
2670
2671         level = mapping_level(vcpu, gfn);
2672
2673         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2674
2675         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2676         smp_rmb();
2677
2678         if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn, write, &map_writable))
2679                 return 0;
2680
2681         /* mmio */
2682         if (is_error_pfn(pfn))
2683                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2684         spin_lock(&vcpu->kvm->mmu_lock);
2685         if (mmu_notifier_retry(vcpu, mmu_seq))
2686                 goto out_unlock;
2687         kvm_mmu_free_some_pages(vcpu);
2688         r = __direct_map(vcpu, gpa, write, map_writable,
2689                          level, gfn, pfn);
2690         spin_unlock(&vcpu->kvm->mmu_lock);
2691
2692         return r;
2693
2694 out_unlock:
2695         spin_unlock(&vcpu->kvm->mmu_lock);
2696         kvm_release_pfn_clean(pfn);
2697         return 0;
2698 }
2699
2700 static void nonpaging_free(struct kvm_vcpu *vcpu)
2701 {
2702         mmu_free_roots(vcpu);
2703 }
2704
2705 static int nonpaging_init_context(struct kvm_vcpu *vcpu,
2706                                   struct kvm_mmu *context)
2707 {
2708         context->new_cr3 = nonpaging_new_cr3;
2709         context->page_fault = nonpaging_page_fault;
2710         context->gva_to_gpa = nonpaging_gva_to_gpa;
2711         context->free = nonpaging_free;
2712         context->prefetch_page = nonpaging_prefetch_page;
2713         context->sync_page = nonpaging_sync_page;
2714         context->invlpg = nonpaging_invlpg;
2715         context->root_level = 0;
2716         context->shadow_root_level = PT32E_ROOT_LEVEL;
2717         context->root_hpa = INVALID_PAGE;
2718         context->direct_map = true;
2719         context->nx = false;
2720         return 0;
2721 }
2722
2723 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2724 {
2725         ++vcpu->stat.tlb_flush;
2726         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2727 }
2728
2729 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2730 {
2731         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2732         mmu_free_roots(vcpu);
2733 }
2734
2735 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
2736 {
2737         return vcpu->arch.cr3;
2738 }
2739
2740 static void inject_page_fault(struct kvm_vcpu *vcpu)
2741 {
2742         vcpu->arch.mmu.inject_page_fault(vcpu);
2743 }
2744
2745 static void paging_free(struct kvm_vcpu *vcpu)
2746 {
2747         nonpaging_free(vcpu);
2748 }
2749
2750 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2751 {
2752         int bit7;
2753
2754         bit7 = (gpte >> 7) & 1;
2755         return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2756 }
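
/*
 * Example: bit 7 of a gpte selects the "large page" row of the mask
 * table, so a PAE PDE (level 2) with PS set is checked against
 * rsvd_bits_mask[1][1]. In a 4K PTE bit 7 is PAT rather than PS, which
 * is why reset_rsvds_bits_mask() below sets rsvd_bits_mask[1][0] equal
 * to rsvd_bits_mask[0][0].
 */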
2757
2758 #define PTTYPE 64
2759 #include "paging_tmpl.h"
2760 #undef PTTYPE
2761
2762 #define PTTYPE 32
2763 #include "paging_tmpl.h"
2764 #undef PTTYPE
2765
2766 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2767                                   struct kvm_mmu *context,
2768                                   int level)
2769 {
2770         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2771         u64 exb_bit_rsvd = 0;
2772
2773         if (!context->nx)
2774                 exb_bit_rsvd = rsvd_bits(63, 63);
2775         switch (level) {
2776         case PT32_ROOT_LEVEL:
2777                 /* no rsvd bits for 2 level 4K page table entries */
2778                 context->rsvd_bits_mask[0][1] = 0;
2779                 context->rsvd_bits_mask[0][0] = 0;
2780                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2781
2782                 if (!is_pse(vcpu)) {
2783                         context->rsvd_bits_mask[1][1] = 0;
2784                         break;
2785                 }
2786
2787                 if (is_cpuid_PSE36())
2788                         /* 36bits PSE 4MB page */
2789                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2790                 else
2791                         /* 32 bits PSE 4MB page */
2792                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2793                 break;
2794         case PT32E_ROOT_LEVEL:
2795                 context->rsvd_bits_mask[0][2] =
2796                         rsvd_bits(maxphyaddr, 63) |
2797                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2798                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2799                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2800                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2801                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2802                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2803                         rsvd_bits(maxphyaddr, 62) |
2804                         rsvd_bits(13, 20);              /* large page */
2805                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2806                 break;
2807         case PT64_ROOT_LEVEL:
2808                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2809                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2810                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2811                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2812                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2813                         rsvd_bits(maxphyaddr, 51);
2814                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2815                         rsvd_bits(maxphyaddr, 51);
2816                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2817                 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2818                         rsvd_bits(maxphyaddr, 51) |
2819                         rsvd_bits(13, 29);
2820                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2821                         rsvd_bits(maxphyaddr, 51) |
2822                         rsvd_bits(13, 20);              /* large page */
2823                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2824                 break;
2825         }
2826 }
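
/*
 * Worked example (illustrative numbers): with maxphyaddr == 40 and NX
 * disabled (so exb_bit_rsvd == rsvd_bits(63, 63)), the PAE large-page
 * mask built above becomes
 *
 *      rsvd_bits_mask[1][1] = rsvd_bits(63, 63) | rsvd_bits(40, 62)
 *                           | rsvd_bits(13, 20),
 *
 * i.e. bit 63, bits 62:40 and bits 20:13 must all be clear in a valid
 * 2MB PDE.
 */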
2827
2828 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
2829                                         struct kvm_mmu *context,
2830                                         int level)
2831 {
2832         context->nx = is_nx(vcpu);
2833
2834         reset_rsvds_bits_mask(vcpu, context, level);
2835
2836         ASSERT(is_pae(vcpu));
2837         context->new_cr3 = paging_new_cr3;
2838         context->page_fault = paging64_page_fault;
2839         context->gva_to_gpa = paging64_gva_to_gpa;
2840         context->prefetch_page = paging64_prefetch_page;
2841         context->sync_page = paging64_sync_page;
2842         context->invlpg = paging64_invlpg;
2843         context->free = paging_free;
2844         context->root_level = level;
2845         context->shadow_root_level = level;
2846         context->root_hpa = INVALID_PAGE;
2847         context->direct_map = false;
2848         return 0;
2849 }
2850
2851 static int paging64_init_context(struct kvm_vcpu *vcpu,
2852                                  struct kvm_mmu *context)
2853 {
2854         return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
2855 }
2856
2857 static int paging32_init_context(struct kvm_vcpu *vcpu,
2858                                  struct kvm_mmu *context)
2859 {
2860         context->nx = false;
2861
2862         reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2863
2864         context->new_cr3 = paging_new_cr3;
2865         context->page_fault = paging32_page_fault;
2866         context->gva_to_gpa = paging32_gva_to_gpa;
2867         context->free = paging_free;
2868         context->prefetch_page = paging32_prefetch_page;
2869         context->sync_page = paging32_sync_page;
2870         context->invlpg = paging32_invlpg;
2871         context->root_level = PT32_ROOT_LEVEL;
2872         context->shadow_root_level = PT32E_ROOT_LEVEL;
2873         context->root_hpa = INVALID_PAGE;
2874         context->direct_map = false;
2875         return 0;
2876 }
2877
2878 static int paging32E_init_context(struct kvm_vcpu *vcpu,
2879                                   struct kvm_mmu *context)
2880 {
2881         return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
2882 }
2883
2884 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2885 {
2886         struct kvm_mmu *context = vcpu->arch.walk_mmu;
2887
2888         context->new_cr3 = nonpaging_new_cr3;
2889         context->page_fault = tdp_page_fault;
2890         context->free = nonpaging_free;
2891         context->prefetch_page = nonpaging_prefetch_page;
2892         context->sync_page = nonpaging_sync_page;
2893         context->invlpg = nonpaging_invlpg;
2894         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2895         context->root_hpa = INVALID_PAGE;
2896         context->direct_map = true;
2897         context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
2898         context->get_cr3 = get_cr3;
2899         context->inject_page_fault = kvm_inject_page_fault;
2900         context->nx = is_nx(vcpu);
2901
2902         if (!is_paging(vcpu)) {
2903                 context->nx = false;
2904                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2905                 context->root_level = 0;
2906         } else if (is_long_mode(vcpu)) {
2907                 context->nx = is_nx(vcpu);
2908                 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
2909                 context->gva_to_gpa = paging64_gva_to_gpa;
2910                 context->root_level = PT64_ROOT_LEVEL;
2911         } else if (is_pae(vcpu)) {
2912                 context->nx = is_nx(vcpu);
2913                 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
2914                 context->gva_to_gpa = paging64_gva_to_gpa;
2915                 context->root_level = PT32E_ROOT_LEVEL;
2916         } else {
2917                 context->nx = false;
2918                 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2919                 context->gva_to_gpa = paging32_gva_to_gpa;
2920                 context->root_level = PT32_ROOT_LEVEL;
2921         }
2922
2923         return 0;
2924 }
2925
2926 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
2927 {
2928         int r;
2929         ASSERT(vcpu);
2930         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2931
2932         if (!is_paging(vcpu))
2933                 r = nonpaging_init_context(vcpu, context);
2934         else if (is_long_mode(vcpu))
2935                 r = paging64_init_context(vcpu, context);
2936         else if (is_pae(vcpu))
2937                 r = paging32E_init_context(vcpu, context);
2938         else
2939                 r = paging32_init_context(vcpu, context);
2940
2941         vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
2942         vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
2943
2944         return r;
2945 }
2946 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
2947
2948 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2949 {
2950         int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
2951
2952         vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
2953         vcpu->arch.walk_mmu->get_cr3           = get_cr3;
2954         vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
2955
2956         return r;
2957 }
2958
2959 static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
2960 {
2961         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
2962
2963         g_context->get_cr3           = get_cr3;
2964         g_context->inject_page_fault = kvm_inject_page_fault;
2965
2966         /*
2967          * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
2968          * translation of l2_gpa to l1_gpa addresses is done using the
2969          * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
2970          * functions between mmu and nested_mmu are swapped.
2971          */
2972         if (!is_paging(vcpu)) {
2973                 g_context->nx = false;
2974                 g_context->root_level = 0;
2975                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
2976         } else if (is_long_mode(vcpu)) {
2977                 g_context->nx = is_nx(vcpu);
2978                 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
2979                 g_context->root_level = PT64_ROOT_LEVEL;
2980                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
2981         } else if (is_pae(vcpu)) {
2982                 g_context->nx = is_nx(vcpu);
2983                 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
2984                 g_context->root_level = PT32E_ROOT_LEVEL;
2985                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
2986         } else {
2987                 g_context->nx = false;
2988                 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
2989                 g_context->root_level = PT32_ROOT_LEVEL;
2990                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
2991         }
2992
2993         return 0;
2994 }
2995
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
        vcpu->arch.update_pte.pfn = bad_pfn;

        if (mmu_is_nested(vcpu))
                return init_kvm_nested_mmu(vcpu);
        else if (tdp_enabled)
                return init_kvm_tdp_mmu(vcpu);
        else
                return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
                /* mmu.free() should set root_hpa = INVALID_PAGE */
                vcpu->arch.mmu.free(vcpu);
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

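/*
 * Load the MMU for guest entry: top up the per-vcpu memory caches,
 * allocate the shadow root(s), synchronize unsync children and point
 * the hardware at the new root via set_cr3().
 */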
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;
        r = mmu_alloc_roots(vcpu);
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (r)
                goto out;
        /* set_cr3() should ensure TLB has been flushed */
        vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

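/*
 * Clear a single spte in response to a guest pte write.  A last-level
 * spte is dropped outright; a non-leaf spte only has its link to the
 * child shadow page removed.
 */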
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp,
                                  u64 *spte)
{
        u64 pte;
        struct kvm_mmu_page *child;

        pte = *spte;
        if (is_shadow_present_pte(pte)) {
                if (is_last_spte(pte, sp->role.level))
                        drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);
                }
        }
        __set_spte(spte, shadow_trap_nonpresent_pte);
        if (is_large_pte(pte))
                --vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp,
                                  u64 *spte,
                                  const void *new)
{
        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
                ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
        }

        if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
                return;

        ++vcpu->kvm->stat.mmu_pte_updated;
        if (!sp->role.cr4_pae)
                paging32_update_pte(vcpu, sp, spte, new);
        else
                paging64_update_pte(vcpu, sp, spte, new);
}

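/*
 * Decide whether other vcpus must flush their TLBs after an spte
 * update.  A remote flush is needed only if a present spte changed in
 * a way that could leave a stale, more permissive translation cached:
 * the target page changed, or a permission was revoked (NX is
 * inverted first so that setting it counts as a revocation).
 */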
static bool need_remote_flush(u64 old, u64 new)
{
        if (!is_shadow_present_pte(old))
                return false;
        if (!is_shadow_present_pte(new))
                return true;
        if ((old ^ new) & PT64_BASE_ADDR_MASK)
                return true;
        old ^= PT64_NX_MASK;
        new ^= PT64_NX_MASK;
        return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
                                    bool remote_flush, bool local_flush)
{
        if (zap_page)
                return;

        if (remote_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
        else if (local_flush)
                kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
        u64 *spte = vcpu->arch.last_pte_updated;

        return !!(spte && (*spte & shadow_accessed_mask));
}

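/*
 * Speculatively look up the pfn that a newly written gpte points at,
 * before mmu_lock is taken, so that update_pte() can map it without
 * sleeping.  The mmu notifier sequence number is sampled first so the
 * result can be discarded if the mapping changes in the meantime.
 */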
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                          u64 gpte)
{
        gfn_t gfn;
        pfn_t pfn;

        if (!is_present_gpte(gpte))
                return;
        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

        vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return;
        }
        vcpu->arch.update_pte.gfn = gfn;
        vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        u64 *spte = vcpu->arch.last_pte_updated;

        if (spte
            && vcpu->arch.last_pte_gfn == gfn
            && shadow_accessed_mask
            && !(*spte & shadow_accessed_mask)
            && is_shadow_present_pte(*spte))
                set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

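/*
 * Handle a guest write to a page that is shadowed as a page table.
 * The affected sptes are zapped and, when the write looks like a
 * valid gpte update, re-instantiated from the new value.  Misaligned
 * writes and pages that are written too often are taken as a hint
 * that the page is no longer used as a page table, and the shadow
 * page is zapped instead.
 */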
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes,
                       bool guest_initiated)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry;
        u64 *spte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
        unsigned quadrant;
        int level;
        int flooded = 0;
        int npte;
        int r;
        int invlpg_counter;
        bool remote_flush, local_flush, zap_page;

        zap_page = remote_flush = local_flush = false;

        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

        invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);

        /*
         * Assume that the pte write is on a page table of the same type
         * as the current vcpu paging mode.  This is nearly always true
         * (it might be false while the guest is changing modes).  Note
         * that this assumption is verified later by update_pte().
         */
        if ((is_pae(vcpu) && bytes == 4) || !new) {
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                if (is_pae(vcpu)) {
                        gpa &= ~(gpa_t)7;
                        bytes = 8;
                }
                r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
                if (r)
                        gentry = 0;
                new = (const u8 *)&gentry;
        }

        switch (bytes) {
        case 4:
                gentry = *(const u32 *)new;
                break;
        case 8:
                gentry = *(const u64 *)new;
                break;
        default:
                gentry = 0;
                break;
        }

        mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
        kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
                        ++vcpu->arch.last_pt_write_count;
                        if (vcpu->arch.last_pt_write_count >= 3)
                                flooded = 1;
                } else {
                        vcpu->arch.last_pt_write_gfn = gfn;
                        vcpu->arch.last_pt_write_count = 1;
                        vcpu->arch.last_pte_updated = NULL;
                }
        }

        mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
                pte_size = sp->role.cr4_pae ? 8 : 4;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
                page_offset = offset;
                level = sp->role.level;
                npte = 1;
                if (!sp->role.cr4_pae) {
                        page_offset <<= 1;      /* 32->64 */
                        /*
                         * A 32-bit pde maps 4MB while the shadow pdes map
                         * only 2MB.  So we need to double the offset again
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
                                page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
                        quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
                        if (quadrant != sp->role.quadrant)
                                continue;
                }
                local_flush = true;
                spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        entry = *spte;
                        mmu_pte_write_zap_pte(vcpu, sp, spte);
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
                              & mask.word))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
                        ++spte;
                }
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
                kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
                vcpu->arch.update_pte.pfn = bad_pfn;
        }
}

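/*
 * Unprotect the shadowed page that maps the given guest virtual
 * address, typically so that an instruction which faulted while
 * writing a guest page table can be retried.
 */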
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa;
        int r;

        if (vcpu->arch.mmu.direct_map)
                return 0;

        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        spin_unlock(&vcpu->kvm->mmu_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

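/*
 * Recycle shadow pages, starting at the tail of the active list, until
 * the number of free pages rises back to KVM_REFILL_PAGES.
 */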
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        LIST_HEAD(invalid_list);

        while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
                struct kvm_mmu_page *sp;

                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
}

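/*
 * Top-level page fault handler.  Returns 1 if the fault was handled
 * in the kernel (fixed or successfully emulated), 0 if it must be
 * forwarded to userspace, and a negative errno on failure.
 */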
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
        int r;
        enum emulation_result er;

        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
        if (r < 0)
                goto out;

        if (!r) {
                r = 1;
                goto out;
        }

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;

        er = emulate_instruction(vcpu, cr2, error_code, 0);

        switch (er) {
        case EMULATE_DONE:
                return 1;
        case EMULATE_DO_MMIO:
                ++vcpu->stat.mmio_exits;
                /* fall through */
        case EMULATE_FAIL:
                return 0;
        default:
                BUG();
        }
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
        vcpu->arch.mmu.invlpg(vcpu, gva);
        kvm_mmu_flush_tlb(vcpu);
        ++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

void kvm_enable_tdp(void)
{
        tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
        tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        free_page((unsigned long)vcpu->arch.mmu.pae_root);
        if (vcpu->arch.mmu.lm_root != NULL)
                free_page((unsigned long)vcpu->arch.mmu.lm_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                return -ENOMEM;

        vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

        return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        return init_kvm_mmu(vcpu);
}

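/*
 * Write-protect all sptes that map pages belonging to the given memory
 * slot, e.g. when dirty logging is enabled, and flush the TLBs so the
 * protection takes effect everywhere.
 */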
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, sp->slot_bitmap))
                        continue;

                pt = sp->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (is_writable_pte(pt[i]))
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
}

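/*
 * Zap every shadow page in the VM.  Zapping one page may invalidate
 * others on the list, so the walk is restarted whenever a page was
 * actually zapped.
 */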
void kvm_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);

        spin_lock(&kvm->mmu_lock);
restart:
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
                        goto restart;

        kvm_mmu_commit_zap_page(kvm, &invalid_list);
        spin_unlock(&kvm->mmu_lock);
}

static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
                                               struct list_head *invalid_list)
{
        struct kvm_mmu_page *page;

        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}

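/*
 * Memory shrinker callback.  Frees shadow pages from at most one VM
 * per invocation and rotates that VM to the tail of vm_list so the
 * pain is spread evenly; returns the global count of used mmu pages
 * for the shrinker core.
 */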
static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
        struct kvm *kvm;
        struct kvm *kvm_freed = NULL;

        if (nr_to_scan == 0)
                goto out;

        spin_lock(&kvm_lock);

        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx, freed_pages;
                LIST_HEAD(invalid_list);

                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
                if (!kvm_freed && nr_to_scan > 0 &&
                    kvm->arch.n_used_mmu_pages > 0) {
                        freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
                                                          &invalid_list);
                        kvm_freed = kvm;
                }
                nr_to_scan--;

                kvm_mmu_commit_zap_page(kvm, &invalid_list);
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
        }
        if (kvm_freed)
                list_move_tail(&kvm_freed->vm_list, &vm_list);

        spin_unlock(&kvm_lock);

out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
        .shrink = mmu_shrink,
        .seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
        if (pte_chain_cache)
                kmem_cache_destroy(pte_chain_cache);
        if (rmap_desc_cache)
                kmem_cache_destroy(rmap_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
}

void kvm_mmu_module_exit(void)
{
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
        unregister_shrinker(&mmu_shrinker);
}

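/*
 * Create the slab caches used for shadow page bookkeeping and register
 * the mmu shrinker.  Any allocation failure unwinds the caches created
 * so far.
 */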
int kvm_mmu_module_init(void)
{
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
                                            sizeof(struct kvm_pte_chain),
                                            0, 0, NULL);
        if (!pte_chain_cache)
                goto nomem;
        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
                                            sizeof(struct kvm_rmap_desc),
                                            0, 0, NULL);
        if (!rmap_desc_cache)
                goto nomem;

        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;

        if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
                goto nomem;

        register_shrinker(&mmu_shrinker);

        return 0;

nomem:
        mmu_destroy_caches();
        return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
        int i;
        unsigned int nr_mmu_pages;
        unsigned int nr_pages = 0;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++)
                nr_pages += slots->memslots[i].npages;

        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
                        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

        return nr_mmu_pages;
}

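/*
 * Helpers for parsing the guest-supplied buffer of paravirtual mmu
 * operations: peek returns a pointer without consuming anything,
 * read also advances the buffer.
 */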
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
                                unsigned len)
{
        if (len > buffer->len)
                return NULL;
        return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
                                unsigned len)
{
        void *ret;

        ret = pv_mmu_peek_buffer(buffer, len);
        if (!ret)
                return ret;
        buffer->ptr += len;
        buffer->len -= len;
        buffer->processed += len;
        return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
                             gpa_t addr, gpa_t value)
{
        int bytes = 8;
        int r;

        if (!is_long_mode(vcpu) && !is_pae(vcpu))
                bytes = 4;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        if (!emulator_write_phys(vcpu, addr, &value, bytes))
                return -EFAULT;

        return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        (void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
        return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 1;
}

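/*
 * Decode and execute a single paravirtual mmu operation from the
 * buffer.  Returns 0 when the buffer is exhausted or the opcode is
 * unknown, otherwise the result of the operation.
 */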
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
                             struct kvm_pv_mmu_op_buffer *buffer)
{
        struct kvm_mmu_op_header *header;

        header = pv_mmu_peek_buffer(buffer, sizeof *header);
        if (!header)
                return 0;
        switch (header->op) {
        case KVM_MMU_OP_WRITE_PTE: {
                struct kvm_mmu_op_write_pte *wpte;

                wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
                if (!wpte)
                        return 0;
                return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
                                        wpte->pte_val);
        }
        case KVM_MMU_OP_FLUSH_TLB: {
                struct kvm_mmu_op_flush_tlb *ftlb;

                ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
                if (!ftlb)
                        return 0;
                return kvm_pv_mmu_flush_tlb(vcpu);
        }
        case KVM_MMU_OP_RELEASE_PT: {
                struct kvm_mmu_op_release_pt *rpt;

                rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
                if (!rpt)
                        return 0;
                return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
        }
        default: return 0;
        }
}

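/*
 * Entry point for the guest's paravirtual mmu hypercall: copy the
 * operation buffer from guest memory and execute the queued
 * operations until one fails or the buffer is drained.  *ret reports
 * how many bytes were processed.
 */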
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret)
{
        int r;
        struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

        buffer->ptr = buffer->buf;
        buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
        buffer->processed = 0;

        r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
        if (r)
                goto out;

        while (buffer->len) {
                r = kvm_pv_mmu_op_one(vcpu, buffer);
                if (r < 0)
                        goto out;
                if (r == 0)
                        break;
        }

        r = 1;
out:
        *ret = buffer->processed;
        return r;
}

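/*
 * Walk the shadow page table for @addr and copy out the spte at each
 * level, stopping at the first non-present entry.  Returns the number
 * of sptes recorded; used, for instance, when diagnosing EPT
 * misconfigurations.
 */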
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
        struct kvm_shadow_walk_iterator iterator;
        int nr_sptes = 0;

        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, addr, iterator) {
                sptes[iterator.level-1] = *iterator.sptep;
                nr_sptes++;
                if (!is_shadow_present_pte(*iterator.sptep))
                        break;
        }
        spin_unlock(&vcpu->kvm->mmu_lock);

        return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void mmu_audit_disable(void) { }
#endif

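/*
 * Final per-vcpu teardown: release the active roots, the pae_root and
 * lm_root pages and the per-vcpu memory caches, and disable mmu
 * auditing.
 */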
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
        mmu_audit_disable();
}