KVM: MMU: remove unused macros
pandora-kernel.git: arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11  *
12  * Authors:
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Avi Kivity   <avi@qumranet.com>
15  *
16  * This work is licensed under the terms of the GNU GPL, version 2.  See
17  * the COPYING file in the top-level directory.
18  *
19  */
20
21 #include "irq.h"
22 #include "mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25
26
27 #include <linux/kvm_host.h>
28 #include <linux/types.h>
29 #include <linux/string.h>
30 #include <linux/mm.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/swap.h>
34 #include <linux/hugetlb.h>
35 #include <linux/compiler.h>
36 #include <linux/srcu.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39
40 #include <asm/page.h>
41 #include <asm/cmpxchg.h>
42 #include <asm/io.h>
43 #include <asm/vmx.h>
44
45 /*
46  * When set to true this variable enables Two-Dimensional Paging (TDP),
47  * where the hardware walks two page tables:
48  * 1. the guest-virtual to guest-physical table
49  * 2. while doing 1., the guest-physical to host-physical table
50  * If the hardware supports this, we don't need to do shadow paging.
51  */
52 bool tdp_enabled = false;
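
/*
 * A rough sketch of the two translations described above: with TDP
 * (EPT on Intel, NPT on AMD) a guest memory access is resolved in two
 * hardware-walked stages,
 *
 *      GVA --(guest page tables, owned by the guest)--> GPA
 *      GPA --(second-level tables, owned by KVM)------> HPA
 *
 * whereas with tdp_enabled == false KVM builds shadow page tables that map
 * GVA directly to HPA and must keep them coherent with the guest's tables,
 * which is what most of this file implements.
 */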
53
54 enum {
55         AUDIT_PRE_PAGE_FAULT,
56         AUDIT_POST_PAGE_FAULT,
57         AUDIT_PRE_PTE_WRITE,
58         AUDIT_POST_PTE_WRITE,
59         AUDIT_PRE_SYNC,
60         AUDIT_POST_SYNC
61 };
62
63 char *audit_point_name[] = {
64         "pre page fault",
65         "post page fault",
66         "pre pte write",
67         "post pte write",
68         "pre sync",
69         "post sync"
70 };
71
72 #undef MMU_DEBUG
73
74 #ifdef MMU_DEBUG
75
76 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
77 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
78
79 #else
80
81 #define pgprintk(x...) do { } while (0)
82 #define rmap_printk(x...) do { } while (0)
83
84 #endif
85
86 #ifdef MMU_DEBUG
87 static int dbg = 0;
88 module_param(dbg, bool, 0644);
89 #endif
90
91 static int oos_shadow = 1;
92 module_param(oos_shadow, bool, 0644);
93
94 #ifndef MMU_DEBUG
95 #define ASSERT(x) do { } while (0)
96 #else
97 #define ASSERT(x)                                                       \
98         if (!(x)) {                                                     \
99                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
100                        __FILE__, __LINE__, #x);                         \
101         }
102 #endif
103
104 #define PTE_PREFETCH_NUM                8
105
106 #define PT_FIRST_AVAIL_BITS_SHIFT 9
107 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
108
109 #define PT64_LEVEL_BITS 9
110
111 #define PT64_LEVEL_SHIFT(level) \
112                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
113
114 #define PT64_INDEX(address, level)\
115         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
116
117
118 #define PT32_LEVEL_BITS 10
119
120 #define PT32_LEVEL_SHIFT(level) \
121                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
122
123 #define PT32_LVL_OFFSET_MASK(level) \
124         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
125                                                 * PT32_LEVEL_BITS))) - 1))
126
127 #define PT32_INDEX(address, level)\
128         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
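
/*
 * A worked example of the index macros above, assuming the usual 4KiB
 * pages (PAGE_SHIFT == 12):
 *
 *   PT64_LEVEL_SHIFT(1) == 12    PT64_LEVEL_SHIFT(3) == 30
 *   PT64_LEVEL_SHIFT(2) == 21    PT64_LEVEL_SHIFT(4) == 39
 *
 * so a 64-bit walk picks a 9-bit index at each level:
 *
 *   PT64_INDEX(addr, 4) == (addr >> 39) & 511       PML4 index
 *   PT64_INDEX(addr, 3) == (addr >> 30) & 511       PDPT index
 *   PT64_INDEX(addr, 2) == (addr >> 21) & 511       page-directory index
 *   PT64_INDEX(addr, 1) == (addr >> 12) & 511       page-table index
 *
 * The PT32 variants follow the same pattern with 10-bit indices into
 * 1024-entry tables.
 */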
129
130
131 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
132 #define PT64_DIR_BASE_ADDR_MASK \
133         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
134 #define PT64_LVL_ADDR_MASK(level) \
135         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
136                                                 * PT64_LEVEL_BITS))) - 1))
137 #define PT64_LVL_OFFSET_MASK(level) \
138         (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
139                                                 * PT64_LEVEL_BITS))) - 1))
140
141 #define PT32_BASE_ADDR_MASK PAGE_MASK
142 #define PT32_DIR_BASE_ADDR_MASK \
143         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
144 #define PT32_LVL_ADDR_MASK(level) \
145         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
146                                             * PT32_LEVEL_BITS))) - 1))
147
148 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
149                         | PT64_NX_MASK)
150
151 #define RMAP_EXT 4
152
153 #define ACC_EXEC_MASK    1
154 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
155 #define ACC_USER_MASK    PT_USER_MASK
156 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
157
158 #include <trace/events/kvm.h>
159
160 #define CREATE_TRACE_POINTS
161 #include "mmutrace.h"
162
163 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
164
165 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
166
167 struct kvm_rmap_desc {
168         u64 *sptes[RMAP_EXT];
169         struct kvm_rmap_desc *more;
170 };
171
172 struct kvm_shadow_walk_iterator {
173         u64 addr;
174         hpa_t shadow_addr;
175         int level;
176         u64 *sptep;
177         unsigned index;
178 };
179
180 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
181         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
182              shadow_walk_okay(&(_walker));                      \
183              shadow_walk_next(&(_walker)))
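
/*
 * A minimal usage sketch of the iterator above (the real callers appear
 * further down in this file): walk the shadow page table for addr from the
 * root towards the leaf, one level per step, with it.sptep pointing at the
 * shadow pte covering addr at it.level:
 *
 *      struct kvm_shadow_walk_iterator it;
 *
 *      for_each_shadow_entry(vcpu, addr, it)
 *              if (it.level == PT_PAGE_TABLE_LEVEL)
 *                      break;
 *
 * Note that the iterator itself never checks whether an intermediate entry
 * is present, so real callers also test is_shadow_present_pte() and either
 * bail out or install a new page table before descending further.
 */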
184
185 typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
186
187 static struct kmem_cache *pte_chain_cache;
188 static struct kmem_cache *rmap_desc_cache;
189 static struct kmem_cache *mmu_page_header_cache;
190 static struct percpu_counter kvm_total_used_mmu_pages;
191
192 static u64 __read_mostly shadow_trap_nonpresent_pte;
193 static u64 __read_mostly shadow_notrap_nonpresent_pte;
194 static u64 __read_mostly shadow_nx_mask;
195 static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
196 static u64 __read_mostly shadow_user_mask;
197 static u64 __read_mostly shadow_accessed_mask;
198 static u64 __read_mostly shadow_dirty_mask;
199
200 static inline u64 rsvd_bits(int s, int e)
201 {
202         return ((1ULL << (e - s + 1)) - 1) << s;
203 }
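
/*
 * For illustration, rsvd_bits(s, e) builds a mask with bits s..e
 * (inclusive) set:
 *
 *      rsvd_bits(52, 62) == ((1ULL << 11) - 1) << 52
 *                        == 0x7ff0000000000000ULL
 *
 * which is the usual way reserved physical-address bits (for example the
 * bits above the CPU's MAXPHYADDR) are expressed when validating guest
 * page table entries.
 */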
204
205 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
206 {
207         shadow_trap_nonpresent_pte = trap_pte;
208         shadow_notrap_nonpresent_pte = notrap_pte;
209 }
210 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
211
212 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
213                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
214 {
215         shadow_user_mask = user_mask;
216         shadow_accessed_mask = accessed_mask;
217         shadow_dirty_mask = dirty_mask;
218         shadow_nx_mask = nx_mask;
219         shadow_x_mask = x_mask;
220 }
221 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
222
223 static bool is_write_protection(struct kvm_vcpu *vcpu)
224 {
225         return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
226 }
227
228 static int is_cpuid_PSE36(void)
229 {
230         return 1;
231 }
232
233 static int is_nx(struct kvm_vcpu *vcpu)
234 {
235         return vcpu->arch.efer & EFER_NX;
236 }
237
238 static int is_shadow_present_pte(u64 pte)
239 {
240         return pte != shadow_trap_nonpresent_pte
241                 && pte != shadow_notrap_nonpresent_pte;
242 }
243
244 static int is_large_pte(u64 pte)
245 {
246         return pte & PT_PAGE_SIZE_MASK;
247 }
248
249 static int is_writable_pte(unsigned long pte)
250 {
251         return pte & PT_WRITABLE_MASK;
252 }
253
254 static int is_dirty_gpte(unsigned long pte)
255 {
256         return pte & PT_DIRTY_MASK;
257 }
258
259 static int is_rmap_spte(u64 pte)
260 {
261         return is_shadow_present_pte(pte);
262 }
263
264 static int is_last_spte(u64 pte, int level)
265 {
266         if (level == PT_PAGE_TABLE_LEVEL)
267                 return 1;
268         if (is_large_pte(pte))
269                 return 1;
270         return 0;
271 }
272
273 static pfn_t spte_to_pfn(u64 pte)
274 {
275         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
276 }
277
278 static gfn_t pse36_gfn_delta(u32 gpte)
279 {
280         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
281
282         return (gpte & PT32_DIR_PSE36_MASK) << shift;
283 }
284
285 static void __set_spte(u64 *sptep, u64 spte)
286 {
287         set_64bit(sptep, spte);
288 }
289
290 static u64 __xchg_spte(u64 *sptep, u64 new_spte)
291 {
292 #ifdef CONFIG_X86_64
293         return xchg(sptep, new_spte);
294 #else
295         u64 old_spte;
296
297         do {
298                 old_spte = *sptep;
299         } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
300
301         return old_spte;
302 #endif
303 }
304
305 static bool spte_has_volatile_bits(u64 spte)
306 {
307         if (!shadow_accessed_mask)
308                 return false;
309
310         if (!is_shadow_present_pte(spte))
311                 return false;
312
313         if ((spte & shadow_accessed_mask) &&
314               (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
315                 return false;
316
317         return true;
318 }
319
320 static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
321 {
322         return (old_spte & bit_mask) && !(new_spte & bit_mask);
323 }
324
325 static void update_spte(u64 *sptep, u64 new_spte)
326 {
327         u64 mask, old_spte = *sptep;
328
329         WARN_ON(!is_rmap_spte(new_spte));
330
331         new_spte |= old_spte & shadow_dirty_mask;
332
333         mask = shadow_accessed_mask;
334         if (is_writable_pte(old_spte))
335                 mask |= shadow_dirty_mask;
336
337         if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
338                 __set_spte(sptep, new_spte);
339         else
340                 old_spte = __xchg_spte(sptep, new_spte);
341
342         if (!shadow_accessed_mask)
343                 return;
344
345         if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
346                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
347         if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
348                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
349 }
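
/*
 * A sketch of the race the atomic exchange above closes: while KVM is
 * rewriting a present, writable spte, the hardware may concurrently set
 * its Accessed/Dirty bits on behalf of the guest.
 *
 *      CPU A (KVM)                     CPU B (guest access, hw walker)
 *      old_spte = *sptep;
 *                                      sets Accessed/Dirty in *sptep
 *      *sptep = new_spte;              a plain store would lose those bits
 *
 * __xchg_spte() returns the value that was actually in the spte, including
 * any bits the hardware set in the meantime, so the accessed/dirty state
 * can still be propagated to the backing page via kvm_set_pfn_accessed()
 * and kvm_set_pfn_dirty() above.
 */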
350
351 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
352                                   struct kmem_cache *base_cache, int min)
353 {
354         void *obj;
355
356         if (cache->nobjs >= min)
357                 return 0;
358         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
359                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
360                 if (!obj)
361                         return -ENOMEM;
362                 cache->objects[cache->nobjs++] = obj;
363         }
364         return 0;
365 }
366
367 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
368                                   struct kmem_cache *cache)
369 {
370         while (mc->nobjs)
371                 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
372 }
373
374 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
375                                        int min)
376 {
377         void *page;
378
379         if (cache->nobjs >= min)
380                 return 0;
381         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
382                 page = (void *)__get_free_page(GFP_KERNEL);
383                 if (!page)
384                         return -ENOMEM;
385                 cache->objects[cache->nobjs++] = page;
386         }
387         return 0;
388 }
389
390 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
391 {
392         while (mc->nobjs)
393                 free_page((unsigned long)mc->objects[--mc->nobjs]);
394 }
395
396 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
397 {
398         int r;
399
400         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
401                                    pte_chain_cache, 4);
402         if (r)
403                 goto out;
404         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
405                                    rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
406         if (r)
407                 goto out;
408         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
409         if (r)
410                 goto out;
411         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
412                                    mmu_page_header_cache, 4);
413 out:
414         return r;
415 }
416
417 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
418 {
419         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
420         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
421         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
422         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
423                                 mmu_page_header_cache);
424 }
425
426 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
427                                     size_t size)
428 {
429         void *p;
430
431         BUG_ON(!mc->nobjs);
432         p = mc->objects[--mc->nobjs];
433         return p;
434 }
435
436 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
437 {
438         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
439                                       sizeof(struct kvm_pte_chain));
440 }
441
442 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
443 {
444         kmem_cache_free(pte_chain_cache, pc);
445 }
446
447 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
448 {
449         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
450                                       sizeof(struct kvm_rmap_desc));
451 }
452
453 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
454 {
455         kmem_cache_free(rmap_desc_cache, rd);
456 }
457
458 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
459 {
460         if (!sp->role.direct)
461                 return sp->gfns[index];
462
463         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
464 }
465
466 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
467 {
468         if (sp->role.direct)
469                 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
470         else
471                 sp->gfns[index] = gfn;
472 }
473
474 /*
475  * Return the pointer to the large page information for a given gfn,
476  * handling slots that are not large page aligned.
477  */
478 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
479                                               struct kvm_memory_slot *slot,
480                                               int level)
481 {
482         unsigned long idx;
483
484         idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
485               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
486         return &slot->lpage_info[level - 2][idx];
487 }
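
/*
 * A worked example of the index computation above, assuming x86 with 4KiB
 * base pages and level == PT_DIRECTORY_LEVEL (2MB pages), for which
 * KVM_HPAGE_GFN_SHIFT(level) == 9:
 *
 *      idx = (gfn >> 9) - (slot->base_gfn >> 9);
 *
 * Both the gfn and the slot's base_gfn are rounded down to a large-page
 * boundary before subtracting, so a slot that does not start on a 2MB
 * boundary still indexes its lpage_info[] array consistently.
 */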
488
489 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
490 {
491         struct kvm_memory_slot *slot;
492         struct kvm_lpage_info *linfo;
493         int i;
494
495         slot = gfn_to_memslot(kvm, gfn);
496         for (i = PT_DIRECTORY_LEVEL;
497              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
498                 linfo = lpage_info_slot(gfn, slot, i);
499                 linfo->write_count += 1;
500         }
501 }
502
503 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
504 {
505         struct kvm_memory_slot *slot;
506         struct kvm_lpage_info *linfo;
507         int i;
508
509         slot = gfn_to_memslot(kvm, gfn);
510         for (i = PT_DIRECTORY_LEVEL;
511              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
512                 linfo = lpage_info_slot(gfn, slot, i);
513                 linfo->write_count -= 1;
514                 WARN_ON(linfo->write_count < 0);
515         }
516 }
517
518 static int has_wrprotected_page(struct kvm *kvm,
519                                 gfn_t gfn,
520                                 int level)
521 {
522         struct kvm_memory_slot *slot;
523         struct kvm_lpage_info *linfo;
524
525         slot = gfn_to_memslot(kvm, gfn);
526         if (slot) {
527                 linfo = lpage_info_slot(gfn, slot, level);
528                 return linfo->write_count;
529         }
530
531         return 1;
532 }
533
534 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
535 {
536         unsigned long page_size;
537         int i, ret = 0;
538
539         page_size = kvm_host_page_size(kvm, gfn);
540
541         for (i = PT_PAGE_TABLE_LEVEL;
542              i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
543                 if (page_size >= KVM_HPAGE_SIZE(i))
544                         ret = i;
545                 else
546                         break;
547         }
548
549         return ret;
550 }
551
552 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
553 {
554         struct kvm_memory_slot *slot;
555         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
556         if (slot && slot->dirty_bitmap)
557                 return true;
558         return false;
559 }
560
561 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
562 {
563         int host_level, level, max_level;
564
565         host_level = host_mapping_level(vcpu->kvm, large_gfn);
566
567         if (host_level == PT_PAGE_TABLE_LEVEL)
568                 return host_level;
569
570         max_level = kvm_x86_ops->get_lpage_level() < host_level ?
571                 kvm_x86_ops->get_lpage_level() : host_level;
572
573         for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
574                 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
575                         break;
576
577         return level - 1;
578 }
579
580 /*
581  * Take gfn and return the reverse mapping to it.
582  */
583
584 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
585 {
586         struct kvm_memory_slot *slot;
587         struct kvm_lpage_info *linfo;
588
589         slot = gfn_to_memslot(kvm, gfn);
590         if (likely(level == PT_PAGE_TABLE_LEVEL))
591                 return &slot->rmap[gfn - slot->base_gfn];
592
593         linfo = lpage_info_slot(gfn, slot, level);
594
595         return &linfo->rmap_pde;
596 }
597
598 /*
599  * Reverse mapping data structures:
600  *
601  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
602  * that points to page_address(page).
603  *
604  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
605  * containing more mappings.
606  *
607  * Returns the number of rmap entries before the spte was added or zero if
608  * the spte was not added.
609  *
610  */
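/*
 * A small decoding sketch of the encoding described above (illustrative
 * only; rmap_add()/rmap_remove()/rmap_next() below are the real users):
 *
 *      if (!*rmapp)
 *              ...                     no spte maps this gfn yet
 *      else if (!(*rmapp & 1))
 *              spte = (u64 *)*rmapp;   exactly one spte, stored directly
 *      else
 *              desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 *                                      a chain of descs, each holding up
 *                                      to RMAP_EXT sptes, linked through
 *                                      desc->more
 */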
611 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
612 {
613         struct kvm_mmu_page *sp;
614         struct kvm_rmap_desc *desc;
615         unsigned long *rmapp;
616         int i, count = 0;
617
618         if (!is_rmap_spte(*spte))
619                 return count;
620         sp = page_header(__pa(spte));
621         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
622         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
623         if (!*rmapp) {
624                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
625                 *rmapp = (unsigned long)spte;
626         } else if (!(*rmapp & 1)) {
627                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
628                 desc = mmu_alloc_rmap_desc(vcpu);
629                 desc->sptes[0] = (u64 *)*rmapp;
630                 desc->sptes[1] = spte;
631                 *rmapp = (unsigned long)desc | 1;
632                 ++count;
633         } else {
634                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
635                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
636                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
637                         desc = desc->more;
638                         count += RMAP_EXT;
639                 }
640                 if (desc->sptes[RMAP_EXT-1]) {
641                         desc->more = mmu_alloc_rmap_desc(vcpu);
642                         desc = desc->more;
643                 }
644                 for (i = 0; desc->sptes[i]; ++i)
645                         ++count;
646                 desc->sptes[i] = spte;
647         }
648         return count;
649 }
650
651 static void rmap_desc_remove_entry(unsigned long *rmapp,
652                                    struct kvm_rmap_desc *desc,
653                                    int i,
654                                    struct kvm_rmap_desc *prev_desc)
655 {
656         int j;
657
658         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
659                 ;
660         desc->sptes[i] = desc->sptes[j];
661         desc->sptes[j] = NULL;
662         if (j != 0)
663                 return;
664         if (!prev_desc && !desc->more)
665                 *rmapp = (unsigned long)desc->sptes[0];
666         else
667                 if (prev_desc)
668                         prev_desc->more = desc->more;
669                 else
670                         *rmapp = (unsigned long)desc->more | 1;
671         mmu_free_rmap_desc(desc);
672 }
673
674 static void rmap_remove(struct kvm *kvm, u64 *spte)
675 {
676         struct kvm_rmap_desc *desc;
677         struct kvm_rmap_desc *prev_desc;
678         struct kvm_mmu_page *sp;
679         gfn_t gfn;
680         unsigned long *rmapp;
681         int i;
682
683         sp = page_header(__pa(spte));
684         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
685         rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
686         if (!*rmapp) {
687                 printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
688                 BUG();
689         } else if (!(*rmapp & 1)) {
690                 rmap_printk("rmap_remove:  %p 1->0\n", spte);
691                 if ((u64 *)*rmapp != spte) {
692                         printk(KERN_ERR "rmap_remove:  %p 1->BUG\n", spte);
693                         BUG();
694                 }
695                 *rmapp = 0;
696         } else {
697                 rmap_printk("rmap_remove:  %p many->many\n", spte);
698                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
699                 prev_desc = NULL;
700                 while (desc) {
701                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
702                                 if (desc->sptes[i] == spte) {
703                                         rmap_desc_remove_entry(rmapp,
704                                                                desc, i,
705                                                                prev_desc);
706                                         return;
707                                 }
708                         prev_desc = desc;
709                         desc = desc->more;
710                 }
711                 pr_err("rmap_remove: %p many->many\n", spte);
712                 BUG();
713         }
714 }
715
716 static int set_spte_track_bits(u64 *sptep, u64 new_spte)
717 {
718         pfn_t pfn;
719         u64 old_spte = *sptep;
720
721         if (!spte_has_volatile_bits(old_spte))
722                 __set_spte(sptep, new_spte);
723         else
724                 old_spte = __xchg_spte(sptep, new_spte);
725
726         if (!is_rmap_spte(old_spte))
727                 return 0;
728
729         pfn = spte_to_pfn(old_spte);
730         if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
731                 kvm_set_pfn_accessed(pfn);
732         if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
733                 kvm_set_pfn_dirty(pfn);
734         return 1;
735 }
736
737 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
738 {
739         if (set_spte_track_bits(sptep, new_spte))
740                 rmap_remove(kvm, sptep);
741 }
742
743 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
744 {
745         struct kvm_rmap_desc *desc;
746         u64 *prev_spte;
747         int i;
748
749         if (!*rmapp)
750                 return NULL;
751         else if (!(*rmapp & 1)) {
752                 if (!spte)
753                         return (u64 *)*rmapp;
754                 return NULL;
755         }
756         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
757         prev_spte = NULL;
758         while (desc) {
759                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
760                         if (prev_spte == spte)
761                                 return desc->sptes[i];
762                         prev_spte = desc->sptes[i];
763                 }
764                 desc = desc->more;
765         }
766         return NULL;
767 }
768
769 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
770 {
771         unsigned long *rmapp;
772         u64 *spte;
773         int i, write_protected = 0;
774
775         rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
776
777         spte = rmap_next(kvm, rmapp, NULL);
778         while (spte) {
779                 BUG_ON(!spte);
780                 BUG_ON(!(*spte & PT_PRESENT_MASK));
781                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
782                 if (is_writable_pte(*spte)) {
783                         update_spte(spte, *spte & ~PT_WRITABLE_MASK);
784                         write_protected = 1;
785                 }
786                 spte = rmap_next(kvm, rmapp, spte);
787         }
788
789         /* check for huge page mappings */
790         for (i = PT_DIRECTORY_LEVEL;
791              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
792                 rmapp = gfn_to_rmap(kvm, gfn, i);
793                 spte = rmap_next(kvm, rmapp, NULL);
794                 while (spte) {
795                         BUG_ON(!spte);
796                         BUG_ON(!(*spte & PT_PRESENT_MASK));
797                         BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
798                         pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
799                         if (is_writable_pte(*spte)) {
800                                 drop_spte(kvm, spte,
801                                           shadow_trap_nonpresent_pte);
802                                 --kvm->stat.lpages;
803                                 spte = NULL;
804                                 write_protected = 1;
805                         }
806                         spte = rmap_next(kvm, rmapp, spte);
807                 }
808         }
809
810         return write_protected;
811 }
812
813 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
814                            unsigned long data)
815 {
816         u64 *spte;
817         int need_tlb_flush = 0;
818
819         while ((spte = rmap_next(kvm, rmapp, NULL))) {
820                 BUG_ON(!(*spte & PT_PRESENT_MASK));
821                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
822                 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
823                 need_tlb_flush = 1;
824         }
825         return need_tlb_flush;
826 }
827
828 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
829                              unsigned long data)
830 {
831         int need_flush = 0;
832         u64 *spte, new_spte;
833         pte_t *ptep = (pte_t *)data;
834         pfn_t new_pfn;
835
836         WARN_ON(pte_huge(*ptep));
837         new_pfn = pte_pfn(*ptep);
838         spte = rmap_next(kvm, rmapp, NULL);
839         while (spte) {
840                 BUG_ON(!is_shadow_present_pte(*spte));
841                 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
842                 need_flush = 1;
843                 if (pte_write(*ptep)) {
844                         drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
845                         spte = rmap_next(kvm, rmapp, NULL);
846                 } else {
847                         new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
848                         new_spte |= (u64)new_pfn << PAGE_SHIFT;
849
850                         new_spte &= ~PT_WRITABLE_MASK;
851                         new_spte &= ~SPTE_HOST_WRITEABLE;
852                         new_spte &= ~shadow_accessed_mask;
853                         set_spte_track_bits(spte, new_spte);
854                         spte = rmap_next(kvm, rmapp, spte);
855                 }
856         }
857         if (need_flush)
858                 kvm_flush_remote_tlbs(kvm);
859
860         return 0;
861 }
862
863 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
864                           unsigned long data,
865                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
866                                          unsigned long data))
867 {
868         int i, j;
869         int ret;
870         int retval = 0;
871         struct kvm_memslots *slots;
872
873         slots = kvm_memslots(kvm);
874
875         for (i = 0; i < slots->nmemslots; i++) {
876                 struct kvm_memory_slot *memslot = &slots->memslots[i];
877                 unsigned long start = memslot->userspace_addr;
878                 unsigned long end;
879
880                 end = start + (memslot->npages << PAGE_SHIFT);
881                 if (hva >= start && hva < end) {
882                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
883                         gfn_t gfn = memslot->base_gfn + gfn_offset;
884
885                         ret = handler(kvm, &memslot->rmap[gfn_offset], data);
886
887                         for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
888                                 struct kvm_lpage_info *linfo;
889
890                                 linfo = lpage_info_slot(gfn, memslot,
891                                                         PT_DIRECTORY_LEVEL + j);
892                                 ret |= handler(kvm, &linfo->rmap_pde, data);
893                         }
894                         trace_kvm_age_page(hva, memslot, ret);
895                         retval |= ret;
896                 }
897         }
898
899         return retval;
900 }
901
902 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
903 {
904         return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
905 }
906
907 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
908 {
909         kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
910 }
911
912 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
913                          unsigned long data)
914 {
915         u64 *spte;
916         int young = 0;
917
918         /*
919          * Emulate the accessed bit for EPT, by checking if this page has
920          * an EPT mapping, and clearing it if it does. On the next access,
921          * a new EPT mapping will be established.
922          * This has some overhead, but not as much as the cost of swapping
923          * out actively used pages or breaking up actively used hugepages.
924          */
925         if (!shadow_accessed_mask)
926                 return kvm_unmap_rmapp(kvm, rmapp, data);
927
928         spte = rmap_next(kvm, rmapp, NULL);
929         while (spte) {
930                 int _young;
931                 u64 _spte = *spte;
932                 BUG_ON(!(_spte & PT_PRESENT_MASK));
933                 _young = _spte & PT_ACCESSED_MASK;
934                 if (_young) {
935                         young = 1;
936                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
937                 }
938                 spte = rmap_next(kvm, rmapp, spte);
939         }
940         return young;
941 }
942
943 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
944                               unsigned long data)
945 {
946         u64 *spte;
947         int young = 0;
948
949         /*
950          * If there's no access bit in the secondary pte set by the
951          * hardware it's up to gup-fast/gup to set the access bit in
952          * the primary pte or in the page structure.
953          */
954         if (!shadow_accessed_mask)
955                 goto out;
956
957         spte = rmap_next(kvm, rmapp, NULL);
958         while (spte) {
959                 u64 _spte = *spte;
960                 BUG_ON(!(_spte & PT_PRESENT_MASK));
961                 young = _spte & PT_ACCESSED_MASK;
962                 if (young) {
963                         young = 1;
964                         break;
965                 }
966                 spte = rmap_next(kvm, rmapp, spte);
967         }
968 out:
969         return young;
970 }
971
972 #define RMAP_RECYCLE_THRESHOLD 1000
973
974 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
975 {
976         unsigned long *rmapp;
977         struct kvm_mmu_page *sp;
978
979         sp = page_header(__pa(spte));
980
981         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
982
983         kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
984         kvm_flush_remote_tlbs(vcpu->kvm);
985 }
986
987 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
988 {
989         return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
990 }
991
992 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
993 {
994         return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
995 }
996
997 #ifdef MMU_DEBUG
998 static int is_empty_shadow_page(u64 *spt)
999 {
1000         u64 *pos;
1001         u64 *end;
1002
1003         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1004                 if (is_shadow_present_pte(*pos)) {
1005                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1006                                pos, *pos);
1007                         return 0;
1008                 }
1009         return 1;
1010 }
1011 #endif
1012
1013 /*
1014  * This value is the sum of all of the kvm instances'
1015  * kvm->arch.n_used_mmu_pages values.  We need a global,
1016  * aggregate version in order to make the slab shrinker
1017  * faster.
1018  */
1019 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1020 {
1021         kvm->arch.n_used_mmu_pages += nr;
1022         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1023 }
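
/*
 * A minimal sketch of how the aggregate counter is consumed, assuming the
 * standard percpu_counter API (the real mmu shrinker lives further down in
 * this file):
 *
 *      if (percpu_counter_read_positive(&kvm_total_used_mmu_pages) == 0)
 *              return 0;       nothing shadowed anywhere, so skip the
 *                              expensive per-VM scan entirely
 *
 * percpu_counter_read_positive() is cheap but may lag slightly behind the
 * true sum, which is fine for a heuristic like the slab shrinker.
 */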
1024
1025 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1026 {
1027         ASSERT(is_empty_shadow_page(sp->spt));
1028         hlist_del(&sp->hash_link);
1029         list_del(&sp->link);
1030         free_page((unsigned long)sp->spt);
1031         if (!sp->role.direct)
1032                 free_page((unsigned long)sp->gfns);
1033         kmem_cache_free(mmu_page_header_cache, sp);
1034         kvm_mod_used_mmu_pages(kvm, -1);
1035 }
1036
1037 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1038 {
1039         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
1040 }
1041
1042 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
1043                                                u64 *parent_pte, int direct)
1044 {
1045         struct kvm_mmu_page *sp;
1046
1047         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
1048         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
1049         if (!direct)
1050                 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
1051                                                   PAGE_SIZE);
1052         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1053         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1054         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
1055         sp->multimapped = 0;
1056         sp->parent_pte = parent_pte;
1057         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1058         return sp;
1059 }
1060
1061 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1062                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1063 {
1064         struct kvm_pte_chain *pte_chain;
1065         struct hlist_node *node;
1066         int i;
1067
1068         if (!parent_pte)
1069                 return;
1070         if (!sp->multimapped) {
1071                 u64 *old = sp->parent_pte;
1072
1073                 if (!old) {
1074                         sp->parent_pte = parent_pte;
1075                         return;
1076                 }
1077                 sp->multimapped = 1;
1078                 pte_chain = mmu_alloc_pte_chain(vcpu);
1079                 INIT_HLIST_HEAD(&sp->parent_ptes);
1080                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1081                 pte_chain->parent_ptes[0] = old;
1082         }
1083         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
1084                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
1085                         continue;
1086                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
1087                         if (!pte_chain->parent_ptes[i]) {
1088                                 pte_chain->parent_ptes[i] = parent_pte;
1089                                 return;
1090                         }
1091         }
1092         pte_chain = mmu_alloc_pte_chain(vcpu);
1093         BUG_ON(!pte_chain);
1094         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1095         pte_chain->parent_ptes[0] = parent_pte;
1096 }
1097
1098 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1099                                        u64 *parent_pte)
1100 {
1101         struct kvm_pte_chain *pte_chain;
1102         struct hlist_node *node;
1103         int i;
1104
1105         if (!sp->multimapped) {
1106                 BUG_ON(sp->parent_pte != parent_pte);
1107                 sp->parent_pte = NULL;
1108                 return;
1109         }
1110         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1111                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1112                         if (!pte_chain->parent_ptes[i])
1113                                 break;
1114                         if (pte_chain->parent_ptes[i] != parent_pte)
1115                                 continue;
1116                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
1117                                 && pte_chain->parent_ptes[i + 1]) {
1118                                 pte_chain->parent_ptes[i]
1119                                         = pte_chain->parent_ptes[i + 1];
1120                                 ++i;
1121                         }
1122                         pte_chain->parent_ptes[i] = NULL;
1123                         if (i == 0) {
1124                                 hlist_del(&pte_chain->link);
1125                                 mmu_free_pte_chain(pte_chain);
1126                                 if (hlist_empty(&sp->parent_ptes)) {
1127                                         sp->multimapped = 0;
1128                                         sp->parent_pte = NULL;
1129                                 }
1130                         }
1131                         return;
1132                 }
1133         BUG();
1134 }
1135
1136 static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
1137 {
1138         struct kvm_pte_chain *pte_chain;
1139         struct hlist_node *node;
1140         struct kvm_mmu_page *parent_sp;
1141         int i;
1142
1143         if (!sp->multimapped && sp->parent_pte) {
1144                 parent_sp = page_header(__pa(sp->parent_pte));
1145                 fn(parent_sp, sp->parent_pte);
1146                 return;
1147         }
1148
1149         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1150                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1151                         u64 *spte = pte_chain->parent_ptes[i];
1152
1153                         if (!spte)
1154                                 break;
1155                         parent_sp = page_header(__pa(spte));
1156                         fn(parent_sp, spte);
1157                 }
1158 }
1159
1160 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
1161 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1162 {
1163         mmu_parent_walk(sp, mark_unsync);
1164 }
1165
1166 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
1167 {
1168         unsigned int index;
1169
1170         index = spte - sp->spt;
1171         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1172                 return;
1173         if (sp->unsync_children++)
1174                 return;
1175         kvm_mmu_mark_parents_unsync(sp);
1176 }
1177
1178 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1179                                     struct kvm_mmu_page *sp)
1180 {
1181         int i;
1182
1183         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1184                 sp->spt[i] = shadow_trap_nonpresent_pte;
1185 }
1186
1187 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1188                                struct kvm_mmu_page *sp)
1189 {
1190         return 1;
1191 }
1192
1193 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1194 {
1195 }
1196
1197 #define KVM_PAGE_ARRAY_NR 16
1198
1199 struct kvm_mmu_pages {
1200         struct mmu_page_and_offset {
1201                 struct kvm_mmu_page *sp;
1202                 unsigned int idx;
1203         } page[KVM_PAGE_ARRAY_NR];
1204         unsigned int nr;
1205 };
1206
1207 #define for_each_unsync_children(bitmap, idx)           \
1208         for (idx = find_first_bit(bitmap, 512);         \
1209              idx < 512;                                 \
1210              idx = find_next_bit(bitmap, 512, idx+1))
1211
1212 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1213                          int idx)
1214 {
1215         int i;
1216
1217         if (sp->unsync)
1218                 for (i=0; i < pvec->nr; i++)
1219                         if (pvec->page[i].sp == sp)
1220                                 return 0;
1221
1222         pvec->page[pvec->nr].sp = sp;
1223         pvec->page[pvec->nr].idx = idx;
1224         pvec->nr++;
1225         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1226 }
1227
1228 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1229                            struct kvm_mmu_pages *pvec)
1230 {
1231         int i, ret, nr_unsync_leaf = 0;
1232
1233         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1234                 struct kvm_mmu_page *child;
1235                 u64 ent = sp->spt[i];
1236
1237                 if (!is_shadow_present_pte(ent) || is_large_pte(ent))
1238                         goto clear_child_bitmap;
1239
1240                 child = page_header(ent & PT64_BASE_ADDR_MASK);
1241
1242                 if (child->unsync_children) {
1243                         if (mmu_pages_add(pvec, child, i))
1244                                 return -ENOSPC;
1245
1246                         ret = __mmu_unsync_walk(child, pvec);
1247                         if (!ret)
1248                                 goto clear_child_bitmap;
1249                         else if (ret > 0)
1250                                 nr_unsync_leaf += ret;
1251                         else
1252                                 return ret;
1253                 } else if (child->unsync) {
1254                         nr_unsync_leaf++;
1255                         if (mmu_pages_add(pvec, child, i))
1256                                 return -ENOSPC;
1257                 } else
1258                          goto clear_child_bitmap;
1259
1260                 continue;
1261
1262 clear_child_bitmap:
1263                 __clear_bit(i, sp->unsync_child_bitmap);
1264                 sp->unsync_children--;
1265                 WARN_ON((int)sp->unsync_children < 0);
1266         }
1267
1268
1269         return nr_unsync_leaf;
1270 }
1271
1272 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1273                            struct kvm_mmu_pages *pvec)
1274 {
1275         if (!sp->unsync_children)
1276                 return 0;
1277
1278         mmu_pages_add(pvec, sp, 0);
1279         return __mmu_unsync_walk(sp, pvec);
1280 }
1281
1282 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1283 {
1284         WARN_ON(!sp->unsync);
1285         trace_kvm_mmu_sync_page(sp);
1286         sp->unsync = 0;
1287         --kvm->stat.mmu_unsync;
1288 }
1289
1290 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1291                                     struct list_head *invalid_list);
1292 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1293                                     struct list_head *invalid_list);
1294
1295 #define for_each_gfn_sp(kvm, sp, gfn, pos)                              \
1296   hlist_for_each_entry(sp, pos,                                         \
1297    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
1298         if ((sp)->gfn != (gfn)) {} else
1299
1300 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)               \
1301   hlist_for_each_entry(sp, pos,                                         \
1302    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
1303                 if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
1304                         (sp)->role.invalid) {} else
1305
1306 /* @sp->gfn should be write-protected at the call site */
1307 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1308                            struct list_head *invalid_list, bool clear_unsync)
1309 {
1310         if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1311                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1312                 return 1;
1313         }
1314
1315         if (clear_unsync)
1316                 kvm_unlink_unsync_page(vcpu->kvm, sp);
1317
1318         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1319                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1320                 return 1;
1321         }
1322
1323         kvm_mmu_flush_tlb(vcpu);
1324         return 0;
1325 }
1326
1327 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1328                                    struct kvm_mmu_page *sp)
1329 {
1330         LIST_HEAD(invalid_list);
1331         int ret;
1332
1333         ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
1334         if (ret)
1335                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1336
1337         return ret;
1338 }
1339
1340 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1341                          struct list_head *invalid_list)
1342 {
1343         return __kvm_sync_page(vcpu, sp, invalid_list, true);
1344 }
1345
1346 /* @gfn should be write-protected at the call site */
1347 static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
1348 {
1349         struct kvm_mmu_page *s;
1350         struct hlist_node *node;
1351         LIST_HEAD(invalid_list);
1352         bool flush = false;
1353
1354         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1355                 if (!s->unsync)
1356                         continue;
1357
1358                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1359                 kvm_unlink_unsync_page(vcpu->kvm, s);
1360                 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1361                         (vcpu->arch.mmu.sync_page(vcpu, s))) {
1362                         kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1363                         continue;
1364                 }
1365                 flush = true;
1366         }
1367
1368         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1369         if (flush)
1370                 kvm_mmu_flush_tlb(vcpu);
1371 }
1372
1373 struct mmu_page_path {
1374         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1375         unsigned int idx[PT64_ROOT_LEVEL-1];
1376 };
1377
1378 #define for_each_sp(pvec, sp, parents, i)                       \
1379                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1380                         sp = pvec.page[i].sp;                   \
1381                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1382                         i = mmu_pages_next(&pvec, &parents, i))
1383
1384 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1385                           struct mmu_page_path *parents,
1386                           int i)
1387 {
1388         int n;
1389
1390         for (n = i+1; n < pvec->nr; n++) {
1391                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1392
1393                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1394                         parents->idx[0] = pvec->page[n].idx;
1395                         return n;
1396                 }
1397
1398                 parents->parent[sp->role.level-2] = sp;
1399                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1400         }
1401
1402         return n;
1403 }
1404
1405 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1406 {
1407         struct kvm_mmu_page *sp;
1408         unsigned int level = 0;
1409
1410         do {
1411                 unsigned int idx = parents->idx[level];
1412
1413                 sp = parents->parent[level];
1414                 if (!sp)
1415                         return;
1416
1417                 --sp->unsync_children;
1418                 WARN_ON((int)sp->unsync_children < 0);
1419                 __clear_bit(idx, sp->unsync_child_bitmap);
1420                 level++;
1421         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1422 }
1423
1424 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1425                                struct mmu_page_path *parents,
1426                                struct kvm_mmu_pages *pvec)
1427 {
1428         parents->parent[parent->role.level-1] = NULL;
1429         pvec->nr = 0;
1430 }
1431
1432 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1433                               struct kvm_mmu_page *parent)
1434 {
1435         int i;
1436         struct kvm_mmu_page *sp;
1437         struct mmu_page_path parents;
1438         struct kvm_mmu_pages pages;
1439         LIST_HEAD(invalid_list);
1440
1441         kvm_mmu_pages_init(parent, &parents, &pages);
1442         while (mmu_unsync_walk(parent, &pages)) {
1443                 int protected = 0;
1444
1445                 for_each_sp(pages, sp, parents, i)
1446                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1447
1448                 if (protected)
1449                         kvm_flush_remote_tlbs(vcpu->kvm);
1450
1451                 for_each_sp(pages, sp, parents, i) {
1452                         kvm_sync_page(vcpu, sp, &invalid_list);
1453                         mmu_pages_clear_parents(&parents);
1454                 }
1455                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1456                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1457                 kvm_mmu_pages_init(parent, &parents, &pages);
1458         }
1459 }
1460
1461 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1462                                              gfn_t gfn,
1463                                              gva_t gaddr,
1464                                              unsigned level,
1465                                              int direct,
1466                                              unsigned access,
1467                                              u64 *parent_pte)
1468 {
1469         union kvm_mmu_page_role role;
1470         unsigned quadrant;
1471         struct kvm_mmu_page *sp;
1472         struct hlist_node *node;
1473         bool need_sync = false;
1474
1475         role = vcpu->arch.mmu.base_role;
1476         role.level = level;
1477         role.direct = direct;
1478         if (role.direct)
1479                 role.cr4_pae = 0;
1480         role.access = access;
1481         if (!vcpu->arch.mmu.direct_map
1482             && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1483                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1484                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1485                 role.quadrant = quadrant;
1486         }
1487         for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
1488                 if (!need_sync && sp->unsync)
1489                         need_sync = true;
1490
1491                 if (sp->role.word != role.word)
1492                         continue;
1493
1494                 if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
1495                         break;
1496
1497                 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1498                 if (sp->unsync_children) {
1499                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1500                         kvm_mmu_mark_parents_unsync(sp);
1501                 } else if (sp->unsync)
1502                         kvm_mmu_mark_parents_unsync(sp);
1503
1504                 trace_kvm_mmu_get_page(sp, false);
1505                 return sp;
1506         }
1507         ++vcpu->kvm->stat.mmu_cache_miss;
1508         sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
1509         if (!sp)
1510                 return sp;
1511         sp->gfn = gfn;
1512         sp->role = role;
1513         hlist_add_head(&sp->hash_link,
1514                 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
1515         if (!direct) {
1516                 if (rmap_write_protect(vcpu->kvm, gfn))
1517                         kvm_flush_remote_tlbs(vcpu->kvm);
1518                 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
1519                         kvm_sync_pages(vcpu, gfn);
1520
1521                 account_shadowed(vcpu->kvm, gfn);
1522         }
1523         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1524                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1525         else
1526                 nonpaging_prefetch_page(vcpu, sp);
1527         trace_kvm_mmu_get_page(sp, true);
1528         return sp;
1529 }
1530
1531 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1532                              struct kvm_vcpu *vcpu, u64 addr)
1533 {
1534         iterator->addr = addr;
1535         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1536         iterator->level = vcpu->arch.mmu.shadow_root_level;
1537
1538         if (iterator->level == PT64_ROOT_LEVEL &&
1539             vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
1540             !vcpu->arch.mmu.direct_map)
1541                 --iterator->level;
1542
1543         if (iterator->level == PT32E_ROOT_LEVEL) {
1544                 iterator->shadow_addr
1545                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1546                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1547                 --iterator->level;
1548                 if (!iterator->shadow_addr)
1549                         iterator->level = 0;
1550         }
1551 }
1552
1553 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1554 {
1555         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1556                 return false;
1557
1558         if (iterator->level == PT_PAGE_TABLE_LEVEL)
1559                 if (is_large_pte(*iterator->sptep))
1560                         return false;
1561
1562         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1563         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1564         return true;
1565 }
1566
1567 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1568 {
1569         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1570         --iterator->level;
1571 }
1572
1573 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
1574 {
1575         u64 spte;
1576
1577         spte = __pa(sp->spt)
1578                 | PT_PRESENT_MASK | PT_ACCESSED_MASK
1579                 | PT_WRITABLE_MASK | PT_USER_MASK;
1580         __set_spte(sptep, spte);
1581 }
1582
1583 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1584 {
1585         if (is_large_pte(*sptep)) {
1586                 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1587                 kvm_flush_remote_tlbs(vcpu->kvm);
1588         }
1589 }
1590
1591 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1592                                    unsigned direct_access)
1593 {
1594         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
1595                 struct kvm_mmu_page *child;
1596
1597                 /*
1598                  * For a direct sp, if the guest pte's dirty bit
1599                  * changed from clean to dirty, it will corrupt the
1600                  * sp's access: writes would be allowed through a
1601                  * read-only sp, so update the spte at this point to
1602                  * get a new sp with the correct access.
1603                  */
1604                 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
1605                 if (child->role.access == direct_access)
1606                         return;
1607
1608                 mmu_page_remove_parent_pte(child, sptep);
1609                 __set_spte(sptep, shadow_trap_nonpresent_pte);
1610                 kvm_flush_remote_tlbs(vcpu->kvm);
1611         }
1612 }
1613
1614 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1615                                          struct kvm_mmu_page *sp)
1616 {
1617         unsigned i;
1618         u64 *pt;
1619         u64 ent;
1620
1621         pt = sp->spt;
1622
1623         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1624                 ent = pt[i];
1625
1626                 if (is_shadow_present_pte(ent)) {
1627                         if (!is_last_spte(ent, sp->role.level)) {
1628                                 ent &= PT64_BASE_ADDR_MASK;
1629                                 mmu_page_remove_parent_pte(page_header(ent),
1630                                                            &pt[i]);
1631                         } else {
1632                                 if (is_large_pte(ent))
1633                                         --kvm->stat.lpages;
1634                                 drop_spte(kvm, &pt[i],
1635                                           shadow_trap_nonpresent_pte);
1636                         }
1637                 }
1638                 pt[i] = shadow_trap_nonpresent_pte;
1639         }
1640 }
1641
1642 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1643 {
1644         mmu_page_remove_parent_pte(sp, parent_pte);
1645 }
1646
1647 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1648 {
1649         int i;
1650         struct kvm_vcpu *vcpu;
1651
1652         kvm_for_each_vcpu(i, vcpu, kvm)
1653                 vcpu->arch.last_pte_updated = NULL;
1654 }
1655
1656 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1657 {
1658         u64 *parent_pte;
1659
1660         while (sp->multimapped || sp->parent_pte) {
1661                 if (!sp->multimapped)
1662                         parent_pte = sp->parent_pte;
1663                 else {
1664                         struct kvm_pte_chain *chain;
1665
1666                         chain = container_of(sp->parent_ptes.first,
1667                                              struct kvm_pte_chain, link);
1668                         parent_pte = chain->parent_ptes[0];
1669                 }
1670                 BUG_ON(!parent_pte);
1671                 kvm_mmu_put_page(sp, parent_pte);
1672                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1673         }
1674 }
1675
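     /*
      * Zap all unsync children of @parent, queueing them on @invalid_list.
      * Returns the number of unsync children that were zapped.
      */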
1676 static int mmu_zap_unsync_children(struct kvm *kvm,
1677                                    struct kvm_mmu_page *parent,
1678                                    struct list_head *invalid_list)
1679 {
1680         int i, zapped = 0;
1681         struct mmu_page_path parents;
1682         struct kvm_mmu_pages pages;
1683
1684         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1685                 return 0;
1686
1687         kvm_mmu_pages_init(parent, &parents, &pages);
1688         while (mmu_unsync_walk(parent, &pages)) {
1689                 struct kvm_mmu_page *sp;
1690
1691                 for_each_sp(pages, sp, parents, i) {
1692                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
1693                         mmu_pages_clear_parents(&parents);
1694                         zapped++;
1695                 }
1696                 kvm_mmu_pages_init(parent, &parents, &pages);
1697         }
1698
1699         return zapped;
1700 }
1701
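     /*
      * Unlink a shadow page from the page tables and queue it on
      * @invalid_list; the TLB flush and the actual freeing are deferred to
      * kvm_mmu_commit_zap_page().  A page that is still in use as a root
      * is only marked invalid and stays on the active list until its last
      * reference goes away.
      */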
1702 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1703                                     struct list_head *invalid_list)
1704 {
1705         int ret;
1706
1707         trace_kvm_mmu_prepare_zap_page(sp);
1708         ++kvm->stat.mmu_shadow_zapped;
1709         ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
1710         kvm_mmu_page_unlink_children(kvm, sp);
1711         kvm_mmu_unlink_parents(kvm, sp);
1712         if (!sp->role.invalid && !sp->role.direct)
1713                 unaccount_shadowed(kvm, sp->gfn);
1714         if (sp->unsync)
1715                 kvm_unlink_unsync_page(kvm, sp);
1716         if (!sp->root_count) {
1717                 /* Count self */
1718                 ret++;
1719                 list_move(&sp->link, invalid_list);
1720         } else {
1721                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1722                 kvm_reload_remote_mmus(kvm);
1723         }
1724
1725         sp->role.invalid = 1;
1726         kvm_mmu_reset_last_pte_updated(kvm);
1727         return ret;
1728 }
1729
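     /*
      * Free everything that kvm_mmu_prepare_zap_page() queued on
      * @invalid_list, after a single remote TLB flush.
      */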
1730 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1731                                     struct list_head *invalid_list)
1732 {
1733         struct kvm_mmu_page *sp;
1734
1735         if (list_empty(invalid_list))
1736                 return;
1737
1738         kvm_flush_remote_tlbs(kvm);
1739
1740         do {
1741                 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
1742                 WARN_ON(!sp->role.invalid || sp->root_count);
1743                 kvm_mmu_free_page(kvm, sp);
1744         } while (!list_empty(invalid_list));
1745
1746 }
1747
1748 /*
1749  * Change the number of mmu pages allocated to the vm.
1750  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
1751  */
1752 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1753 {
1754         LIST_HEAD(invalid_list);
1755         /*
1756          * If we set the number of mmu pages to be smaller than the
1757          * number of active pages, we must free some mmu pages before
1758          * we change the value.
1759          */
1760
1761         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1762                 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
1763                         !list_empty(&kvm->arch.active_mmu_pages)) {
1764                         struct kvm_mmu_page *page;
1765
1766                         page = container_of(kvm->arch.active_mmu_pages.prev,
1767                                             struct kvm_mmu_page, link);
1768                         kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
1769                         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1770                 }
1771                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
1772         }
1773
1774         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
1775 }
1776
1777 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1778 {
1779         struct kvm_mmu_page *sp;
1780         struct hlist_node *node;
1781         LIST_HEAD(invalid_list);
1782         int r;
1783
1784         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
1785         r = 0;
1786
1787         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1788                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
1789                          sp->role.word);
1790                 r = 1;
1791                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1792         }
1793         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1794         return r;
1795 }
1796
1797 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1798 {
1799         struct kvm_mmu_page *sp;
1800         struct hlist_node *node;
1801         LIST_HEAD(invalid_list);
1802
1803         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1804                 pgprintk("%s: zap %llx %x\n",
1805                          __func__, gfn, sp->role.word);
1806                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1807         }
1808         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1809 }
1810
1811 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1812 {
1813         int slot = memslot_id(kvm, gfn);
1814         struct kvm_mmu_page *sp = page_header(__pa(pte));
1815
1816         __set_bit(slot, sp->slot_bitmap);
1817 }
1818
1819 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1820 {
1821         int i;
1822         u64 *pt = sp->spt;
1823
1824         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1825                 return;
1826
1827         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1828                 if (pt[i] == shadow_notrap_nonpresent_pte)
1829                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1830         }
1831 }
1832
1833 /*
1834  * The function is based on mtrr_type_lookup() in
1835  * arch/x86/kernel/cpu/mtrr/generic.c
1836  */
1837 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1838                          u64 start, u64 end)
1839 {
1840         int i;
1841         u64 base, mask;
1842         u8 prev_match, curr_match;
1843         int num_var_ranges = KVM_NR_VAR_MTRR;
1844
1845         if (!mtrr_state->enabled)
1846                 return 0xFF;
1847
1848         /* Make end inclusive instead of exclusive */
1849         end--;
1850
1851         /* Look in fixed ranges. Just return the type as per start */
1852         if (mtrr_state->have_fixed && (start < 0x100000)) {
1853                 int idx;
1854
1855                 if (start < 0x80000) {
1856                         idx = 0;
1857                         idx += (start >> 16);
1858                         return mtrr_state->fixed_ranges[idx];
1859                 } else if (start < 0xC0000) {
1860                         idx = 1 * 8;
1861                         idx += ((start - 0x80000) >> 14);
1862                         return mtrr_state->fixed_ranges[idx];
1863                 } else if (start < 0x1000000) {
1864                         idx = 3 * 8;
1865                         idx += ((start - 0xC0000) >> 12);
1866                         return mtrr_state->fixed_ranges[idx];
1867                 }
1868         }
1869
1870         /*
1871          * Look in variable ranges.
1872          * Look for multiple ranges matching this address and pick the
1873          * type as per MTRR precedence.
1874          */
1875         if (!(mtrr_state->enabled & 2))
1876                 return mtrr_state->def_type;
1877
1878         prev_match = 0xFF;
1879         for (i = 0; i < num_var_ranges; ++i) {
1880                 unsigned short start_state, end_state;
1881
1882                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1883                         continue;
1884
1885                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1886                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1887                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1888                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1889
1890                 start_state = ((start & mask) == (base & mask));
1891                 end_state = ((end & mask) == (base & mask));
1892                 if (start_state != end_state)
1893                         return 0xFE;
1894
1895                 if ((start & mask) != (base & mask))
1896                         continue;
1897
1898                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1899                 if (prev_match == 0xFF) {
1900                         prev_match = curr_match;
1901                         continue;
1902                 }
1903
1904                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1905                     curr_match == MTRR_TYPE_UNCACHABLE)
1906                         return MTRR_TYPE_UNCACHABLE;
1907
1908                 if ((prev_match == MTRR_TYPE_WRBACK &&
1909                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1910                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1911                      curr_match == MTRR_TYPE_WRBACK)) {
1912                         prev_match = MTRR_TYPE_WRTHROUGH;
1913                         curr_match = MTRR_TYPE_WRTHROUGH;
1914                 }
1915
1916                 if (prev_match != curr_match)
1917                         return MTRR_TYPE_UNCACHABLE;
1918         }
1919
1920         if (prev_match != 0xFF)
1921                 return prev_match;
1922
1923         return mtrr_state->def_type;
1924 }
1925
1926 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1927 {
1928         u8 mtrr;
1929
1930         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1931                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1932         if (mtrr == 0xfe || mtrr == 0xff)
1933                 mtrr = MTRR_TYPE_WRBACK;
1934         return mtrr;
1935 }
1936 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1937
1938 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1939 {
1940         trace_kvm_mmu_unsync_page(sp);
1941         ++vcpu->kvm->stat.mmu_unsync;
1942         sp->unsync = 1;
1943
1944         kvm_mmu_mark_parents_unsync(sp);
1945         mmu_convert_notrap(sp);
1946 }
1947
1948 static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
1949 {
1950         struct kvm_mmu_page *s;
1951         struct hlist_node *node;
1952
1953         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1954                 if (s->unsync)
1955                         continue;
1956                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1957                 __kvm_unsync_page(vcpu, s);
1958         }
1959 }
1960
1961 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1962                                   bool can_unsync)
1963 {
1964         struct kvm_mmu_page *s;
1965         struct hlist_node *node;
1966         bool need_unsync = false;
1967
1968         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1969                 if (!can_unsync)
1970                         return 1;
1971
1972                 if (s->role.level != PT_PAGE_TABLE_LEVEL)
1973                         return 1;
1974
1975                 if (!need_unsync && !s->unsync) {
1976                         if (!oos_shadow)
1977                                 return 1;
1978                         need_unsync = true;
1979                 }
1980         }
1981         if (need_unsync)
1982                 kvm_unsync_pages(vcpu, gfn);
1983         return 0;
1984 }
1985
1986 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1987                     unsigned pte_access, int user_fault,
1988                     int write_fault, int dirty, int level,
1989                     gfn_t gfn, pfn_t pfn, bool speculative,
1990                     bool can_unsync, bool host_writable)
1991 {
1992         u64 spte, entry = *sptep;
1993         int ret = 0;
1994
1995         /*
1996          * We don't set the accessed bit, since we sometimes want to see
1997          * whether the guest actually used the pte (in order to detect
1998          * demand paging).
1999          */
2000         spte = PT_PRESENT_MASK;
2001         if (!speculative)
2002                 spte |= shadow_accessed_mask;
2003         if (!dirty)
2004                 pte_access &= ~ACC_WRITE_MASK;
2005         if (pte_access & ACC_EXEC_MASK)
2006                 spte |= shadow_x_mask;
2007         else
2008                 spte |= shadow_nx_mask;
2009         if (pte_access & ACC_USER_MASK)
2010                 spte |= shadow_user_mask;
2011         if (level > PT_PAGE_TABLE_LEVEL)
2012                 spte |= PT_PAGE_SIZE_MASK;
2013         if (tdp_enabled)
2014                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2015                         kvm_is_mmio_pfn(pfn));
2016
2017         if (host_writable)
2018                 spte |= SPTE_HOST_WRITEABLE;
2019         else
2020                 pte_access &= ~ACC_WRITE_MASK;
2021
2022         spte |= (u64)pfn << PAGE_SHIFT;
2023
2024         if ((pte_access & ACC_WRITE_MASK)
2025             || (!vcpu->arch.mmu.direct_map && write_fault
2026                 && !is_write_protection(vcpu) && !user_fault)) {
2027
2028                 if (level > PT_PAGE_TABLE_LEVEL &&
2029                     has_wrprotected_page(vcpu->kvm, gfn, level)) {
2030                         ret = 1;
2031                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2032                         goto done;
2033                 }
2034
2035                 spte |= PT_WRITABLE_MASK;
2036
2037                 if (!vcpu->arch.mmu.direct_map
2038                     && !(pte_access & ACC_WRITE_MASK))
2039                         spte &= ~PT_USER_MASK;
2040
2041                 /*
2042                  * Optimization: for pte sync, if spte was writable the hash
2043                  * lookup is unnecessary (and expensive). Write protection
2044                  * is responsibility of mmu_get_page / kvm_sync_page.
2045                  * Same reasoning can be applied to dirty page accounting.
2046                  */
2047                 if (!can_unsync && is_writable_pte(*sptep))
2048                         goto set_pte;
2049
2050                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2051                         pgprintk("%s: found shadow page for %llx, marking ro\n",
2052                                  __func__, gfn);
2053                         ret = 1;
2054                         pte_access &= ~ACC_WRITE_MASK;
2055                         if (is_writable_pte(spte))
2056                                 spte &= ~PT_WRITABLE_MASK;
2057                 }
2058         }
2059
2060         if (pte_access & ACC_WRITE_MASK)
2061                 mark_page_dirty(vcpu->kvm, gfn);
2062
2063 set_pte:
2064         update_spte(sptep, spte);
2065         /*
2066          * If we overwrite a writable spte with a read-only one we
2067          * should flush remote TLBs. Otherwise rmap_write_protect
2068          * will find a read-only spte, even though the writable spte
2069          * might be cached on a CPU's TLB.
2070          */
2071         if (is_writable_pte(entry) && !is_writable_pte(*sptep))
2072                 kvm_flush_remote_tlbs(vcpu->kvm);
2073 done:
2074         return ret;
2075 }
2076
2077 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2078                          unsigned pt_access, unsigned pte_access,
2079                          int user_fault, int write_fault, int dirty,
2080                          int *ptwrite, int level, gfn_t gfn,
2081                          pfn_t pfn, bool speculative,
2082                          bool host_writable)
2083 {
2084         int was_rmapped = 0;
2085         int rmap_count;
2086
2087         pgprintk("%s: spte %llx access %x write_fault %d"
2088                  " user_fault %d gfn %llx\n",
2089                  __func__, *sptep, pt_access,
2090                  write_fault, user_fault, gfn);
2091
2092         if (is_rmap_spte(*sptep)) {
2093                 /*
2094                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2095                  * the parent of the now unreachable PTE.
2096                  */
2097                 if (level > PT_PAGE_TABLE_LEVEL &&
2098                     !is_large_pte(*sptep)) {
2099                         struct kvm_mmu_page *child;
2100                         u64 pte = *sptep;
2101
2102                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2103                         mmu_page_remove_parent_pte(child, sptep);
2104                         __set_spte(sptep, shadow_trap_nonpresent_pte);
2105                         kvm_flush_remote_tlbs(vcpu->kvm);
2106                 } else if (pfn != spte_to_pfn(*sptep)) {
2107                         pgprintk("hfn old %llx new %llx\n",
2108                                  spte_to_pfn(*sptep), pfn);
2109                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2110                         kvm_flush_remote_tlbs(vcpu->kvm);
2111                 } else
2112                         was_rmapped = 1;
2113         }
2114
2115         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
2116                       dirty, level, gfn, pfn, speculative, true,
2117                       host_writable)) {
2118                 if (write_fault)
2119                         *ptwrite = 1;
2120                 kvm_mmu_flush_tlb(vcpu);
2121         }
2122
2123         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2124         pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2125                  is_large_pte(*sptep) ? "2MB" : "4kB",
2126                  *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
2127                  *sptep, sptep);
2128         if (!was_rmapped && is_large_pte(*sptep))
2129                 ++vcpu->kvm->stat.lpages;
2130
2131         page_header_update_slot(vcpu->kvm, sptep, gfn);
2132         if (!was_rmapped) {
2133                 rmap_count = rmap_add(vcpu, sptep, gfn);
2134                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2135                         rmap_recycle(vcpu, sptep, gfn);
2136         }
2137         kvm_release_pfn_clean(pfn);
2138         if (speculative) {
2139                 vcpu->arch.last_pte_updated = sptep;
2140                 vcpu->arch.last_pte_gfn = gfn;
2141         }
2142 }
2143
2144 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2145 {
2146 }
2147
2148 static struct kvm_memory_slot *
2149 pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
2150 {
2151         struct kvm_memory_slot *slot;
2152
2153         slot = gfn_to_memslot(vcpu->kvm, gfn);
2154         if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
2155               (no_dirty_log && slot->dirty_bitmap))
2156                 slot = NULL;
2157
2158         return slot;
2159 }
2160
2161 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2162                                      bool no_dirty_log)
2163 {
2164         struct kvm_memory_slot *slot;
2165         unsigned long hva;
2166
2167         slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log);
2168         if (!slot) {
2169                 get_page(bad_page);
2170                 return page_to_pfn(bad_page);
2171         }
2172
2173         hva = gfn_to_hva_memslot(slot, gfn);
2174
2175         return hva_to_pfn_atomic(vcpu->kvm, hva);
2176 }
2177
2178 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2179                                     struct kvm_mmu_page *sp,
2180                                     u64 *start, u64 *end)
2181 {
2182         struct page *pages[PTE_PREFETCH_NUM];
2183         unsigned access = sp->role.access;
2184         int i, ret;
2185         gfn_t gfn;
2186
2187         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2188         if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK))
2189                 return -1;
2190
2191         ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2192         if (ret <= 0)
2193                 return -1;
2194
2195         for (i = 0; i < ret; i++, gfn++, start++)
2196                 mmu_set_spte(vcpu, start, ACC_ALL,
2197                              access, 0, 0, 1, NULL,
2198                              sp->role.level, gfn,
2199                              page_to_pfn(pages[i]), true, true);
2200
2201         return 0;
2202 }
2203
2204 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2205                                   struct kvm_mmu_page *sp, u64 *sptep)
2206 {
2207         u64 *spte, *start = NULL;
2208         int i;
2209
2210         WARN_ON(!sp->role.direct);
2211
2212         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2213         spte = sp->spt + i;
2214
2215         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2216                 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
2217                         if (!start)
2218                                 continue;
2219                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2220                                 break;
2221                         start = NULL;
2222                 } else if (!start)
2223                         start = spte;
2224         }
2225 }
2226
2227 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2228 {
2229         struct kvm_mmu_page *sp;
2230
2231         /*
2232          * Since there is no accessed bit on EPT, there is no way to
2233          * distinguish between actually accessed translations and
2234          * prefetched ones, so disable pte prefetch if EPT is
2235          * enabled.
2236          */
2237         if (!shadow_accessed_mask)
2238                 return;
2239
2240         sp = page_header(__pa(sptep));
2241         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2242                 return;
2243
2244         __direct_pte_prefetch(vcpu, sp, sptep);
2245 }
2246
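     /*
      * Walk the shadow page table for a direct mapping, allocating
      * intermediate shadow pages as needed, and install the final spte at
      * @level via mmu_set_spte().
      */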
2247 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2248                         int map_writable, int level, gfn_t gfn, pfn_t pfn,
2249                         bool prefault)
2250 {
2251         struct kvm_shadow_walk_iterator iterator;
2252         struct kvm_mmu_page *sp;
2253         int pt_write = 0;
2254         gfn_t pseudo_gfn;
2255
2256         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2257                 if (iterator.level == level) {
2258                         unsigned pte_access = ACC_ALL;
2259
2260                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
2261                                      0, write, 1, &pt_write,
2262                                      level, gfn, pfn, prefault, map_writable);
2263                         direct_pte_prefetch(vcpu, iterator.sptep);
2264                         ++vcpu->stat.pf_fixed;
2265                         break;
2266                 }
2267
2268                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
2269                         u64 base_addr = iterator.addr;
2270
2271                         base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2272                         pseudo_gfn = base_addr >> PAGE_SHIFT;
2273                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2274                                               iterator.level - 1,
2275                                               1, ACC_ALL, iterator.sptep);
2276                         if (!sp) {
2277                                 pgprintk("nonpaging_map: ENOMEM\n");
2278                                 kvm_release_pfn_clean(pfn);
2279                                 return -ENOMEM;
2280                         }
2281
2282                         __set_spte(iterator.sptep,
2283                                    __pa(sp->spt)
2284                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
2285                                    | shadow_user_mask | shadow_x_mask
2286                                    | shadow_accessed_mask);
2287                 }
2288         }
2289         return pt_write;
2290 }
2291
2292 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2293 {
2294         siginfo_t info;
2295
2296         info.si_signo   = SIGBUS;
2297         info.si_errno   = 0;
2298         info.si_code    = BUS_MCEERR_AR;
2299         info.si_addr    = (void __user *)address;
2300         info.si_addr_lsb = PAGE_SHIFT;
2301
2302         send_sig_info(SIGBUS, &info, tsk);
2303 }
2304
2305 static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2306 {
2307         kvm_release_pfn_clean(pfn);
2308         if (is_hwpoison_pfn(pfn)) {
2309                 kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
2310                 return 0;
2311         } else if (is_fault_pfn(pfn))
2312                 return -EFAULT;
2313
2314         return 1;
2315 }
2316
2317 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2318                                         gfn_t *gfnp, pfn_t *pfnp, int *levelp)
2319 {
2320         pfn_t pfn = *pfnp;
2321         gfn_t gfn = *gfnp;
2322         int level = *levelp;
2323
2324         /*
2325          * Check if it's a transparent hugepage. If this were a
2326          * hugetlbfs page, level wouldn't be set to
2327          * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
2328          * here.
2329          */
2330         if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
2331             level == PT_PAGE_TABLE_LEVEL &&
2332             PageTransCompound(pfn_to_page(pfn)) &&
2333             !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
2334                 unsigned long mask;
2335                 /*
2336                  * mmu_notifier_retry was successful and we hold the
2337                  * mmu_lock here, so the pmd can't start splitting
2338                  * under us, and in turn
2339                  * __split_huge_page_refcount() can't run from under
2340                  * us, so we can safely transfer the refcount from
2341                  * PG_tail to PG_head as we switch the pfn from tail
2342                  * to head.
2343                  */
2344                 *levelp = level = PT_DIRECTORY_LEVEL;
2345                 mask = KVM_PAGES_PER_HPAGE(level) - 1;
2346                 VM_BUG_ON((gfn & mask) != (pfn & mask));
2347                 if (pfn & mask) {
2348                         gfn &= ~mask;
2349                         *gfnp = gfn;
2350                         kvm_release_pfn_clean(pfn);
2351                         pfn &= ~mask;
2352                         if (!get_page_unless_zero(pfn_to_page(pfn)))
2353                                 BUG();
2354                         *pfnp = pfn;
2355                 }
2356         }
2357 }
2358
2359 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2360                          gva_t gva, pfn_t *pfn, bool write, bool *writable);
2361
2362 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
2363                          bool prefault)
2364 {
2365         int r;
2366         int level;
2367         int force_pt_level;
2368         pfn_t pfn;
2369         unsigned long mmu_seq;
2370         bool map_writable;
2371
2372         force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
2373         if (likely(!force_pt_level)) {
2374                 level = mapping_level(vcpu, gfn);
2375                 /*
2376                  * This path builds a PAE pagetable - so we can map
2377                  * 2mb pages at maximum. Therefore check if the level
2378                  * is larger than that.
2379                  */
2380                 if (level > PT_DIRECTORY_LEVEL)
2381                         level = PT_DIRECTORY_LEVEL;
2382
2383                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2384         } else
2385                 level = PT_PAGE_TABLE_LEVEL;
2386
2387         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2388         smp_rmb();
2389
2390         if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
2391                 return 0;
2392
2393         /* mmio */
2394         if (is_error_pfn(pfn))
2395                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2396
2397         spin_lock(&vcpu->kvm->mmu_lock);
2398         if (mmu_notifier_retry(vcpu, mmu_seq))
2399                 goto out_unlock;
2400         kvm_mmu_free_some_pages(vcpu);
2401         if (likely(!force_pt_level))
2402                 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
2403         r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
2404                          prefault);
2405         spin_unlock(&vcpu->kvm->mmu_lock);
2406
2407
2408         return r;
2409
2410 out_unlock:
2411         spin_unlock(&vcpu->kvm->mmu_lock);
2412         kvm_release_pfn_clean(pfn);
2413         return 0;
2414 }
2415
2416
2417 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2418 {
2419         int i;
2420         struct kvm_mmu_page *sp;
2421         LIST_HEAD(invalid_list);
2422
2423         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2424                 return;
2425         spin_lock(&vcpu->kvm->mmu_lock);
2426         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2427             (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2428              vcpu->arch.mmu.direct_map)) {
2429                 hpa_t root = vcpu->arch.mmu.root_hpa;
2430
2431                 sp = page_header(root);
2432                 --sp->root_count;
2433                 if (!sp->root_count && sp->role.invalid) {
2434                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2435                         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2436                 }
2437                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2438                 spin_unlock(&vcpu->kvm->mmu_lock);
2439                 return;
2440         }
2441         for (i = 0; i < 4; ++i) {
2442                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2443
2444                 if (root) {
2445                         root &= PT64_BASE_ADDR_MASK;
2446                         sp = page_header(root);
2447                         --sp->root_count;
2448                         if (!sp->root_count && sp->role.invalid)
2449                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2450                                                          &invalid_list);
2451                 }
2452                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2453         }
2454         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2455         spin_unlock(&vcpu->kvm->mmu_lock);
2456         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2457 }
2458
2459 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2460 {
2461         int ret = 0;
2462
2463         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2464                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2465                 ret = 1;
2466         }
2467
2468         return ret;
2469 }
2470
2471 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
2472 {
2473         struct kvm_mmu_page *sp;
2474         unsigned i;
2475
2476         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2477                 spin_lock(&vcpu->kvm->mmu_lock);
2478                 kvm_mmu_free_some_pages(vcpu);
2479                 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
2480                                       1, ACC_ALL, NULL);
2481                 ++sp->root_count;
2482                 spin_unlock(&vcpu->kvm->mmu_lock);
2483                 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
2484         } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
2485                 for (i = 0; i < 4; ++i) {
2486                         hpa_t root = vcpu->arch.mmu.pae_root[i];
2487
2488                         ASSERT(!VALID_PAGE(root));
2489                         spin_lock(&vcpu->kvm->mmu_lock);
2490                         kvm_mmu_free_some_pages(vcpu);
2491                         sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
2492                                               i << 30,
2493                                               PT32_ROOT_LEVEL, 1, ACC_ALL,
2494                                               NULL);
2495                         root = __pa(sp->spt);
2496                         ++sp->root_count;
2497                         spin_unlock(&vcpu->kvm->mmu_lock);
2498                         vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2499                 }
2500                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2501         } else
2502                 BUG();
2503
2504         return 0;
2505 }
2506
2507 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2508 {
2509         struct kvm_mmu_page *sp;
2510         u64 pdptr, pm_mask;
2511         gfn_t root_gfn;
2512         int i;
2513
2514         root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
2515
2516         if (mmu_check_root(vcpu, root_gfn))
2517                 return 1;
2518
2519         /*
2520          * Do we shadow a long mode page table? If so we need to
2521          * write-protect the guests page table root.
2522          */
2523         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2524                 hpa_t root = vcpu->arch.mmu.root_hpa;
2525
2526                 ASSERT(!VALID_PAGE(root));
2527
2528                 spin_lock(&vcpu->kvm->mmu_lock);
2529                 kvm_mmu_free_some_pages(vcpu);
2530                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
2531                                       0, ACC_ALL, NULL);
2532                 root = __pa(sp->spt);
2533                 ++sp->root_count;
2534                 spin_unlock(&vcpu->kvm->mmu_lock);
2535                 vcpu->arch.mmu.root_hpa = root;
2536                 return 0;
2537         }
2538
2539         /*
2540          * We shadow a 32 bit page table. This may be a legacy 2-level
2541          * or a PAE 3-level page table. In either case we need to be aware that
2542          * the shadow page table may be a PAE or a long mode page table.
2543          */
2544         pm_mask = PT_PRESENT_MASK;
2545         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
2546                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2547
2548         for (i = 0; i < 4; ++i) {
2549                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2550
2551                 ASSERT(!VALID_PAGE(root));
2552                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2553                         pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
2554                         if (!is_present_gpte(pdptr)) {
2555                                 vcpu->arch.mmu.pae_root[i] = 0;
2556                                 continue;
2557                         }
2558                         root_gfn = pdptr >> PAGE_SHIFT;
2559                         if (mmu_check_root(vcpu, root_gfn))
2560                                 return 1;
2561                 }
2562                 spin_lock(&vcpu->kvm->mmu_lock);
2563                 kvm_mmu_free_some_pages(vcpu);
2564                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2565                                       PT32_ROOT_LEVEL, 0,
2566                                       ACC_ALL, NULL);
2567                 root = __pa(sp->spt);
2568                 ++sp->root_count;
2569                 spin_unlock(&vcpu->kvm->mmu_lock);
2570
2571                 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
2572         }
2573         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2574
2575         /*
2576          * If we shadow a 32 bit page table with a long mode page
2577          * table we enter this path.
2578          */
2579         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2580                 if (vcpu->arch.mmu.lm_root == NULL) {
2581                         /*
2582                          * The additional page necessary for this is only
2583                          * allocated on demand.
2584                          */
2585
2586                         u64 *lm_root;
2587
2588                         lm_root = (void*)get_zeroed_page(GFP_KERNEL);
2589                         if (lm_root == NULL)
2590                                 return 1;
2591
2592                         lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
2593
2594                         vcpu->arch.mmu.lm_root = lm_root;
2595                 }
2596
2597                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
2598         }
2599
2600         return 0;
2601 }
2602
2603 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2604 {
2605         if (vcpu->arch.mmu.direct_map)
2606                 return mmu_alloc_direct_roots(vcpu);
2607         else
2608                 return mmu_alloc_shadow_roots(vcpu);
2609 }
2610
2611 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2612 {
2613         int i;
2614         struct kvm_mmu_page *sp;
2615
2616         if (vcpu->arch.mmu.direct_map)
2617                 return;
2618
2619         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2620                 return;
2621
2622         trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
2623         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2624                 hpa_t root = vcpu->arch.mmu.root_hpa;
2625                 sp = page_header(root);
2626                 mmu_sync_children(vcpu, sp);
2627                 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2628                 return;
2629         }
2630         for (i = 0; i < 4; ++i) {
2631                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2632
2633                 if (root && VALID_PAGE(root)) {
2634                         root &= PT64_BASE_ADDR_MASK;
2635                         sp = page_header(root);
2636                         mmu_sync_children(vcpu, sp);
2637                 }
2638         }
2639         trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2640 }
2641
2642 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2643 {
2644         spin_lock(&vcpu->kvm->mmu_lock);
2645         mmu_sync_roots(vcpu);
2646         spin_unlock(&vcpu->kvm->mmu_lock);
2647 }
2648
2649 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2650                                   u32 access, struct x86_exception *exception)
2651 {
2652         if (exception)
2653                 exception->error_code = 0;
2654         return vaddr;
2655 }
2656
2657 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2658                                          u32 access,
2659                                          struct x86_exception *exception)
2660 {
2661         if (exception)
2662                 exception->error_code = 0;
2663         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2664 }
2665
2666 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2667                                 u32 error_code, bool prefault)
2668 {
2669         gfn_t gfn;
2670         int r;
2671
2672         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2673         r = mmu_topup_memory_caches(vcpu);
2674         if (r)
2675                 return r;
2676
2677         ASSERT(vcpu);
2678         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2679
2680         gfn = gva >> PAGE_SHIFT;
2681
2682         return nonpaging_map(vcpu, gva & PAGE_MASK,
2683                              error_code & PFERR_WRITE_MASK, gfn, prefault);
2684 }
2685
2686 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
2687 {
2688         struct kvm_arch_async_pf arch;
2689
2690         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
2691         arch.gfn = gfn;
2692         arch.direct_map = vcpu->arch.mmu.direct_map;
2693         arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
2694
2695         return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
2696 }
2697
2698 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
2699 {
2700         if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
2701                      kvm_event_needs_reinjection(vcpu)))
2702                 return false;
2703
2704         return kvm_x86_ops->interrupt_allowed(vcpu);
2705 }
2706
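     /*
      * Try to resolve @gfn to a pfn without waiting for the page to be
      * brought in.  If that fails and an async page fault can be used,
      * queue one and return true so the fault is completed later;
      * otherwise fall back to a blocking gfn_to_pfn_prot() and return
      * false with *pfn set.
      */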
2707 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2708                          gva_t gva, pfn_t *pfn, bool write, bool *writable)
2709 {
2710         bool async;
2711
2712         *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
2713
2714         if (!async)
2715                 return false; /* *pfn has correct page already */
2716
2717         put_page(pfn_to_page(*pfn));
2718
2719         if (!prefault && can_do_async_pf(vcpu)) {
2720                 trace_kvm_try_async_get_page(gva, gfn);
2721                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
2722                         trace_kvm_async_pf_doublefault(gva, gfn);
2723                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
2724                         return true;
2725                 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
2726                         return true;
2727         }
2728
2729         *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
2730
2731         return false;
2732 }
2733
2734 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
2735                           bool prefault)
2736 {
2737         pfn_t pfn;
2738         int r;
2739         int level;
2740         int force_pt_level;
2741         gfn_t gfn = gpa >> PAGE_SHIFT;
2742         unsigned long mmu_seq;
2743         int write = error_code & PFERR_WRITE_MASK;
2744         bool map_writable;
2745
2746         ASSERT(vcpu);
2747         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2748
2749         r = mmu_topup_memory_caches(vcpu);
2750         if (r)
2751                 return r;
2752
2753         force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
2754         if (likely(!force_pt_level)) {
2755                 level = mapping_level(vcpu, gfn);
2756                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2757         } else
2758                 level = PT_PAGE_TABLE_LEVEL;
2759
2760         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2761         smp_rmb();
2762
2763         if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
2764                 return 0;
2765
2766         /* mmio */
2767         if (is_error_pfn(pfn))
2768                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2769         spin_lock(&vcpu->kvm->mmu_lock);
2770         if (mmu_notifier_retry(vcpu, mmu_seq))
2771                 goto out_unlock;
2772         kvm_mmu_free_some_pages(vcpu);
2773         if (likely(!force_pt_level))
2774                 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
2775         r = __direct_map(vcpu, gpa, write, map_writable,
2776                          level, gfn, pfn, prefault);
2777         spin_unlock(&vcpu->kvm->mmu_lock);
2778
2779         return r;
2780
2781 out_unlock:
2782         spin_unlock(&vcpu->kvm->mmu_lock);
2783         kvm_release_pfn_clean(pfn);
2784         return 0;
2785 }
2786
2787 static void nonpaging_free(struct kvm_vcpu *vcpu)
2788 {
2789         mmu_free_roots(vcpu);
2790 }
2791
2792 static int nonpaging_init_context(struct kvm_vcpu *vcpu,
2793                                   struct kvm_mmu *context)
2794 {
2795         context->new_cr3 = nonpaging_new_cr3;
2796         context->page_fault = nonpaging_page_fault;
2797         context->gva_to_gpa = nonpaging_gva_to_gpa;
2798         context->free = nonpaging_free;
2799         context->prefetch_page = nonpaging_prefetch_page;
2800         context->sync_page = nonpaging_sync_page;
2801         context->invlpg = nonpaging_invlpg;
2802         context->root_level = 0;
2803         context->shadow_root_level = PT32E_ROOT_LEVEL;
2804         context->root_hpa = INVALID_PAGE;
2805         context->direct_map = true;
2806         context->nx = false;
2807         return 0;
2808 }
2809
2810 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2811 {
2812         ++vcpu->stat.tlb_flush;
2813         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2814 }
2815
2816 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2817 {
2818         pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
2819         mmu_free_roots(vcpu);
2820 }
2821
2822 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
2823 {
2824         return kvm_read_cr3(vcpu);
2825 }
2826
2827 static void inject_page_fault(struct kvm_vcpu *vcpu,
2828                               struct x86_exception *fault)
2829 {
2830         vcpu->arch.mmu.inject_page_fault(vcpu, fault);
2831 }
2832
2833 static void paging_free(struct kvm_vcpu *vcpu)
2834 {
2835         nonpaging_free(vcpu);
2836 }
2837
2838 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2839 {
2840         int bit7;
2841
2842         bit7 = (gpte >> 7) & 1;
2843         return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2844 }
2845
2846 #define PTTYPE 64
2847 #include "paging_tmpl.h"
2848 #undef PTTYPE
2849
2850 #define PTTYPE 32
2851 #include "paging_tmpl.h"
2852 #undef PTTYPE
2853
2854 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2855                                   struct kvm_mmu *context,
2856                                   int level)
2857 {
2858         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2859         u64 exb_bit_rsvd = 0;
2860
2861         if (!context->nx)
2862                 exb_bit_rsvd = rsvd_bits(63, 63);
2863         switch (level) {
2864         case PT32_ROOT_LEVEL:
2865                 /* no rsvd bits for 2 level 4K page table entries */
2866                 context->rsvd_bits_mask[0][1] = 0;
2867                 context->rsvd_bits_mask[0][0] = 0;
2868                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2869
2870                 if (!is_pse(vcpu)) {
2871                         context->rsvd_bits_mask[1][1] = 0;
2872                         break;
2873                 }
2874
2875                 if (is_cpuid_PSE36())
2876                         /* 36bits PSE 4MB page */
2877                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2878                 else
2879                         /* 32 bits PSE 4MB page */
2880                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2881                 break;
2882         case PT32E_ROOT_LEVEL:
2883                 context->rsvd_bits_mask[0][2] =
2884                         rsvd_bits(maxphyaddr, 63) |
2885                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2886                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2887                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2888                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2889                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2890                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2891                         rsvd_bits(maxphyaddr, 62) |
2892                         rsvd_bits(13, 20);              /* large page */
2893                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2894                 break;
2895         case PT64_ROOT_LEVEL:
2896                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2897                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2898                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2899                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2900                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2901                         rsvd_bits(maxphyaddr, 51);
2902                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2903                         rsvd_bits(maxphyaddr, 51);
2904                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2905                 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2906                         rsvd_bits(maxphyaddr, 51) |
2907                         rsvd_bits(13, 29);
2908                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2909                         rsvd_bits(maxphyaddr, 51) |
2910                         rsvd_bits(13, 20);              /* large page */
2911                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2912                 break;
2913         }
2914 }
2915
2916 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
2917                                         struct kvm_mmu *context,
2918                                         int level)
2919 {
2920         context->nx = is_nx(vcpu);
2921
2922         reset_rsvds_bits_mask(vcpu, context, level);
2923
2924         ASSERT(is_pae(vcpu));
2925         context->new_cr3 = paging_new_cr3;
2926         context->page_fault = paging64_page_fault;
2927         context->gva_to_gpa = paging64_gva_to_gpa;
2928         context->prefetch_page = paging64_prefetch_page;
2929         context->sync_page = paging64_sync_page;
2930         context->invlpg = paging64_invlpg;
2931         context->free = paging_free;
2932         context->root_level = level;
2933         context->shadow_root_level = level;
2934         context->root_hpa = INVALID_PAGE;
2935         context->direct_map = false;
2936         return 0;
2937 }
2938
2939 static int paging64_init_context(struct kvm_vcpu *vcpu,
2940                                  struct kvm_mmu *context)
2941 {
2942         return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
2943 }
2944
2945 static int paging32_init_context(struct kvm_vcpu *vcpu,
2946                                  struct kvm_mmu *context)
2947 {
2948         context->nx = false;
2949
2950         reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2951
2952         context->new_cr3 = paging_new_cr3;
2953         context->page_fault = paging32_page_fault;
2954         context->gva_to_gpa = paging32_gva_to_gpa;
2955         context->free = paging_free;
2956         context->prefetch_page = paging32_prefetch_page;
2957         context->sync_page = paging32_sync_page;
2958         context->invlpg = paging32_invlpg;
2959         context->root_level = PT32_ROOT_LEVEL;
2960         context->shadow_root_level = PT32E_ROOT_LEVEL;
2961         context->root_hpa = INVALID_PAGE;
2962         context->direct_map = false;
2963         return 0;
2964 }
2965
2966 static int paging32E_init_context(struct kvm_vcpu *vcpu,
2967                                   struct kvm_mmu *context)
2968 {
2969         return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
2970 }
2971
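     /*
      * Set up the MMU context for two-dimensional paging (tdp_enabled).
      * The shadow side is a direct map rooted at the level reported by
      * the hardware, while gva_to_gpa still follows the guest's current
      * paging mode.
      */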
2972 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2973 {
2974         struct kvm_mmu *context = vcpu->arch.walk_mmu;
2975
2976         context->base_role.word = 0;
2977         context->new_cr3 = nonpaging_new_cr3;
2978         context->page_fault = tdp_page_fault;
2979         context->free = nonpaging_free;
2980         context->prefetch_page = nonpaging_prefetch_page;
2981         context->sync_page = nonpaging_sync_page;
2982         context->invlpg = nonpaging_invlpg;
2983         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2984         context->root_hpa = INVALID_PAGE;
2985         context->direct_map = true;
2986         context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
2987         context->get_cr3 = get_cr3;
2988         context->inject_page_fault = kvm_inject_page_fault;
2989         context->nx = is_nx(vcpu);
2990
2991         if (!is_paging(vcpu)) {
2992                 context->nx = false;
2993                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2994                 context->root_level = 0;
2995         } else if (is_long_mode(vcpu)) {
2996                 context->nx = is_nx(vcpu);
2997                 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
2998                 context->gva_to_gpa = paging64_gva_to_gpa;
2999                 context->root_level = PT64_ROOT_LEVEL;
3000         } else if (is_pae(vcpu)) {
3001                 context->nx = is_nx(vcpu);
3002                 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
3003                 context->gva_to_gpa = paging64_gva_to_gpa;
3004                 context->root_level = PT32E_ROOT_LEVEL;
3005         } else {
3006                 context->nx = false;
3007                 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
3008                 context->gva_to_gpa = paging32_gva_to_gpa;
3009                 context->root_level = PT32_ROOT_LEVEL;
3010         }
3011
3012         return 0;
3013 }
3014
3015 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3016 {
3017         int r;
3018         ASSERT(vcpu);
3019         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3020
3021         if (!is_paging(vcpu))
3022                 r = nonpaging_init_context(vcpu, context);
3023         else if (is_long_mode(vcpu))
3024                 r = paging64_init_context(vcpu, context);
3025         else if (is_pae(vcpu))
3026                 r = paging32E_init_context(vcpu, context);
3027         else
3028                 r = paging32_init_context(vcpu, context);
3029
3030         vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
3031         vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
3032
3033         return r;
3034 }
3035 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
3036
3037 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
3038 {
3039         int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
3040
3041         vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
3042         vcpu->arch.walk_mmu->get_cr3           = get_cr3;
3043         vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3044
3045         return r;
3046 }
3047
3048 static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
3049 {
3050         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
3051
3052         g_context->get_cr3           = get_cr3;
3053         g_context->inject_page_fault = kvm_inject_page_fault;
3054
3055         /*
3056          * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
3057          * translation of l2_gpa to l1_gpa addresses is done using the
3058          * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
3059          * functions between mmu and nested_mmu are swapped.
3060          */
3061         if (!is_paging(vcpu)) {
3062                 g_context->nx = false;
3063                 g_context->root_level = 0;
3064                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
3065         } else if (is_long_mode(vcpu)) {
3066                 g_context->nx = is_nx(vcpu);
3067                 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
3068                 g_context->root_level = PT64_ROOT_LEVEL;
3069                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3070         } else if (is_pae(vcpu)) {
3071                 g_context->nx = is_nx(vcpu);
3072                 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
3073                 g_context->root_level = PT32E_ROOT_LEVEL;
3074                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3075         } else {
3076                 g_context->nx = false;
3077                 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
3078                 g_context->root_level = PT32_ROOT_LEVEL;
3079                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
3080         }
3081
3082         return 0;
3083 }
3084
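/*
 * Pick the MMU flavour for this vcpu: the nested walker when the vcpu is
 * running an L2 guest behind nested paging, the TDP MMU when the host
 * uses EPT/NPT, and the software shadow MMU otherwise.
 */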
3085 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
3086 {
3087         vcpu->arch.update_pte.pfn = bad_pfn;
3088
3089         if (mmu_is_nested(vcpu))
3090                 return init_kvm_nested_mmu(vcpu);
3091         else if (tdp_enabled)
3092                 return init_kvm_tdp_mmu(vcpu);
3093         else
3094                 return init_kvm_softmmu(vcpu);
3095 }
3096
3097 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
3098 {
3099         ASSERT(vcpu);
3100         if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
3101                 /* mmu.free() should set root_hpa = INVALID_PAGE */
3102                 vcpu->arch.mmu.free(vcpu);
3103 }
3104
3105 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
3106 {
3107         destroy_kvm_mmu(vcpu);
3108         return init_kvm_mmu(vcpu);
3109 }
3110 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
3111
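/*
 * (Re)load the MMU root for this vcpu: top up the per-vcpu memory
 * caches, allocate the root shadow pages, sync any unsync children
 * under mmu_lock and finally point the hardware at the new root via
 * the set_cr3 callback.
 */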
3112 int kvm_mmu_load(struct kvm_vcpu *vcpu)
3113 {
3114         int r;
3115
3116         r = mmu_topup_memory_caches(vcpu);
3117         if (r)
3118                 goto out;
3119         r = mmu_alloc_roots(vcpu);
3120         spin_lock(&vcpu->kvm->mmu_lock);
3121         mmu_sync_roots(vcpu);
3122         spin_unlock(&vcpu->kvm->mmu_lock);
3123         if (r)
3124                 goto out;
3125         /* set_cr3() should ensure TLB has been flushed */
3126         vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
3127 out:
3128         return r;
3129 }
3130 EXPORT_SYMBOL_GPL(kvm_mmu_load);
3131
3132 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
3133 {
3134         mmu_free_roots(vcpu);
3135 }
3136 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
3137
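/*
 * Clear the shadow pte that mirrors a guest pte being overwritten: a
 * last-level spte is dropped (including its rmap), while a non-leaf
 * entry just detaches the child shadow page from this parent.
 */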
3138 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
3139                                   struct kvm_mmu_page *sp,
3140                                   u64 *spte)
3141 {
3142         u64 pte;
3143         struct kvm_mmu_page *child;
3144
3145         pte = *spte;
3146         if (is_shadow_present_pte(pte)) {
3147                 if (is_last_spte(pte, sp->role.level))
3148                         drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
3149                 else {
3150                         child = page_header(pte & PT64_BASE_ADDR_MASK);
3151                         mmu_page_remove_parent_pte(child, spte);
3152                 }
3153         }
3154         __set_spte(spte, shadow_trap_nonpresent_pte);
3155         if (is_large_pte(pte))
3156                 --vcpu->kvm->stat.lpages;
3157 }
3158
3159 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
3160                                   struct kvm_mmu_page *sp,
3161                                   u64 *spte,
3162                                   const void *new)
3163 {
3164         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
3165                 ++vcpu->kvm->stat.mmu_pde_zapped;
3166                 return;
3167         }
3168
3169         ++vcpu->kvm->stat.mmu_pte_updated;
3170         if (!sp->role.cr4_pae)
3171                 paging32_update_pte(vcpu, sp, spte, new);
3172         else
3173                 paging64_update_pte(vcpu, sp, spte, new);
3174 }
3175
3176 static bool need_remote_flush(u64 old, u64 new)
3177 {
3178         if (!is_shadow_present_pte(old))
3179                 return false;
3180         if (!is_shadow_present_pte(new))
3181                 return true;
3182         if ((old ^ new) & PT64_BASE_ADDR_MASK)
3183                 return true;
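        /*
         * NX is an inverted permission bit; flip it in both values so the
         * check below fires only when the new spte removes a permission
         * that the old one granted.
         */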
3184         old ^= PT64_NX_MASK;
3185         new ^= PT64_NX_MASK;
3186         return (old & ~new & PT64_PERM_MASK) != 0;
3187 }
3188
3189 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
3190                                     bool remote_flush, bool local_flush)
3191 {
3192         if (zap_page)
3193                 return;
3194
3195         if (remote_flush)
3196                 kvm_flush_remote_tlbs(vcpu->kvm);
3197         else if (local_flush)
3198                 kvm_mmu_flush_tlb(vcpu);
3199 }
3200
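/*
 * Part of the write flood detection heuristic: if the spte installed by
 * the last guest pte write was never accessed, repeated writes to the
 * same gfn suggest the page is not really being used as a page table.
 */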
3201 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
3202 {
3203         u64 *spte = vcpu->arch.last_pte_updated;
3204
3205         return !!(spte && (*spte & shadow_accessed_mask));
3206 }
3207
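/*
 * Speculatively pick up the pfn referenced by the guest pte that is
 * being written, outside mmu_lock.  mmu_notifier_seq is sampled before
 * the gfn_to_pfn() call so that a racing invalidation can be detected
 * before the cached pfn is consumed.
 */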
3208 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3209                                           u64 gpte)
3210 {
3211         gfn_t gfn;
3212         pfn_t pfn;
3213
3214         if (!is_present_gpte(gpte))
3215                 return;
3216         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
3217
3218         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
3219         smp_rmb();
3220         pfn = gfn_to_pfn(vcpu->kvm, gfn);
3221
3222         if (is_error_pfn(pfn)) {
3223                 kvm_release_pfn_clean(pfn);
3224                 return;
3225         }
3226         vcpu->arch.update_pte.pfn = pfn;
3227 }
3228
3229 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
3230 {
3231         u64 *spte = vcpu->arch.last_pte_updated;
3232
3233         if (spte
3234             && vcpu->arch.last_pte_gfn == gfn
3235             && shadow_accessed_mask
3236             && !(*spte & shadow_accessed_mask)
3237             && is_shadow_present_pte(*spte))
3238                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
3239 }
3240
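/*
 * Called when the guest writes to a gfn that is shadowed as a page
 * table.  The affected sptes are either updated in place from the new
 * guest pte or zapped; pages that see misaligned or flooding writes are
 * unshadowed instead, on the assumption that they are no longer used as
 * page tables.
 */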
3241 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3242                        const u8 *new, int bytes,
3243                        bool guest_initiated)
3244 {
3245         gfn_t gfn = gpa >> PAGE_SHIFT;
3246         union kvm_mmu_page_role mask = { .word = 0 };
3247         struct kvm_mmu_page *sp;
3248         struct hlist_node *node;
3249         LIST_HEAD(invalid_list);
3250         u64 entry, gentry;
3251         u64 *spte;
3252         unsigned offset = offset_in_page(gpa);
3253         unsigned pte_size;
3254         unsigned page_offset;
3255         unsigned misaligned;
3256         unsigned quadrant;
3257         int level;
3258         int flooded = 0;
3259         int npte;
3260         int r;
3261         int invlpg_counter;
3262         bool remote_flush, local_flush, zap_page;
3263
3264         zap_page = remote_flush = local_flush = false;
3265
3266         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
3267
3268         invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
3269
3270         /*
3271          * Assume that the pte write is on a page table of the same type
3272          * as the current vcpu paging mode, since we update the sptes
3273          * only when they have the same mode.
3274          */
3275         if ((is_pae(vcpu) && bytes == 4) || !new) {
3276                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
3277                 if (is_pae(vcpu)) {
3278                         gpa &= ~(gpa_t)7;
3279                         bytes = 8;
3280                 }
3281                 r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
3282                 if (r)
3283                         gentry = 0;
3284                 new = (const u8 *)&gentry;
3285         }
3286
3287         switch (bytes) {
3288         case 4:
3289                 gentry = *(const u32 *)new;
3290                 break;
3291         case 8:
3292                 gentry = *(const u64 *)new;
3293                 break;
3294         default:
3295                 gentry = 0;
3296                 break;
3297         }
3298
3299         mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
3300         spin_lock(&vcpu->kvm->mmu_lock);
3301         if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
3302                 gentry = 0;
3303         kvm_mmu_free_some_pages(vcpu);
3304         ++vcpu->kvm->stat.mmu_pte_write;
3305         trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
3306         if (guest_initiated) {
3307                 kvm_mmu_access_page(vcpu, gfn);
3308                 if (gfn == vcpu->arch.last_pt_write_gfn
3309                     && !last_updated_pte_accessed(vcpu)) {
3310                         ++vcpu->arch.last_pt_write_count;
3311                         if (vcpu->arch.last_pt_write_count >= 3)
3312                                 flooded = 1;
3313                 } else {
3314                         vcpu->arch.last_pt_write_gfn = gfn;
3315                         vcpu->arch.last_pt_write_count = 1;
3316                         vcpu->arch.last_pte_updated = NULL;
3317                 }
3318         }
3319
3320         mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
3321         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
3322                 pte_size = sp->role.cr4_pae ? 8 : 4;
3323                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
3324                 misaligned |= bytes < 4;
3325                 if (misaligned || flooded) {
3326                         /*
3327                          * Misaligned accesses are too much trouble to fix
3328                          * up; also, they usually indicate a page is not used
3329                          * as a page table.
3330                          *
3331                          * If we're seeing too many writes to a page,
3332                          * it may no longer be a page table, or we may be
3333                          * forking, in which case it is better to unmap the
3334                          * page.
3335                          */
3336                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
3337                                  gpa, bytes, sp->role.word);
3338                         zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
3339                                                      &invalid_list);
3340                         ++vcpu->kvm->stat.mmu_flooded;
3341                         continue;
3342                 }
3343                 page_offset = offset;
3344                 level = sp->role.level;
3345                 npte = 1;
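                /*
                 * Illustration for a non-PAE guest: guest ptes are 4 bytes
                 * but sptes are 8, so one 4KB guest table is shadowed by
                 * several 512-entry pages selected by role.quadrant.  For
                 * example, a write at offset 0x400 of a guest pde page
                 * (pde 256, the 4MB region at 1GB) ends up, after the two
                 * shifts below, at offset 0x1000: quadrant 1, sptes 0-1.
                 */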
3346                 if (!sp->role.cr4_pae) {
3347                         page_offset <<= 1;      /* 32->64 */
3348                         /*
3349                          * A 32-bit pde maps 4MB while the shadow pdes map
3350                          * only 2MB.  So we need to double the offset again
3351                          * and zap two pdes instead of one.
3352                          */
3353                         if (level == PT32_ROOT_LEVEL) {
3354                                 page_offset &= ~7; /* kill rounding error */
3355                                 page_offset <<= 1;
3356                                 npte = 2;
3357                         }
3358                         quadrant = page_offset >> PAGE_SHIFT;
3359                         page_offset &= ~PAGE_MASK;
3360                         if (quadrant != sp->role.quadrant)
3361                                 continue;
3362                 }
3363                 local_flush = true;
3364                 spte = &sp->spt[page_offset / sizeof(*spte)];
3365                 while (npte--) {
3366                         entry = *spte;
3367                         mmu_pte_write_zap_pte(vcpu, sp, spte);
3368                         if (gentry &&
3369                               !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
3370                               & mask.word))
3371                                 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
3372                         if (!remote_flush && need_remote_flush(entry, *spte))
3373                                 remote_flush = true;
3374                         ++spte;
3375                 }
3376         }
3377         mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
3378         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3379         trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
3380         spin_unlock(&vcpu->kvm->mmu_lock);
3381         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
3382                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
3383                 vcpu->arch.update_pte.pfn = bad_pfn;
3384         }
3385 }
3386
3387 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
3388 {
3389         gpa_t gpa;
3390         int r;
3391
3392         if (vcpu->arch.mmu.direct_map)
3393                 return 0;
3394
3395         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
3396
3397         spin_lock(&vcpu->kvm->mmu_lock);
3398         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3399         spin_unlock(&vcpu->kvm->mmu_lock);
3400         return r;
3401 }
3402 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
3403
3404 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
3405 {
3406         LIST_HEAD(invalid_list);
3407
3408         while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
3409                !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
3410                 struct kvm_mmu_page *sp;
3411
3412                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
3413                                   struct kvm_mmu_page, link);
3414                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
3415                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3416                 ++vcpu->kvm->stat.mmu_recycled;
3417         }
3418 }
3419
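/*
 * Entry point for guest page faults that reach KVM.  A zero return from
 * the per-context handler means the fault was fixed in the shadow/TDP
 * tables and the guest can be resumed; a positive value means the
 * faulting access has to be emulated; a negative value is an error and
 * is propagated to the caller.
 */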
3420 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
3421                        void *insn, int insn_len)
3422 {
3423         int r;
3424         enum emulation_result er;
3425
3426         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
3427         if (r < 0)
3428                 goto out;
3429
3430         if (!r) {
3431                 r = 1;
3432                 goto out;
3433         }
3434
3435         r = mmu_topup_memory_caches(vcpu);
3436         if (r)
3437                 goto out;
3438
3439         er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
3440
3441         switch (er) {
3442         case EMULATE_DONE:
3443                 return 1;
3444         case EMULATE_DO_MMIO:
3445                 ++vcpu->stat.mmio_exits;
3446                 /* fall through */
3447         case EMULATE_FAIL:
3448                 return 0;
3449         default:
3450                 BUG();
3451         }
3452 out:
3453         return r;
3454 }
3455 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
3456
3457 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
3458 {
3459         vcpu->arch.mmu.invlpg(vcpu, gva);
3460         kvm_mmu_flush_tlb(vcpu);
3461         ++vcpu->stat.invlpg;
3462 }
3463 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
3464
3465 void kvm_enable_tdp(void)
3466 {
3467         tdp_enabled = true;
3468 }
3469 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
3470
3471 void kvm_disable_tdp(void)
3472 {
3473         tdp_enabled = false;
3474 }
3475 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
3476
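/*
 * pae_root is a single page, kept below 4GB, holding the four root
 * entries used whenever the shadow root level is PT32E_ROOT_LEVEL;
 * lm_root, when it was allocated, provides the extra top level needed
 * to place pae_root under a four level shadow.
 */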
3477 static void free_mmu_pages(struct kvm_vcpu *vcpu)
3478 {
3479         free_page((unsigned long)vcpu->arch.mmu.pae_root);
3480         if (vcpu->arch.mmu.lm_root != NULL)
3481                 free_page((unsigned long)vcpu->arch.mmu.lm_root);
3482 }
3483
3484 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
3485 {
3486         struct page *page;
3487         int i;
3488
3489         ASSERT(vcpu);
3490
3491         /*
3492          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
3493          * Therefore we need to allocate shadow page tables in the first
3494          * 4GB of memory, which happens to fit the DMA32 zone.
3495          */
3496         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
3497         if (!page)
3498                 return -ENOMEM;
3499
3500         vcpu->arch.mmu.pae_root = page_address(page);
3501         for (i = 0; i < 4; ++i)
3502                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3503
3504         return 0;
3505 }
3506
3507 int kvm_mmu_create(struct kvm_vcpu *vcpu)
3508 {
3509         ASSERT(vcpu);
3510         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3511
3512         return alloc_mmu_pages(vcpu);
3513 }
3514
3515 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
3516 {
3517         ASSERT(vcpu);
3518         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3519
3520         return init_kvm_mmu(vcpu);
3521 }
3522
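/*
 * Write-protect every last-level spte that maps the given memslot
 * (large sptes are simply dropped) and flush remote TLBs, so that
 * subsequent guest writes fault and can be dirty-logged.
 */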
3523 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
3524 {
3525         struct kvm_mmu_page *sp;
3526
3527         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
3528                 int i;
3529                 u64 *pt;
3530
3531                 if (!test_bit(slot, sp->slot_bitmap))
3532                         continue;
3533
3534                 pt = sp->spt;
3535                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3536                         if (!is_shadow_present_pte(pt[i]) ||
3537                               !is_last_spte(pt[i], sp->role.level))
3538                                 continue;
3539
3540                         if (is_large_pte(pt[i])) {
3541                                 drop_spte(kvm, &pt[i],
3542                                           shadow_trap_nonpresent_pte);
3543                                 --kvm->stat.lpages;
3544                                 continue;
3545                         }
3546
3547                         /* skip read-only sptes to avoid a needless RMW */
3548                         if (is_writable_pte(pt[i]))
3549                                 update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
3550                 }
3551         }
3552         kvm_flush_remote_tlbs(kvm);
3553 }
3554
3555 void kvm_mmu_zap_all(struct kvm *kvm)
3556 {
3557         struct kvm_mmu_page *sp, *node;
3558         LIST_HEAD(invalid_list);
3559
3560         spin_lock(&kvm->mmu_lock);
3561 restart:
3562         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
3563                 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
3564                         goto restart;
3565
3566         kvm_mmu_commit_zap_page(kvm, &invalid_list);
3567         spin_unlock(&kvm->mmu_lock);
3568 }
3569
3570 static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
3571                                                struct list_head *invalid_list)
3572 {
3573         struct kvm_mmu_page *page;
3574
3575         page = container_of(kvm->arch.active_mmu_pages.prev,
3576                             struct kvm_mmu_page, link);
3577         return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3578 }
3579
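/*
 * Memory shrinker callback: zap shadow pages from the first VM on
 * vm_list that has any, taking them from the tail of its active list,
 * and rotate that VM to the end of the list so reclaim pressure is
 * spread across guests.  The return value is the global count of shadow
 * pages currently in use.
 */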
3580 static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3581 {
3582         struct kvm *kvm;
3583         struct kvm *kvm_freed = NULL;
3584
3585         if (nr_to_scan == 0)
3586                 goto out;
3587
3588         raw_spin_lock(&kvm_lock);
3589
3590         list_for_each_entry(kvm, &vm_list, vm_list) {
3591                 int idx, freed_pages;
3592                 LIST_HEAD(invalid_list);
3593
3594                 idx = srcu_read_lock(&kvm->srcu);
3595                 spin_lock(&kvm->mmu_lock);
3596                 if (!kvm_freed && nr_to_scan > 0 &&
3597                     kvm->arch.n_used_mmu_pages > 0) {
3598                         freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
3599                                                           &invalid_list);
3600                         kvm_freed = kvm;
3601                 }
3602                 nr_to_scan--;
3603
3604                 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3605                 spin_unlock(&kvm->mmu_lock);
3606                 srcu_read_unlock(&kvm->srcu, idx);
3607         }
3608         if (kvm_freed)
3609                 list_move_tail(&kvm_freed->vm_list, &vm_list);
3610
3611         raw_spin_unlock(&kvm_lock);
3612
3613 out:
3614         return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
3615 }
3616
3617 static struct shrinker mmu_shrinker = {
3618         .shrink = mmu_shrink,
3619         .seeks = DEFAULT_SEEKS * 10,
3620 };
3621
3622 static void mmu_destroy_caches(void)
3623 {
3624         if (pte_chain_cache)
3625                 kmem_cache_destroy(pte_chain_cache);
3626         if (rmap_desc_cache)
3627                 kmem_cache_destroy(rmap_desc_cache);
3628         if (mmu_page_header_cache)
3629                 kmem_cache_destroy(mmu_page_header_cache);
3630 }
3631
3632 int kvm_mmu_module_init(void)
3633 {
3634         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
3635                                             sizeof(struct kvm_pte_chain),
3636                                             0, 0, NULL);
3637         if (!pte_chain_cache)
3638                 goto nomem;
3639         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
3640                                             sizeof(struct kvm_rmap_desc),
3641                                             0, 0, NULL);
3642         if (!rmap_desc_cache)
3643                 goto nomem;
3644
3645         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
3646                                                   sizeof(struct kvm_mmu_page),
3647                                                   0, 0, NULL);
3648         if (!mmu_page_header_cache)
3649                 goto nomem;
3650
3651         if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
3652                 goto nomem;
3653
3654         register_shrinker(&mmu_shrinker);
3655
3656         return 0;
3657
3658 nomem:
3659         mmu_destroy_caches();
3660         return -ENOMEM;
3661 }
3662
3663 /*
3664  * Calculate mmu pages needed for kvm.
3665  */
3666 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3667 {
3668         int i;
3669         unsigned int nr_mmu_pages;
3670         unsigned int  nr_pages = 0;
3671         struct kvm_memslots *slots;
3672
3673         slots = kvm_memslots(kvm);
3674
3675         for (i = 0; i < slots->nmemslots; i++)
3676                 nr_pages += slots->memslots[i].npages;
3677
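        /*
         * Budget KVM_PERMILLE_MMU_PAGES shadow pages per thousand guest
         * pages, but never fewer than KVM_MIN_ALLOC_MMU_PAGES.  With the
         * usual ratio of 20/1000, for example, a 4GB guest (1M pages)
         * gets roughly 20k shadow pages.
         */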
3678         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3679         nr_mmu_pages = max(nr_mmu_pages,
3680                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3681
3682         return nr_mmu_pages;
3683 }
3684
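/*
 * Support for the KVM_HC_MMU_OP paravirtual hypercall: the guest passes
 * a buffer of batched mmu operations (write pte, flush tlb, release
 * page table) which are decoded and applied one at a time below.
 */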
3685 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3686                                 unsigned len)
3687 {
3688         if (len > buffer->len)
3689                 return NULL;
3690         return buffer->ptr;
3691 }
3692
3693 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3694                                 unsigned len)
3695 {
3696         void *ret;
3697
3698         ret = pv_mmu_peek_buffer(buffer, len);
3699         if (!ret)
3700                 return ret;
3701         buffer->ptr += len;
3702         buffer->len -= len;
3703         buffer->processed += len;
3704         return ret;
3705 }
3706
3707 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3708                              gpa_t addr, gpa_t value)
3709 {
3710         int bytes = 8;
3711         int r;
3712
3713         if (!is_long_mode(vcpu) && !is_pae(vcpu))
3714                 bytes = 4;
3715
3716         r = mmu_topup_memory_caches(vcpu);
3717         if (r)
3718                 return r;
3719
3720         if (!emulator_write_phys(vcpu, addr, &value, bytes))
3721                 return -EFAULT;
3722
3723         return 1;
3724 }
3725
3726 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3727 {
3728         (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
3729         return 1;
3730 }
3731
3732 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3733 {
3734         spin_lock(&vcpu->kvm->mmu_lock);
3735         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3736         spin_unlock(&vcpu->kvm->mmu_lock);
3737         return 1;
3738 }
3739
3740 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3741                              struct kvm_pv_mmu_op_buffer *buffer)
3742 {
3743         struct kvm_mmu_op_header *header;
3744
3745         header = pv_mmu_peek_buffer(buffer, sizeof *header);
3746         if (!header)
3747                 return 0;
3748         switch (header->op) {
3749         case KVM_MMU_OP_WRITE_PTE: {
3750                 struct kvm_mmu_op_write_pte *wpte;
3751
3752                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3753                 if (!wpte)
3754                         return 0;
3755                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3756                                         wpte->pte_val);
3757         }
3758         case KVM_MMU_OP_FLUSH_TLB: {
3759                 struct kvm_mmu_op_flush_tlb *ftlb;
3760
3761                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3762                 if (!ftlb)
3763                         return 0;
3764                 return kvm_pv_mmu_flush_tlb(vcpu);
3765         }
3766         case KVM_MMU_OP_RELEASE_PT: {
3767                 struct kvm_mmu_op_release_pt *rpt;
3768
3769                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3770                 if (!rpt)
3771                         return 0;
3772                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3773         }
3774         default: return 0;
3775         }
3776 }
3777
3778 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3779                   gpa_t addr, unsigned long *ret)
3780 {
3781         int r;
3782         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3783
3784         buffer->ptr = buffer->buf;
3785         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3786         buffer->processed = 0;
3787
3788         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3789         if (r)
3790                 goto out;
3791
3792         while (buffer->len) {
3793                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3794                 if (r < 0)
3795                         goto out;
3796                 if (r == 0)
3797                         break;
3798         }
3799
3800         r = 1;
3801 out:
3802         *ret = buffer->processed;
3803         return r;
3804 }
3805
3806 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3807 {
3808         struct kvm_shadow_walk_iterator iterator;
3809         int nr_sptes = 0;
3810
3811         spin_lock(&vcpu->kvm->mmu_lock);
3812         for_each_shadow_entry(vcpu, addr, iterator) {
3813                 sptes[iterator.level-1] = *iterator.sptep;
3814                 nr_sptes++;
3815                 if (!is_shadow_present_pte(*iterator.sptep))
3816                         break;
3817         }
3818         spin_unlock(&vcpu->kvm->mmu_lock);
3819
3820         return nr_sptes;
3821 }
3822 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3823
3824 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
3825 {
3826         ASSERT(vcpu);
3827
3828         destroy_kvm_mmu(vcpu);
3829         free_mmu_pages(vcpu);
3830         mmu_free_memory_caches(vcpu);
3831 }
3832
3833 #ifdef CONFIG_KVM_MMU_AUDIT
3834 #include "mmu_audit.c"
3835 #else
3836 static void mmu_audit_disable(void) { }
3837 #endif
3838
3839 void kvm_mmu_module_exit(void)
3840 {
3841         mmu_destroy_caches();
3842         percpu_counter_destroy(&kvm_total_used_mmu_pages);
3843         unregister_shrinker(&mmu_shrinker);
3844         mmu_audit_disable();
3845 }