arch/x86/kvm/mmu.c (pandora-kernel.git)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11  *
12  * Authors:
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Avi Kivity   <avi@qumranet.com>
15  *
16  * This work is licensed under the terms of the GNU GPL, version 2.  See
17  * the COPYING file in the top-level directory.
18  *
19  */
20
21 #include "irq.h"
22 #include "mmu.h"
23 #include "x86.h"
24 #include "kvm_cache_regs.h"
25
26 #include <linux/kvm_host.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/module.h>
32 #include <linux/swap.h>
33 #include <linux/hugetlb.h>
34 #include <linux/compiler.h>
35 #include <linux/srcu.h>
36 #include <linux/slab.h>
37 #include <linux/uaccess.h>
38
39 #include <asm/page.h>
40 #include <asm/cmpxchg.h>
41 #include <asm/io.h>
42 #include <asm/vmx.h>
43
44 /*
45  * When this variable is set to true, it enables Two-Dimensional Paging,
46  * where the hardware walks two page tables:
47  * 1. the guest-virtual to guest-physical translation, and
48  * 2. while doing 1., the guest-physical to host-physical translation.
49  * If the hardware supports this, we don't need to do shadow paging.
50  */
51 bool tdp_enabled = false;
52
53 enum {
54         AUDIT_PRE_PAGE_FAULT,
55         AUDIT_POST_PAGE_FAULT,
56         AUDIT_PRE_PTE_WRITE,
57         AUDIT_POST_PTE_WRITE,
58         AUDIT_PRE_SYNC,
59         AUDIT_POST_SYNC
60 };
61
62 char *audit_point_name[] = {
63         "pre page fault",
64         "post page fault",
65         "pre pte write",
66         "post pte write",
67         "pre sync",
68         "post sync"
69 };
70
71 #undef MMU_DEBUG
72
73 #ifdef MMU_DEBUG
74
75 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
76 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
77
78 #else
79
80 #define pgprintk(x...) do { } while (0)
81 #define rmap_printk(x...) do { } while (0)
82
83 #endif
84
85 #ifdef MMU_DEBUG
86 static int dbg = 0;
87 module_param(dbg, bool, 0644);
88 #endif
89
90 static int oos_shadow = 1;
91 module_param(oos_shadow, bool, 0644);
92
93 #ifndef MMU_DEBUG
94 #define ASSERT(x) do { } while (0)
95 #else
96 #define ASSERT(x)                                                       \
97         if (!(x)) {                                                     \
98                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
99                        __FILE__, __LINE__, #x);                         \
100         }
101 #endif
102
103 #define PTE_PREFETCH_NUM                8
104
105 #define PT_FIRST_AVAIL_BITS_SHIFT 9
106 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
107
108 #define PT64_LEVEL_BITS 9
109
110 #define PT64_LEVEL_SHIFT(level) \
111                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
112
113 #define PT64_INDEX(address, level)\
114         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
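/*
 * Worked example (PAGE_SHIFT == 12): PT64_LEVEL_SHIFT() evaluates to 12, 21,
 * 30 and 39 for levels 1..4, so PT64_INDEX() extracts address bits 12..20,
 * 21..29, 30..38 and 39..47 respectively -- the usual 9-bit index at each
 * level of a 64-bit page table walk.
 */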
115
116
117 #define PT32_LEVEL_BITS 10
118
119 #define PT32_LEVEL_SHIFT(level) \
120                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
121
122 #define PT32_LVL_OFFSET_MASK(level) \
123         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
124                                                 * PT32_LEVEL_BITS))) - 1))
125
126 #define PT32_INDEX(address, level)\
127         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
128
129
130 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
131 #define PT64_DIR_BASE_ADDR_MASK \
132         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
133 #define PT64_LVL_ADDR_MASK(level) \
134         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
135                                                 * PT64_LEVEL_BITS))) - 1))
136 #define PT64_LVL_OFFSET_MASK(level) \
137         (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
138                                                 * PT64_LEVEL_BITS))) - 1))
139
140 #define PT32_BASE_ADDR_MASK PAGE_MASK
141 #define PT32_DIR_BASE_ADDR_MASK \
142         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
143 #define PT32_LVL_ADDR_MASK(level) \
144         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
145                                             * PT32_LEVEL_BITS))) - 1))
146
147 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
148                         | PT64_NX_MASK)
149
150 #define RMAP_EXT 4
151
152 #define ACC_EXEC_MASK    1
153 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
154 #define ACC_USER_MASK    PT_USER_MASK
155 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
156
157 #include <trace/events/kvm.h>
158
159 #define CREATE_TRACE_POINTS
160 #include "mmutrace.h"
161
162 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
163
164 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
165
166 struct kvm_rmap_desc {
167         u64 *sptes[RMAP_EXT];
168         struct kvm_rmap_desc *more;
169 };
170
171 struct kvm_shadow_walk_iterator {
172         u64 addr;
173         hpa_t shadow_addr;
174         int level;
175         u64 *sptep;
176         unsigned index;
177 };
178
179 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
180         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
181              shadow_walk_okay(&(_walker));                      \
182              shadow_walk_next(&(_walker)))
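/*
 * Usage sketch: for_each_shadow_entry(vcpu, addr, iterator) walks the shadow
 * page table from the root towards the leaf for @addr, exposing
 * iterator.sptep and iterator.level at each step; see shadow_walk_init(),
 * shadow_walk_okay() and shadow_walk_next() below.
 */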
183
184 typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
185
186 static struct kmem_cache *pte_chain_cache;
187 static struct kmem_cache *rmap_desc_cache;
188 static struct kmem_cache *mmu_page_header_cache;
189 static struct percpu_counter kvm_total_used_mmu_pages;
190
191 static u64 __read_mostly shadow_trap_nonpresent_pte;
192 static u64 __read_mostly shadow_notrap_nonpresent_pte;
193 static u64 __read_mostly shadow_nx_mask;
194 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
195 static u64 __read_mostly shadow_user_mask;
196 static u64 __read_mostly shadow_accessed_mask;
197 static u64 __read_mostly shadow_dirty_mask;
198
199 static inline u64 rsvd_bits(int s, int e)
200 {
201         return ((1ULL << (e - s + 1)) - 1) << s;
202 }
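/*
 * Example: rsvd_bits(3, 5) == 0x38, i.e. bits 3..5 inclusive.  The mmu uses
 * this helper elsewhere to build the reserved-bit masks that guest page
 * table entries are checked against.
 */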
203
204 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
205 {
206         shadow_trap_nonpresent_pte = trap_pte;
207         shadow_notrap_nonpresent_pte = notrap_pte;
208 }
209 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
210
211 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
212                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
213 {
214         shadow_user_mask = user_mask;
215         shadow_accessed_mask = accessed_mask;
216         shadow_dirty_mask = dirty_mask;
217         shadow_nx_mask = nx_mask;
218         shadow_x_mask = x_mask;
219 }
220 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
221
222 static bool is_write_protection(struct kvm_vcpu *vcpu)
223 {
224         return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
225 }
226
227 static int is_cpuid_PSE36(void)
228 {
229         return 1;
230 }
231
232 static int is_nx(struct kvm_vcpu *vcpu)
233 {
234         return vcpu->arch.efer & EFER_NX;
235 }
236
237 static int is_shadow_present_pte(u64 pte)
238 {
239         return pte != shadow_trap_nonpresent_pte
240                 && pte != shadow_notrap_nonpresent_pte;
241 }
242
243 static int is_large_pte(u64 pte)
244 {
245         return pte & PT_PAGE_SIZE_MASK;
246 }
247
248 static int is_writable_pte(unsigned long pte)
249 {
250         return pte & PT_WRITABLE_MASK;
251 }
252
253 static int is_dirty_gpte(unsigned long pte)
254 {
255         return pte & PT_DIRTY_MASK;
256 }
257
258 static int is_rmap_spte(u64 pte)
259 {
260         return is_shadow_present_pte(pte);
261 }
262
263 static int is_last_spte(u64 pte, int level)
264 {
265         if (level == PT_PAGE_TABLE_LEVEL)
266                 return 1;
267         if (is_large_pte(pte))
268                 return 1;
269         return 0;
270 }
271
272 static pfn_t spte_to_pfn(u64 pte)
273 {
274         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
275 }
276
277 static gfn_t pse36_gfn_delta(u32 gpte)
278 {
279         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
280
281         return (gpte & PT32_DIR_PSE36_MASK) << shift;
282 }
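/*
 * PSE-36 keeps the high physical address bits of a 4MB page in PDE bits 13
 * and up.  Assuming PT32_DIR_PSE36_SHIFT == 13 (defined outside this file)
 * and PAGE_SHIFT == 12, shift == 7, so PDE bits 13..16 land in gfn bits
 * 20..23, i.e. physical address bits 32..35.
 */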
283
284 static void __set_spte(u64 *sptep, u64 spte)
285 {
286         set_64bit(sptep, spte);
287 }
288
289 static u64 __xchg_spte(u64 *sptep, u64 new_spte)
290 {
291 #ifdef CONFIG_X86_64
292         return xchg(sptep, new_spte);
293 #else
294         u64 old_spte;
295
296         do {
297                 old_spte = *sptep;
298         } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
299
300         return old_spte;
301 #endif
302 }
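/*
 * On 32-bit hosts there is no atomic 64-bit xchg instruction, so the
 * exchange above is emulated with a cmpxchg64() (cmpxchg8b) retry loop.
 */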
303
304 static bool spte_has_volatile_bits(u64 spte)
305 {
306         if (!shadow_accessed_mask)
307                 return false;
308
309         if (!is_shadow_present_pte(spte))
310                 return false;
311
312         if ((spte & shadow_accessed_mask) &&
313               (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
314                 return false;
315
316         return true;
317 }
318
319 static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
320 {
321         return (old_spte & bit_mask) && !(new_spte & bit_mask);
322 }
323
324 static void update_spte(u64 *sptep, u64 new_spte)
325 {
326         u64 mask, old_spte = *sptep;
327
328         WARN_ON(!is_rmap_spte(new_spte));
329
330         new_spte |= old_spte & shadow_dirty_mask;
331
332         mask = shadow_accessed_mask;
333         if (is_writable_pte(old_spte))
334                 mask |= shadow_dirty_mask;
335
336         if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
337                 __set_spte(sptep, new_spte);
338         else
339                 old_spte = __xchg_spte(sptep, new_spte);
340
341         if (!shadow_accessed_mask)
342                 return;
343
344         if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
345                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
346         if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
347                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
348 }
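/*
 * Note on the helpers above: hardware can set the accessed/dirty bits of a
 * present spte at any time.  When those bits are still volatile (could be
 * turned on under us) the update is done with an atomic exchange, and any
 * accessed/dirty bit that got lost from the spte is propagated to the page
 * via kvm_set_pfn_accessed()/kvm_set_pfn_dirty().
 */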
349
350 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
351                                   struct kmem_cache *base_cache, int min)
352 {
353         void *obj;
354
355         if (cache->nobjs >= min)
356                 return 0;
357         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
358                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
359                 if (!obj)
360                         return -ENOMEM;
361                 cache->objects[cache->nobjs++] = obj;
362         }
363         return 0;
364 }
365
366 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
367                                   struct kmem_cache *cache)
368 {
369         while (mc->nobjs)
370                 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
371 }
372
373 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
374                                        int min)
375 {
376         void *page;
377
378         if (cache->nobjs >= min)
379                 return 0;
380         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
381                 page = (void *)__get_free_page(GFP_KERNEL);
382                 if (!page)
383                         return -ENOMEM;
384                 cache->objects[cache->nobjs++] = page;
385         }
386         return 0;
387 }
388
389 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
390 {
391         while (mc->nobjs)
392                 free_page((unsigned long)mc->objects[--mc->nobjs]);
393 }
394
395 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
396 {
397         int r;
398
399         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
400                                    pte_chain_cache, 4);
401         if (r)
402                 goto out;
403         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
404                                    rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
405         if (r)
406                 goto out;
407         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
408         if (r)
409                 goto out;
410         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
411                                    mmu_page_header_cache, 4);
412 out:
413         return r;
414 }
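/*
 * Usage note: the caches are topped up before mmu_lock is taken (e.g. on
 * the page fault path), so that mmu_memory_cache_alloc() below can hand out
 * objects without sleeping while the lock is held.
 */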
415
416 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
417 {
418         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
419         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
420         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
421         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
422                                 mmu_page_header_cache);
423 }
424
425 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
426                                     size_t size)
427 {
428         void *p;
429
430         BUG_ON(!mc->nobjs);
431         p = mc->objects[--mc->nobjs];
432         return p;
433 }
434
435 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
436 {
437         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
438                                       sizeof(struct kvm_pte_chain));
439 }
440
441 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
442 {
443         kmem_cache_free(pte_chain_cache, pc);
444 }
445
446 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
447 {
448         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
449                                       sizeof(struct kvm_rmap_desc));
450 }
451
452 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
453 {
454         kmem_cache_free(rmap_desc_cache, rd);
455 }
456
457 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
458 {
459         if (!sp->role.direct)
460                 return sp->gfns[index];
461
462         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
463 }
464
465 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
466 {
467         if (sp->role.direct)
468                 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
469         else
470                 sp->gfns[index] = gfn;
471 }
472
473 /*
474  * Return the pointer to the large page information for a given gfn,
475  * handling slots that are not large page aligned.
476  */
477 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
478                                               struct kvm_memory_slot *slot,
479                                               int level)
480 {
481         unsigned long idx;
482
483         idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
484               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
485         return &slot->lpage_info[level - 2][idx];
486 }
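/*
 * Worked example: with 4k base pages, KVM_HPAGE_GFN_SHIFT() (defined outside
 * this file) is 9 for the 2MB level and 18 for the 1GB level, so idx is the
 * number of whole 2MB (or 1GB) regions between the slot base and gfn, which
 * indexes that level's lpage_info array for the slot.
 */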
487
488 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
489 {
490         struct kvm_memory_slot *slot;
491         struct kvm_lpage_info *linfo;
492         int i;
493
494         slot = gfn_to_memslot(kvm, gfn);
495         for (i = PT_DIRECTORY_LEVEL;
496              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
497                 linfo = lpage_info_slot(gfn, slot, i);
498                 linfo->write_count += 1;
499         }
500 }
501
502 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
503 {
504         struct kvm_memory_slot *slot;
505         struct kvm_lpage_info *linfo;
506         int i;
507
508         slot = gfn_to_memslot(kvm, gfn);
509         for (i = PT_DIRECTORY_LEVEL;
510              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
511                 linfo = lpage_info_slot(gfn, slot, i);
512                 linfo->write_count -= 1;
513                 WARN_ON(linfo->write_count < 0);
514         }
515 }
516
517 static int has_wrprotected_page(struct kvm *kvm,
518                                 gfn_t gfn,
519                                 int level)
520 {
521         struct kvm_memory_slot *slot;
522         struct kvm_lpage_info *linfo;
523
524         slot = gfn_to_memslot(kvm, gfn);
525         if (slot) {
526                 linfo = lpage_info_slot(gfn, slot, level);
527                 return linfo->write_count;
528         }
529
530         return 1;
531 }
532
533 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
534 {
535         unsigned long page_size;
536         int i, ret = 0;
537
538         page_size = kvm_host_page_size(kvm, gfn);
539
540         for (i = PT_PAGE_TABLE_LEVEL;
541              i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
542                 if (page_size >= KVM_HPAGE_SIZE(i))
543                         ret = i;
544                 else
545                         break;
546         }
547
548         return ret;
549 }
550
551 static struct kvm_memory_slot *
552 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
553                             bool no_dirty_log)
554 {
555         struct kvm_memory_slot *slot;
556
557         slot = gfn_to_memslot(vcpu->kvm, gfn);
558         if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
559               (no_dirty_log && slot->dirty_bitmap))
560                 slot = NULL;
561
562         return slot;
563 }
564
565 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
566 {
567         return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
568 }
569
570 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
571 {
572         int host_level, level, max_level;
573
574         host_level = host_mapping_level(vcpu->kvm, large_gfn);
575
576         if (host_level == PT_PAGE_TABLE_LEVEL)
577                 return host_level;
578
579         max_level = kvm_x86_ops->get_lpage_level() < host_level ?
580                 kvm_x86_ops->get_lpage_level() : host_level;
581
582         for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
583                 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
584                         break;
585
586         return level - 1;
587 }
588
589 /*
590  * Take gfn and return the reverse mapping to it.
591  */
592
593 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
594 {
595         struct kvm_memory_slot *slot;
596         struct kvm_lpage_info *linfo;
597
598         slot = gfn_to_memslot(kvm, gfn);
599         if (likely(level == PT_PAGE_TABLE_LEVEL))
600                 return &slot->rmap[gfn - slot->base_gfn];
601
602         linfo = lpage_info_slot(gfn, slot, level);
603
604         return &linfo->rmap_pde;
605 }
606
607 /*
608  * Reverse mapping data structures:
609  *
610  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
611  * that points to page_address(page).
612  *
613  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
614  * containing more mappings.
615  *
616  * Returns the number of rmap entries before the spte was added or zero if
617  * the spte was not added.
618  *
619  */
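/*
 * Decoding sketch for the encoding described above (illustrative only):
 *
 *	if (!*rmapp)
 *		;                                        // no mappings
 *	else if (!(*rmapp & 1))
 *		spte = (u64 *)*rmapp;                    // exactly one spte
 *	else                                             // chain of descs, RMAP_EXT sptes each
 *		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 */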
620 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
621 {
622         struct kvm_mmu_page *sp;
623         struct kvm_rmap_desc *desc;
624         unsigned long *rmapp;
625         int i, count = 0;
626
627         if (!is_rmap_spte(*spte))
628                 return count;
629         sp = page_header(__pa(spte));
630         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
631         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
632         if (!*rmapp) {
633                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
634                 *rmapp = (unsigned long)spte;
635         } else if (!(*rmapp & 1)) {
636                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
637                 desc = mmu_alloc_rmap_desc(vcpu);
638                 desc->sptes[0] = (u64 *)*rmapp;
639                 desc->sptes[1] = spte;
640                 *rmapp = (unsigned long)desc | 1;
641                 ++count;
642         } else {
643                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
644                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
645                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
646                         desc = desc->more;
647                         count += RMAP_EXT;
648                 }
649                 if (desc->sptes[RMAP_EXT-1]) {
650                         desc->more = mmu_alloc_rmap_desc(vcpu);
651                         desc = desc->more;
652                 }
653                 for (i = 0; desc->sptes[i]; ++i)
654                         ++count;
655                 desc->sptes[i] = spte;
656         }
657         return count;
658 }
659
660 static void rmap_desc_remove_entry(unsigned long *rmapp,
661                                    struct kvm_rmap_desc *desc,
662                                    int i,
663                                    struct kvm_rmap_desc *prev_desc)
664 {
665         int j;
666
667         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
668                 ;
669         desc->sptes[i] = desc->sptes[j];
670         desc->sptes[j] = NULL;
671         if (j != 0)
672                 return;
673         if (!prev_desc && !desc->more)
674                 *rmapp = (unsigned long)desc->sptes[0];
675         else
676                 if (prev_desc)
677                         prev_desc->more = desc->more;
678                 else
679                         *rmapp = (unsigned long)desc->more | 1;
680         mmu_free_rmap_desc(desc);
681 }
682
683 static void rmap_remove(struct kvm *kvm, u64 *spte)
684 {
685         struct kvm_rmap_desc *desc;
686         struct kvm_rmap_desc *prev_desc;
687         struct kvm_mmu_page *sp;
688         gfn_t gfn;
689         unsigned long *rmapp;
690         int i;
691
692         sp = page_header(__pa(spte));
693         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
694         rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
695         if (!*rmapp) {
696                 printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
697                 BUG();
698         } else if (!(*rmapp & 1)) {
699                 rmap_printk("rmap_remove:  %p 1->0\n", spte);
700                 if ((u64 *)*rmapp != spte) {
701                         printk(KERN_ERR "rmap_remove:  %p 1->BUG\n", spte);
702                         BUG();
703                 }
704                 *rmapp = 0;
705         } else {
706                 rmap_printk("rmap_remove:  %p many->many\n", spte);
707                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
708                 prev_desc = NULL;
709                 while (desc) {
710                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
711                                 if (desc->sptes[i] == spte) {
712                                         rmap_desc_remove_entry(rmapp,
713                                                                desc, i,
714                                                                prev_desc);
715                                         return;
716                                 }
717                         prev_desc = desc;
718                         desc = desc->more;
719                 }
720                 pr_err("rmap_remove: %p many->many\n", spte);
721                 BUG();
722         }
723 }
724
725 static int set_spte_track_bits(u64 *sptep, u64 new_spte)
726 {
727         pfn_t pfn;
728         u64 old_spte = *sptep;
729
730         if (!spte_has_volatile_bits(old_spte))
731                 __set_spte(sptep, new_spte);
732         else
733                 old_spte = __xchg_spte(sptep, new_spte);
734
735         if (!is_rmap_spte(old_spte))
736                 return 0;
737
738         pfn = spte_to_pfn(old_spte);
739         if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
740                 kvm_set_pfn_accessed(pfn);
741         if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
742                 kvm_set_pfn_dirty(pfn);
743         return 1;
744 }
745
746 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
747 {
748         if (set_spte_track_bits(sptep, new_spte))
749                 rmap_remove(kvm, sptep);
750 }
751
752 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
753 {
754         struct kvm_rmap_desc *desc;
755         u64 *prev_spte;
756         int i;
757
758         if (!*rmapp)
759                 return NULL;
760         else if (!(*rmapp & 1)) {
761                 if (!spte)
762                         return (u64 *)*rmapp;
763                 return NULL;
764         }
765         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
766         prev_spte = NULL;
767         while (desc) {
768                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
769                         if (prev_spte == spte)
770                                 return desc->sptes[i];
771                         prev_spte = desc->sptes[i];
772                 }
773                 desc = desc->more;
774         }
775         return NULL;
776 }
777
778 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
779 {
780         unsigned long *rmapp;
781         u64 *spte;
782         int i, write_protected = 0;
783
784         rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
785
786         spte = rmap_next(kvm, rmapp, NULL);
787         while (spte) {
788                 BUG_ON(!spte);
789                 BUG_ON(!(*spte & PT_PRESENT_MASK));
790                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
791                 if (is_writable_pte(*spte)) {
792                         update_spte(spte, *spte & ~PT_WRITABLE_MASK);
793                         write_protected = 1;
794                 }
795                 spte = rmap_next(kvm, rmapp, spte);
796         }
797
798         /* check for huge page mappings */
799         for (i = PT_DIRECTORY_LEVEL;
800              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
801                 rmapp = gfn_to_rmap(kvm, gfn, i);
802                 spte = rmap_next(kvm, rmapp, NULL);
803                 while (spte) {
804                         BUG_ON(!spte);
805                         BUG_ON(!(*spte & PT_PRESENT_MASK));
806                         BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
807                         pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
808                         if (is_writable_pte(*spte)) {
809                                 drop_spte(kvm, spte,
810                                           shadow_trap_nonpresent_pte);
811                                 --kvm->stat.lpages;
812                                 spte = NULL;
813                                 write_protected = 1;
814                         }
815                         spte = rmap_next(kvm, rmapp, spte);
816                 }
817         }
818
819         return write_protected;
820 }
821
822 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
823                            unsigned long data)
824 {
825         u64 *spte;
826         int need_tlb_flush = 0;
827
828         while ((spte = rmap_next(kvm, rmapp, NULL))) {
829                 BUG_ON(!(*spte & PT_PRESENT_MASK));
830                 rmap_printk("kvm_unmap_rmapp: spte %p %llx\n", spte, *spte);
831                 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
832                 need_tlb_flush = 1;
833         }
834         return need_tlb_flush;
835 }
836
837 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
838                              unsigned long data)
839 {
840         int need_flush = 0;
841         u64 *spte, new_spte;
842         pte_t *ptep = (pte_t *)data;
843         pfn_t new_pfn;
844
845         WARN_ON(pte_huge(*ptep));
846         new_pfn = pte_pfn(*ptep);
847         spte = rmap_next(kvm, rmapp, NULL);
848         while (spte) {
849                 BUG_ON(!is_shadow_present_pte(*spte));
850                 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
851                 need_flush = 1;
852                 if (pte_write(*ptep)) {
853                         drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
854                         spte = rmap_next(kvm, rmapp, NULL);
855                 } else {
856                         new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
857                         new_spte |= (u64)new_pfn << PAGE_SHIFT;
858
859                         new_spte &= ~PT_WRITABLE_MASK;
860                         new_spte &= ~SPTE_HOST_WRITEABLE;
861                         new_spte &= ~shadow_accessed_mask;
862                         set_spte_track_bits(spte, new_spte);
863                         spte = rmap_next(kvm, rmapp, spte);
864                 }
865         }
866         if (need_flush)
867                 kvm_flush_remote_tlbs(kvm);
868
869         return 0;
870 }
871
872 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
873                           unsigned long data,
874                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
875                                          unsigned long data))
876 {
877         int i, j;
878         int ret;
879         int retval = 0;
880         struct kvm_memslots *slots;
881
882         slots = kvm_memslots(kvm);
883
884         for (i = 0; i < slots->nmemslots; i++) {
885                 struct kvm_memory_slot *memslot = &slots->memslots[i];
886                 unsigned long start = memslot->userspace_addr;
887                 unsigned long end;
888
889                 end = start + (memslot->npages << PAGE_SHIFT);
890                 if (hva >= start && hva < end) {
891                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
892                         gfn_t gfn = memslot->base_gfn + gfn_offset;
893
894                         ret = handler(kvm, &memslot->rmap[gfn_offset], data);
895
896                         for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
897                                 struct kvm_lpage_info *linfo;
898
899                                 linfo = lpage_info_slot(gfn, memslot,
900                                                         PT_DIRECTORY_LEVEL + j);
901                                 ret |= handler(kvm, &linfo->rmap_pde, data);
902                         }
903                         trace_kvm_age_page(hva, memslot, ret);
904                         retval |= ret;
905                 }
906         }
907
908         return retval;
909 }
910
911 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
912 {
913         return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
914 }
915
916 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
917 {
918         kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
919 }
920
921 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
922                          unsigned long data)
923 {
924         u64 *spte;
925         int young = 0;
926
927         /*
928          * Emulate the accessed bit for EPT, by checking if this page has
929          * an EPT mapping, and clearing it if it does. On the next access,
930          * a new EPT mapping will be established.
931          * This has some overhead, but not as much as the cost of swapping
932          * out actively used pages or breaking up actively used hugepages.
933          */
934         if (!shadow_accessed_mask)
935                 return kvm_unmap_rmapp(kvm, rmapp, data);
936
937         spte = rmap_next(kvm, rmapp, NULL);
938         while (spte) {
939                 int _young;
940                 u64 _spte = *spte;
941                 BUG_ON(!(_spte & PT_PRESENT_MASK));
942                 _young = _spte & PT_ACCESSED_MASK;
943                 if (_young) {
944                         young = 1;
945                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
946                 }
947                 spte = rmap_next(kvm, rmapp, spte);
948         }
949         return young;
950 }
951
952 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
953                               unsigned long data)
954 {
955         u64 *spte;
956         int young = 0;
957
958         /*
959          * If there's no access bit in the secondary pte set by the
960          * hardware it's up to gup-fast/gup to set the access bit in
961          * the primary pte or in the page structure.
962          */
963         if (!shadow_accessed_mask)
964                 goto out;
965
966         spte = rmap_next(kvm, rmapp, NULL);
967         while (spte) {
968                 u64 _spte = *spte;
969                 BUG_ON(!(_spte & PT_PRESENT_MASK));
970                 young = _spte & PT_ACCESSED_MASK;
971                 if (young) {
972                         young = 1;
973                         break;
974                 }
975                 spte = rmap_next(kvm, rmapp, spte);
976         }
977 out:
978         return young;
979 }
980
981 #define RMAP_RECYCLE_THRESHOLD 1000
982
983 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
984 {
985         unsigned long *rmapp;
986         struct kvm_mmu_page *sp;
987
988         sp = page_header(__pa(spte));
989
990         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
991
992         kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
993         kvm_flush_remote_tlbs(vcpu->kvm);
994 }
995
996 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
997 {
998         return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
999 }
1000
1001 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1002 {
1003         return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1004 }
1005
1006 #ifdef MMU_DEBUG
1007 static int is_empty_shadow_page(u64 *spt)
1008 {
1009         u64 *pos;
1010         u64 *end;
1011
1012         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1013                 if (is_shadow_present_pte(*pos)) {
1014                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1015                                pos, *pos);
1016                         return 0;
1017                 }
1018         return 1;
1019 }
1020 #endif
1021
1022 /*
1023  * This value is the sum of all of the kvm instances'
1024  * kvm->arch.n_used_mmu_pages values.  We need a global,
1025  * aggregate version in order to make the slab shrinker
1026  * faster.
1027  */
1028 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1029 {
1030         kvm->arch.n_used_mmu_pages += nr;
1031         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1032 }
1033
1034 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1035 {
1036         ASSERT(is_empty_shadow_page(sp->spt));
1037         hlist_del(&sp->hash_link);
1038         list_del(&sp->link);
1039         free_page((unsigned long)sp->spt);
1040         if (!sp->role.direct)
1041                 free_page((unsigned long)sp->gfns);
1042         kmem_cache_free(mmu_page_header_cache, sp);
1043         kvm_mod_used_mmu_pages(kvm, -1);
1044 }
1045
1046 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1047 {
1048         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
1049 }
1050
1051 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
1052                                                u64 *parent_pte, int direct)
1053 {
1054         struct kvm_mmu_page *sp;
1055
1056         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
1057         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
1058         if (!direct)
1059                 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
1060                                                   PAGE_SIZE);
1061         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1062         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1063         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
1064         sp->multimapped = 0;
1065         sp->parent_pte = parent_pte;
1066         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1067         return sp;
1068 }
1069
1070 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1071                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1072 {
1073         struct kvm_pte_chain *pte_chain;
1074         struct hlist_node *node;
1075         int i;
1076
1077         if (!parent_pte)
1078                 return;
1079         if (!sp->multimapped) {
1080                 u64 *old = sp->parent_pte;
1081
1082                 if (!old) {
1083                         sp->parent_pte = parent_pte;
1084                         return;
1085                 }
1086                 sp->multimapped = 1;
1087                 pte_chain = mmu_alloc_pte_chain(vcpu);
1088                 INIT_HLIST_HEAD(&sp->parent_ptes);
1089                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1090                 pte_chain->parent_ptes[0] = old;
1091         }
1092         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
1093                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
1094                         continue;
1095                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
1096                         if (!pte_chain->parent_ptes[i]) {
1097                                 pte_chain->parent_ptes[i] = parent_pte;
1098                                 return;
1099                         }
1100         }
1101         pte_chain = mmu_alloc_pte_chain(vcpu);
1102         BUG_ON(!pte_chain);
1103         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1104         pte_chain->parent_ptes[0] = parent_pte;
1105 }
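/*
 * Parent pte tracking: a shadow page starts out with a single parent_pte
 * pointer.  Once it gains a second parent it becomes "multimapped" and the
 * parents are kept in an hlist of kvm_pte_chain blocks holding
 * NR_PTE_CHAIN_ENTRIES pointers each, as built above and torn down below.
 */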
1106
1107 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1108                                        u64 *parent_pte)
1109 {
1110         struct kvm_pte_chain *pte_chain;
1111         struct hlist_node *node;
1112         int i;
1113
1114         if (!sp->multimapped) {
1115                 BUG_ON(sp->parent_pte != parent_pte);
1116                 sp->parent_pte = NULL;
1117                 return;
1118         }
1119         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1120                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1121                         if (!pte_chain->parent_ptes[i])
1122                                 break;
1123                         if (pte_chain->parent_ptes[i] != parent_pte)
1124                                 continue;
1125                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
1126                                 && pte_chain->parent_ptes[i + 1]) {
1127                                 pte_chain->parent_ptes[i]
1128                                         = pte_chain->parent_ptes[i + 1];
1129                                 ++i;
1130                         }
1131                         pte_chain->parent_ptes[i] = NULL;
1132                         if (i == 0) {
1133                                 hlist_del(&pte_chain->link);
1134                                 mmu_free_pte_chain(pte_chain);
1135                                 if (hlist_empty(&sp->parent_ptes)) {
1136                                         sp->multimapped = 0;
1137                                         sp->parent_pte = NULL;
1138                                 }
1139                         }
1140                         return;
1141                 }
1142         BUG();
1143 }
1144
1145 static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
1146 {
1147         struct kvm_pte_chain *pte_chain;
1148         struct hlist_node *node;
1149         struct kvm_mmu_page *parent_sp;
1150         int i;
1151
1152         if (!sp->multimapped && sp->parent_pte) {
1153                 parent_sp = page_header(__pa(sp->parent_pte));
1154                 fn(parent_sp, sp->parent_pte);
1155                 return;
1156         }
1157
1158         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1159                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1160                         u64 *spte = pte_chain->parent_ptes[i];
1161
1162                         if (!spte)
1163                                 break;
1164                         parent_sp = page_header(__pa(spte));
1165                         fn(parent_sp, spte);
1166                 }
1167 }
1168
1169 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
1170 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1171 {
1172         mmu_parent_walk(sp, mark_unsync);
1173 }
1174
1175 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
1176 {
1177         unsigned int index;
1178
1179         index = spte - sp->spt;
1180         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1181                 return;
1182         if (sp->unsync_children++)
1183                 return;
1184         kvm_mmu_mark_parents_unsync(sp);
1185 }
1186
1187 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1188                                     struct kvm_mmu_page *sp)
1189 {
1190         int i;
1191
1192         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1193                 sp->spt[i] = shadow_trap_nonpresent_pte;
1194 }
1195
1196 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1197                                struct kvm_mmu_page *sp)
1198 {
1199         return 1;
1200 }
1201
1202 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1203 {
1204 }
1205
1206 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
1207                                  struct kvm_mmu_page *sp, u64 *spte,
1208                                  const void *pte)
1209 {
1210         WARN_ON(1);
1211 }
1212
1213 #define KVM_PAGE_ARRAY_NR 16
1214
1215 struct kvm_mmu_pages {
1216         struct mmu_page_and_offset {
1217                 struct kvm_mmu_page *sp;
1218                 unsigned int idx;
1219         } page[KVM_PAGE_ARRAY_NR];
1220         unsigned int nr;
1221 };
1222
1223 #define for_each_unsync_children(bitmap, idx)           \
1224         for (idx = find_first_bit(bitmap, 512);         \
1225              idx < 512;                                 \
1226              idx = find_next_bit(bitmap, 512, idx+1))
1227
1228 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1229                          int idx)
1230 {
1231         int i;
1232
1233         if (sp->unsync)
1234                 for (i=0; i < pvec->nr; i++)
1235                         if (pvec->page[i].sp == sp)
1236                                 return 0;
1237
1238         pvec->page[pvec->nr].sp = sp;
1239         pvec->page[pvec->nr].idx = idx;
1240         pvec->nr++;
1241         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1242 }
1243
1244 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1245                            struct kvm_mmu_pages *pvec)
1246 {
1247         int i, ret, nr_unsync_leaf = 0;
1248
1249         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1250                 struct kvm_mmu_page *child;
1251                 u64 ent = sp->spt[i];
1252
1253                 if (!is_shadow_present_pte(ent) || is_large_pte(ent))
1254                         goto clear_child_bitmap;
1255
1256                 child = page_header(ent & PT64_BASE_ADDR_MASK);
1257
1258                 if (child->unsync_children) {
1259                         if (mmu_pages_add(pvec, child, i))
1260                                 return -ENOSPC;
1261
1262                         ret = __mmu_unsync_walk(child, pvec);
1263                         if (!ret)
1264                                 goto clear_child_bitmap;
1265                         else if (ret > 0)
1266                                 nr_unsync_leaf += ret;
1267                         else
1268                                 return ret;
1269                 } else if (child->unsync) {
1270                         nr_unsync_leaf++;
1271                         if (mmu_pages_add(pvec, child, i))
1272                                 return -ENOSPC;
1273                 } else
1274                          goto clear_child_bitmap;
1275
1276                 continue;
1277
1278 clear_child_bitmap:
1279                 __clear_bit(i, sp->unsync_child_bitmap);
1280                 sp->unsync_children--;
1281                 WARN_ON((int)sp->unsync_children < 0);
1282         }
1283
1284
1285         return nr_unsync_leaf;
1286 }
1287
1288 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1289                            struct kvm_mmu_pages *pvec)
1290 {
1291         if (!sp->unsync_children)
1292                 return 0;
1293
1294         mmu_pages_add(pvec, sp, 0);
1295         return __mmu_unsync_walk(sp, pvec);
1296 }
1297
1298 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1299 {
1300         WARN_ON(!sp->unsync);
1301         trace_kvm_mmu_sync_page(sp);
1302         sp->unsync = 0;
1303         --kvm->stat.mmu_unsync;
1304 }
1305
1306 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1307                                     struct list_head *invalid_list);
1308 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1309                                     struct list_head *invalid_list);
1310
1311 #define for_each_gfn_sp(kvm, sp, gfn, pos)                              \
1312   hlist_for_each_entry(sp, pos,                                         \
1313    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
1314         if ((sp)->gfn != (gfn)) {} else
1315
1316 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)               \
1317   hlist_for_each_entry(sp, pos,                                         \
1318    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
1319                 if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
1320                         (sp)->role.invalid) {} else
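/*
 * The "if (...) {} else" tail of the macros above turns them into filtered
 * loop headers: the statement supplied by the caller becomes the else
 * branch, so shadow pages that don't match are skipped without the macro
 * needing to wrap the caller's body in braces.
 */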
1321
1322 /* @sp->gfn should be write-protected at the call site */
1323 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1324                            struct list_head *invalid_list, bool clear_unsync)
1325 {
1326         if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1327                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1328                 return 1;
1329         }
1330
1331         if (clear_unsync)
1332                 kvm_unlink_unsync_page(vcpu->kvm, sp);
1333
1334         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1335                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1336                 return 1;
1337         }
1338
1339         kvm_mmu_flush_tlb(vcpu);
1340         return 0;
1341 }
1342
1343 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1344                                    struct kvm_mmu_page *sp)
1345 {
1346         LIST_HEAD(invalid_list);
1347         int ret;
1348
1349         ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
1350         if (ret)
1351                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1352
1353         return ret;
1354 }
1355
1356 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1357                          struct list_head *invalid_list)
1358 {
1359         return __kvm_sync_page(vcpu, sp, invalid_list, true);
1360 }
1361
1362 /* @gfn should be write-protected at the call site */
1363 static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
1364 {
1365         struct kvm_mmu_page *s;
1366         struct hlist_node *node;
1367         LIST_HEAD(invalid_list);
1368         bool flush = false;
1369
1370         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1371                 if (!s->unsync)
1372                         continue;
1373
1374                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1375                 kvm_unlink_unsync_page(vcpu->kvm, s);
1376                 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1377                         (vcpu->arch.mmu.sync_page(vcpu, s))) {
1378                         kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1379                         continue;
1380                 }
1381                 flush = true;
1382         }
1383
1384         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1385         if (flush)
1386                 kvm_mmu_flush_tlb(vcpu);
1387 }
1388
1389 struct mmu_page_path {
1390         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1391         unsigned int idx[PT64_ROOT_LEVEL-1];
1392 };
1393
1394 #define for_each_sp(pvec, sp, parents, i)                       \
1395                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1396                         sp = pvec.page[i].sp;                   \
1397                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1398                         i = mmu_pages_next(&pvec, &parents, i))
1399
1400 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1401                           struct mmu_page_path *parents,
1402                           int i)
1403 {
1404         int n;
1405
1406         for (n = i+1; n < pvec->nr; n++) {
1407                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1408
1409                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1410                         parents->idx[0] = pvec->page[n].idx;
1411                         return n;
1412                 }
1413
1414                 parents->parent[sp->role.level-2] = sp;
1415                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1416         }
1417
1418         return n;
1419 }
1420
1421 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1422 {
1423         struct kvm_mmu_page *sp;
1424         unsigned int level = 0;
1425
1426         do {
1427                 unsigned int idx = parents->idx[level];
1428
1429                 sp = parents->parent[level];
1430                 if (!sp)
1431                         return;
1432
1433                 --sp->unsync_children;
1434                 WARN_ON((int)sp->unsync_children < 0);
1435                 __clear_bit(idx, sp->unsync_child_bitmap);
1436                 level++;
1437         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1438 }
1439
1440 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1441                                struct mmu_page_path *parents,
1442                                struct kvm_mmu_pages *pvec)
1443 {
1444         parents->parent[parent->role.level-1] = NULL;
1445         pvec->nr = 0;
1446 }
1447
1448 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1449                               struct kvm_mmu_page *parent)
1450 {
1451         int i;
1452         struct kvm_mmu_page *sp;
1453         struct mmu_page_path parents;
1454         struct kvm_mmu_pages pages;
1455         LIST_HEAD(invalid_list);
1456
1457         kvm_mmu_pages_init(parent, &parents, &pages);
1458         while (mmu_unsync_walk(parent, &pages)) {
1459                 int protected = 0;
1460
1461                 for_each_sp(pages, sp, parents, i)
1462                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1463
1464                 if (protected)
1465                         kvm_flush_remote_tlbs(vcpu->kvm);
1466
1467                 for_each_sp(pages, sp, parents, i) {
1468                         kvm_sync_page(vcpu, sp, &invalid_list);
1469                         mmu_pages_clear_parents(&parents);
1470                 }
1471                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1472                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1473                 kvm_mmu_pages_init(parent, &parents, &pages);
1474         }
1475 }
1476
1477 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1478                                              gfn_t gfn,
1479                                              gva_t gaddr,
1480                                              unsigned level,
1481                                              int direct,
1482                                              unsigned access,
1483                                              u64 *parent_pte)
1484 {
1485         union kvm_mmu_page_role role;
1486         unsigned quadrant;
1487         struct kvm_mmu_page *sp;
1488         struct hlist_node *node;
1489         bool need_sync = false;
1490
1491         role = vcpu->arch.mmu.base_role;
1492         role.level = level;
1493         role.direct = direct;
1494         if (role.direct)
1495                 role.cr4_pae = 0;
1496         role.access = access;
1497         if (!vcpu->arch.mmu.direct_map
1498             && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1499                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1500                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1501                 role.quadrant = quadrant;
1502         }
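        /*
         * Quadrant example: a 32-bit non-PAE guest page table has 1024
         * entries while a shadow page table has 512 (assuming PT32_PT_BITS
         * == 10 and PT64_PT_BITS == 9 from the headers), so one guest table
         * is shadowed by 2 pages at level 1 (quadrant 0..1) and a guest page
         * directory by 4 at level 2 (quadrant 0..3); the quadrant records
         * which slice of the guest table this sp shadows.
         */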
1503         for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
1504                 if (!need_sync && sp->unsync)
1505                         need_sync = true;
1506
1507                 if (sp->role.word != role.word)
1508                         continue;
1509
1510                 if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
1511                         break;
1512
1513                 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1514                 if (sp->unsync_children) {
1515                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1516                         kvm_mmu_mark_parents_unsync(sp);
1517                 } else if (sp->unsync)
1518                         kvm_mmu_mark_parents_unsync(sp);
1519
1520                 trace_kvm_mmu_get_page(sp, false);
1521                 return sp;
1522         }
1523         ++vcpu->kvm->stat.mmu_cache_miss;
1524         sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
1525         if (!sp)
1526                 return sp;
1527         sp->gfn = gfn;
1528         sp->role = role;
1529         hlist_add_head(&sp->hash_link,
1530                 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
1531         if (!direct) {
1532                 if (rmap_write_protect(vcpu->kvm, gfn))
1533                         kvm_flush_remote_tlbs(vcpu->kvm);
1534                 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
1535                         kvm_sync_pages(vcpu, gfn);
1536
1537                 account_shadowed(vcpu->kvm, gfn);
1538         }
1539         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1540                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1541         else
1542                 nonpaging_prefetch_page(vcpu, sp);
1543         trace_kvm_mmu_get_page(sp, true);
1544         return sp;
1545 }
1546
1547 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1548                              struct kvm_vcpu *vcpu, u64 addr)
1549 {
1550         iterator->addr = addr;
1551         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1552         iterator->level = vcpu->arch.mmu.shadow_root_level;
1553
1554         if (iterator->level == PT64_ROOT_LEVEL &&
1555             vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
1556             !vcpu->arch.mmu.direct_map)
1557                 --iterator->level;
1558
1559         if (iterator->level == PT32E_ROOT_LEVEL) {
1560                 iterator->shadow_addr
1561                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1562                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1563                 --iterator->level;
1564                 if (!iterator->shadow_addr)
1565                         iterator->level = 0;
1566         }
1567 }
1568
1569 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1570 {
1571         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1572                 return false;
1573
1574         if (iterator->level == PT_PAGE_TABLE_LEVEL)
1575                 if (is_large_pte(*iterator->sptep))
1576                         return false;
1577
1578         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1579         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1580         return true;
1581 }
1582
1583 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1584 {
1585         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1586         --iterator->level;
1587 }
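/*
 * Illustration of how the three walker helpers above fit together; the
 * for_each_shadow_entry() macro used by callers such as __direct_map()
 * below is expected to expand to roughly this shape (a sketch, not a
 * verbatim copy of the macro defined elsewhere in this file):
 *
 *	for (shadow_walk_init(&iterator, vcpu, addr);
 *	     shadow_walk_okay(&iterator);
 *	     shadow_walk_next(&iterator)) {
 *		// iterator.sptep points at the spte for addr at
 *		// iterator.level; the walk ends once the level drops
 *		// below the 4K page-table level or a terminal large
 *		// spte is reached.
 *	}
 */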
1588
1589 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
1590 {
1591         u64 spte;
1592
1593         spte = __pa(sp->spt)
1594                 | PT_PRESENT_MASK | PT_ACCESSED_MASK
1595                 | PT_WRITABLE_MASK | PT_USER_MASK;
1596         __set_spte(sptep, spte);
1597 }
1598
1599 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1600 {
1601         if (is_large_pte(*sptep)) {
1602                 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1603                 kvm_flush_remote_tlbs(vcpu->kvm);
1604         }
1605 }
1606
1607 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1608                                    unsigned direct_access)
1609 {
1610         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
1611                 struct kvm_mmu_page *child;
1612
1613                 /*
1614                  * For a direct sp, if the guest pte's dirty bit
1615                  * changed from clean to dirty, it would corrupt the
1616                  * sp's access (allowing writes through a read-only sp),
1617                  * so update the spte at this point to get a new sp
1618                  * with the correct access.
1619                  */
1620                 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
1621                 if (child->role.access == direct_access)
1622                         return;
1623
1624                 mmu_page_remove_parent_pte(child, sptep);
1625                 __set_spte(sptep, shadow_trap_nonpresent_pte);
1626                 kvm_flush_remote_tlbs(vcpu->kvm);
1627         }
1628 }
1629
1630 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1631                                          struct kvm_mmu_page *sp)
1632 {
1633         unsigned i;
1634         u64 *pt;
1635         u64 ent;
1636
1637         pt = sp->spt;
1638
1639         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1640                 ent = pt[i];
1641
1642                 if (is_shadow_present_pte(ent)) {
1643                         if (!is_last_spte(ent, sp->role.level)) {
1644                                 ent &= PT64_BASE_ADDR_MASK;
1645                                 mmu_page_remove_parent_pte(page_header(ent),
1646                                                            &pt[i]);
1647                         } else {
1648                                 if (is_large_pte(ent))
1649                                         --kvm->stat.lpages;
1650                                 drop_spte(kvm, &pt[i],
1651                                           shadow_trap_nonpresent_pte);
1652                         }
1653                 }
1654                 pt[i] = shadow_trap_nonpresent_pte;
1655         }
1656 }
1657
1658 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1659 {
1660         mmu_page_remove_parent_pte(sp, parent_pte);
1661 }
1662
1663 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1664 {
1665         int i;
1666         struct kvm_vcpu *vcpu;
1667
1668         kvm_for_each_vcpu(i, vcpu, kvm)
1669                 vcpu->arch.last_pte_updated = NULL;
1670 }
1671
1672 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1673 {
1674         u64 *parent_pte;
1675
1676         while (sp->multimapped || sp->parent_pte) {
1677                 if (!sp->multimapped)
1678                         parent_pte = sp->parent_pte;
1679                 else {
1680                         struct kvm_pte_chain *chain;
1681
1682                         chain = container_of(sp->parent_ptes.first,
1683                                              struct kvm_pte_chain, link);
1684                         parent_pte = chain->parent_ptes[0];
1685                 }
1686                 BUG_ON(!parent_pte);
1687                 kvm_mmu_put_page(sp, parent_pte);
1688                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1689         }
1690 }
1691
1692 static int mmu_zap_unsync_children(struct kvm *kvm,
1693                                    struct kvm_mmu_page *parent,
1694                                    struct list_head *invalid_list)
1695 {
1696         int i, zapped = 0;
1697         struct mmu_page_path parents;
1698         struct kvm_mmu_pages pages;
1699
1700         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1701                 return 0;
1702
1703         kvm_mmu_pages_init(parent, &parents, &pages);
1704         while (mmu_unsync_walk(parent, &pages)) {
1705                 struct kvm_mmu_page *sp;
1706
1707                 for_each_sp(pages, sp, parents, i) {
1708                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
1709                         mmu_pages_clear_parents(&parents);
1710                         zapped++;
1711                 }
1712                 kvm_mmu_pages_init(parent, &parents, &pages);
1713         }
1714
1715         return zapped;
1716 }
1717
1718 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1719                                     struct list_head *invalid_list)
1720 {
1721         int ret;
1722
1723         trace_kvm_mmu_prepare_zap_page(sp);
1724         ++kvm->stat.mmu_shadow_zapped;
1725         ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
1726         kvm_mmu_page_unlink_children(kvm, sp);
1727         kvm_mmu_unlink_parents(kvm, sp);
1728         if (!sp->role.invalid && !sp->role.direct)
1729                 unaccount_shadowed(kvm, sp->gfn);
1730         if (sp->unsync)
1731                 kvm_unlink_unsync_page(kvm, sp);
1732         if (!sp->root_count) {
1733                 /* Count self */
1734                 ret++;
1735                 list_move(&sp->link, invalid_list);
1736         } else {
1737                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1738                 kvm_reload_remote_mmus(kvm);
1739         }
1740
1741         sp->role.invalid = 1;
1742         kvm_mmu_reset_last_pte_updated(kvm);
1743         return ret;
1744 }
1745
1746 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1747                                     struct list_head *invalid_list)
1748 {
1749         struct kvm_mmu_page *sp;
1750
1751         if (list_empty(invalid_list))
1752                 return;
1753
1754         kvm_flush_remote_tlbs(kvm);
1755
1756         do {
1757                 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
1758                 WARN_ON(!sp->role.invalid || sp->root_count);
1759                 kvm_mmu_free_page(kvm, sp);
1760         } while (!list_empty(invalid_list));
1761
1762 }
1763
1764 /*
1765  * Change the number of mmu pages allocated to the vm.
1766  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
1767  */
1768 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1769 {
1770         LIST_HEAD(invalid_list);
1771         /*
1772          * If we set the number of mmu pages to be smaller than the
1773          * number of active pages, we must free some mmu pages before
1774          * we change the value.
1775          */
1776
1777         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1778                 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
1779                         !list_empty(&kvm->arch.active_mmu_pages)) {
1780                         struct kvm_mmu_page *page;
1781
1782                         page = container_of(kvm->arch.active_mmu_pages.prev,
1783                                             struct kvm_mmu_page, link);
1784                         kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
1785                         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1786                 }
1787                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
1788         }
1789
1790         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
1791 }
1792
1793 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1794 {
1795         struct kvm_mmu_page *sp;
1796         struct hlist_node *node;
1797         LIST_HEAD(invalid_list);
1798         int r;
1799
1800         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
1801         r = 0;
1802
1803         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1804                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
1805                          sp->role.word);
1806                 r = 1;
1807                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1808         }
1809         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1810         return r;
1811 }
1812
1813 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1814 {
1815         struct kvm_mmu_page *sp;
1816         struct hlist_node *node;
1817         LIST_HEAD(invalid_list);
1818
1819         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1820                 pgprintk("%s: zap %llx %x\n",
1821                          __func__, gfn, sp->role.word);
1822                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1823         }
1824         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1825 }
1826
1827 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1828 {
1829         int slot = memslot_id(kvm, gfn);
1830         struct kvm_mmu_page *sp = page_header(__pa(pte));
1831
1832         __set_bit(slot, sp->slot_bitmap);
1833 }
1834
1835 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1836 {
1837         int i;
1838         u64 *pt = sp->spt;
1839
1840         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1841                 return;
1842
1843         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1844                 if (pt[i] == shadow_notrap_nonpresent_pte)
1845                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1846         }
1847 }
1848
1849 /*
1850  * The function is based on mtrr_type_lookup() in
1851  * arch/x86/kernel/cpu/mtrr/generic.c
1852  */
1853 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1854                          u64 start, u64 end)
1855 {
1856         int i;
1857         u64 base, mask;
1858         u8 prev_match, curr_match;
1859         int num_var_ranges = KVM_NR_VAR_MTRR;
1860
1861         if (!mtrr_state->enabled)
1862                 return 0xFF;
1863
1864         /* Make end inclusive, instead of exclusive */
1865         end--;
1866
1867         /* Look in fixed ranges. Just return the type as per start */
1868         if (mtrr_state->have_fixed && (start < 0x100000)) {
1869                 int idx;
1870
1871                 if (start < 0x80000) {
1872                         idx = 0;
1873                         idx += (start >> 16);
1874                         return mtrr_state->fixed_ranges[idx];
1875                 } else if (start < 0xC0000) {
1876                         idx = 1 * 8;
1877                         idx += ((start - 0x80000) >> 14);
1878                         return mtrr_state->fixed_ranges[idx];
1879                 } else if (start < 0x1000000) {
1880                         idx = 3 * 8;
1881                         idx += ((start - 0xC0000) >> 12);
1882                         return mtrr_state->fixed_ranges[idx];
1883                 }
1884         }
1885
1886         /*
1887          * Look in variable ranges.
1888          * Look for multiple ranges matching this address and pick the
1889          * type as per MTRR precedence.
1890          */
1891         if (!(mtrr_state->enabled & 2))
1892                 return mtrr_state->def_type;
1893
1894         prev_match = 0xFF;
1895         for (i = 0; i < num_var_ranges; ++i) {
1896                 unsigned short start_state, end_state;
1897
1898                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1899                         continue;
1900
1901                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1902                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1903                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1904                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1905
1906                 start_state = ((start & mask) == (base & mask));
1907                 end_state = ((end & mask) == (base & mask));
1908                 if (start_state != end_state)
1909                         return 0xFE;
1910
1911                 if ((start & mask) != (base & mask))
1912                         continue;
1913
1914                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1915                 if (prev_match == 0xFF) {
1916                         prev_match = curr_match;
1917                         continue;
1918                 }
1919
1920                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1921                     curr_match == MTRR_TYPE_UNCACHABLE)
1922                         return MTRR_TYPE_UNCACHABLE;
1923
1924                 if ((prev_match == MTRR_TYPE_WRBACK &&
1925                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1926                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1927                      curr_match == MTRR_TYPE_WRBACK)) {
1928                         prev_match = MTRR_TYPE_WRTHROUGH;
1929                         curr_match = MTRR_TYPE_WRTHROUGH;
1930                 }
1931
1932                 if (prev_match != curr_match)
1933                         return MTRR_TYPE_UNCACHABLE;
1934         }
1935
1936         if (prev_match != 0xFF)
1937                 return prev_match;
1938
1939         return mtrr_state->def_type;
1940 }
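/*
 * Worked example for the fixed-range branch of get_mtrr_type() above,
 * assuming the conventional x86 fixed-range MTRR layout behind
 * mtrr_state->fixed_ranges[] (8 x 64K, 2 x 8 x 16K, 8 x 8 x 4K entries):
 * for start = 0xA0000 the middle branch is taken, so
 *
 *	idx = 1 * 8 + ((0xA0000 - 0x80000) >> 14) = 8 + 8 = 16,
 *
 * i.e. the first entry of the MTRRfix16K_A0000 group.
 */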
1941
1942 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1943 {
1944         u8 mtrr;
1945
1946         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1947                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1948         if (mtrr == 0xfe || mtrr == 0xff)
1949                 mtrr = MTRR_TYPE_WRBACK;
1950         return mtrr;
1951 }
1952 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1953
1954 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1955 {
1956         trace_kvm_mmu_unsync_page(sp);
1957         ++vcpu->kvm->stat.mmu_unsync;
1958         sp->unsync = 1;
1959
1960         kvm_mmu_mark_parents_unsync(sp);
1961         mmu_convert_notrap(sp);
1962 }
1963
1964 static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
1965 {
1966         struct kvm_mmu_page *s;
1967         struct hlist_node *node;
1968
1969         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1970                 if (s->unsync)
1971                         continue;
1972                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1973                 __kvm_unsync_page(vcpu, s);
1974         }
1975 }
1976
1977 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1978                                   bool can_unsync)
1979 {
1980         struct kvm_mmu_page *s;
1981         struct hlist_node *node;
1982         bool need_unsync = false;
1983
1984         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1985                 if (!can_unsync)
1986                         return 1;
1987
1988                 if (s->role.level != PT_PAGE_TABLE_LEVEL)
1989                         return 1;
1990
1991                 if (!need_unsync && !s->unsync) {
1992                         if (!oos_shadow)
1993                                 return 1;
1994                         need_unsync = true;
1995                 }
1996         }
1997         if (need_unsync)
1998                 kvm_unsync_pages(vcpu, gfn);
1999         return 0;
2000 }
2001
2002 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2003                     unsigned pte_access, int user_fault,
2004                     int write_fault, int dirty, int level,
2005                     gfn_t gfn, pfn_t pfn, bool speculative,
2006                     bool can_unsync, bool host_writable)
2007 {
2008         u64 spte, entry = *sptep;
2009         int ret = 0;
2010
2011         /*
2012          * We don't set the accessed bit, since we sometimes want to see
2013          * whether the guest actually used the pte (in order to detect
2014          * demand paging).
2015          */
2016         spte = PT_PRESENT_MASK;
2017         if (!speculative)
2018                 spte |= shadow_accessed_mask;
2019         if (!dirty)
2020                 pte_access &= ~ACC_WRITE_MASK;
2021         if (pte_access & ACC_EXEC_MASK)
2022                 spte |= shadow_x_mask;
2023         else
2024                 spte |= shadow_nx_mask;
2025         if (pte_access & ACC_USER_MASK)
2026                 spte |= shadow_user_mask;
2027         if (level > PT_PAGE_TABLE_LEVEL)
2028                 spte |= PT_PAGE_SIZE_MASK;
2029         if (tdp_enabled)
2030                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2031                         kvm_is_mmio_pfn(pfn));
2032
2033         if (host_writable)
2034                 spte |= SPTE_HOST_WRITEABLE;
2035         else
2036                 pte_access &= ~ACC_WRITE_MASK;
2037
2038         spte |= (u64)pfn << PAGE_SHIFT;
2039
2040         if ((pte_access & ACC_WRITE_MASK)
2041             || (!vcpu->arch.mmu.direct_map && write_fault
2042                 && !is_write_protection(vcpu) && !user_fault)) {
2043
2044                 if (level > PT_PAGE_TABLE_LEVEL &&
2045                     has_wrprotected_page(vcpu->kvm, gfn, level)) {
2046                         ret = 1;
2047                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2048                         goto done;
2049                 }
2050
2051                 spte |= PT_WRITABLE_MASK;
2052
2053                 if (!vcpu->arch.mmu.direct_map
2054                     && !(pte_access & ACC_WRITE_MASK))
2055                         spte &= ~PT_USER_MASK;
2056
2057                 /*
2058                  * Optimization: for pte sync, if the spte was writable the hash
2059                  * lookup is unnecessary (and expensive). Write protection
2060                  * is the responsibility of mmu_get_page / kvm_sync_page.
2061                  * The same reasoning applies to dirty page accounting.
2062                  */
2063                 if (!can_unsync && is_writable_pte(*sptep))
2064                         goto set_pte;
2065
2066                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2067                         pgprintk("%s: found shadow page for %llx, marking ro\n",
2068                                  __func__, gfn);
2069                         ret = 1;
2070                         pte_access &= ~ACC_WRITE_MASK;
2071                         if (is_writable_pte(spte))
2072                                 spte &= ~PT_WRITABLE_MASK;
2073                 }
2074         }
2075
2076         if (pte_access & ACC_WRITE_MASK)
2077                 mark_page_dirty(vcpu->kvm, gfn);
2078
2079 set_pte:
2080         update_spte(sptep, spte);
2081         /*
2082          * If we overwrite a writable spte with a read-only one we
2083          * should flush remote TLBs. Otherwise rmap_write_protect
2084          * will find a read-only spte, even though the writable spte
2085          * might be cached on a CPU's TLB.
2086          */
2087         if (is_writable_pte(entry) && !is_writable_pte(*sptep))
2088                 kvm_flush_remote_tlbs(vcpu->kvm);
2089 done:
2090         return ret;
2091 }
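/*
 * Rough shape of the spte assembled by set_spte() above (a summary
 * sketch; the shadow_*_mask values are configured elsewhere and the
 * host-writable and memory-type bits are omitted here):
 *
 *	spte = PT_PRESENT_MASK
 *	     | (speculative ? 0 : shadow_accessed_mask)
 *	     | (pte_access & ACC_EXEC_MASK ? shadow_x_mask : shadow_nx_mask)
 *	     | (pte_access & ACC_USER_MASK ? shadow_user_mask : 0)
 *	     | (level > PT_PAGE_TABLE_LEVEL ? PT_PAGE_SIZE_MASK : 0)
 *	     | ((u64)pfn << PAGE_SHIFT)
 *	     | (writable ? PT_WRITABLE_MASK : 0);
 */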
2092
2093 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2094                          unsigned pt_access, unsigned pte_access,
2095                          int user_fault, int write_fault, int dirty,
2096                          int *ptwrite, int level, gfn_t gfn,
2097                          pfn_t pfn, bool speculative,
2098                          bool host_writable)
2099 {
2100         int was_rmapped = 0;
2101         int rmap_count;
2102
2103         pgprintk("%s: spte %llx access %x write_fault %d"
2104                  " user_fault %d gfn %llx\n",
2105                  __func__, *sptep, pt_access,
2106                  write_fault, user_fault, gfn);
2107
2108         if (is_rmap_spte(*sptep)) {
2109                 /*
2110                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2111                  * the parent of the now unreachable PTE.
2112                  */
2113                 if (level > PT_PAGE_TABLE_LEVEL &&
2114                     !is_large_pte(*sptep)) {
2115                         struct kvm_mmu_page *child;
2116                         u64 pte = *sptep;
2117
2118                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2119                         mmu_page_remove_parent_pte(child, sptep);
2120                         __set_spte(sptep, shadow_trap_nonpresent_pte);
2121                         kvm_flush_remote_tlbs(vcpu->kvm);
2122                 } else if (pfn != spte_to_pfn(*sptep)) {
2123                         pgprintk("hfn old %llx new %llx\n",
2124                                  spte_to_pfn(*sptep), pfn);
2125                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2126                         kvm_flush_remote_tlbs(vcpu->kvm);
2127                 } else
2128                         was_rmapped = 1;
2129         }
2130
2131         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
2132                       dirty, level, gfn, pfn, speculative, true,
2133                       host_writable)) {
2134                 if (write_fault)
2135                         *ptwrite = 1;
2136                 kvm_mmu_flush_tlb(vcpu);
2137         }
2138
2139         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2140         pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2141                  is_large_pte(*sptep)? "2MB" : "4kB",
2142                  *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
2143                  *sptep, sptep);
2144         if (!was_rmapped && is_large_pte(*sptep))
2145                 ++vcpu->kvm->stat.lpages;
2146
2147         page_header_update_slot(vcpu->kvm, sptep, gfn);
2148         if (!was_rmapped) {
2149                 rmap_count = rmap_add(vcpu, sptep, gfn);
2150                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2151                         rmap_recycle(vcpu, sptep, gfn);
2152         }
2153         kvm_release_pfn_clean(pfn);
2154         if (speculative) {
2155                 vcpu->arch.last_pte_updated = sptep;
2156                 vcpu->arch.last_pte_gfn = gfn;
2157         }
2158 }
2159
2160 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2161 {
2162 }
2163
2164 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2165                                      bool no_dirty_log)
2166 {
2167         struct kvm_memory_slot *slot;
2168         unsigned long hva;
2169
2170         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2171         if (!slot) {
2172                 get_page(bad_page);
2173                 return page_to_pfn(bad_page);
2174         }
2175
2176         hva = gfn_to_hva_memslot(slot, gfn);
2177
2178         return hva_to_pfn_atomic(vcpu->kvm, hva);
2179 }
2180
2181 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2182                                     struct kvm_mmu_page *sp,
2183                                     u64 *start, u64 *end)
2184 {
2185         struct page *pages[PTE_PREFETCH_NUM];
2186         unsigned access = sp->role.access;
2187         int i, ret;
2188         gfn_t gfn;
2189
2190         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2191         if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
2192                 return -1;
2193
2194         ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2195         if (ret <= 0)
2196                 return -1;
2197
2198         for (i = 0; i < ret; i++, gfn++, start++)
2199                 mmu_set_spte(vcpu, start, ACC_ALL,
2200                              access, 0, 0, 1, NULL,
2201                              sp->role.level, gfn,
2202                              page_to_pfn(pages[i]), true, true);
2203
2204         return 0;
2205 }
2206
2207 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2208                                   struct kvm_mmu_page *sp, u64 *sptep)
2209 {
2210         u64 *spte, *start = NULL;
2211         int i;
2212
2213         WARN_ON(!sp->role.direct);
2214
2215         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2216         spte = sp->spt + i;
2217
2218         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2219                 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
2220                         if (!start)
2221                                 continue;
2222                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2223                                 break;
2224                         start = NULL;
2225                 } else if (!start)
2226                         start = spte;
2227         }
2228 }
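/*
 * Note on __direct_pte_prefetch() above: the index is rounded down with
 * "& ~(PTE_PREFETCH_NUM - 1)" to the start of the aligned window of
 * PTE_PREFETCH_NUM sptes containing sptep, and contiguous runs of
 * not-yet-present sptes in that window (excluding sptep itself) are
 * handed to direct_pte_prefetch_many() in batches.
 */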
2229
2230 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2231 {
2232         struct kvm_mmu_page *sp;
2233
2234         /*
2235          * Since there is no accessed bit on EPT, there is no way to
2236          * distinguish between actually accessed translations and
2237          * prefetched ones, so disable pte prefetch if EPT is
2238          * enabled.
2239          */
2240         if (!shadow_accessed_mask)
2241                 return;
2242
2243         sp = page_header(__pa(sptep));
2244         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2245                 return;
2246
2247         __direct_pte_prefetch(vcpu, sp, sptep);
2248 }
2249
2250 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2251                         int map_writable, int level, gfn_t gfn, pfn_t pfn,
2252                         bool prefault)
2253 {
2254         struct kvm_shadow_walk_iterator iterator;
2255         struct kvm_mmu_page *sp;
2256         int pt_write = 0;
2257         gfn_t pseudo_gfn;
2258
2259         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2260                 if (iterator.level == level) {
2261                         unsigned pte_access = ACC_ALL;
2262
2263                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
2264                                      0, write, 1, &pt_write,
2265                                      level, gfn, pfn, prefault, map_writable);
2266                         direct_pte_prefetch(vcpu, iterator.sptep);
2267                         ++vcpu->stat.pf_fixed;
2268                         break;
2269                 }
2270
2271                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
2272                         u64 base_addr = iterator.addr;
2273
2274                         base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2275                         pseudo_gfn = base_addr >> PAGE_SHIFT;
2276                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2277                                               iterator.level - 1,
2278                                               1, ACC_ALL, iterator.sptep);
2279                         if (!sp) {
2280                                 pgprintk("nonpaging_map: ENOMEM\n");
2281                                 kvm_release_pfn_clean(pfn);
2282                                 return -ENOMEM;
2283                         }
2284
2285                         __set_spte(iterator.sptep,
2286                                    __pa(sp->spt)
2287                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
2288                                    | shadow_user_mask | shadow_x_mask
2289                                    | shadow_accessed_mask);
2290                 }
2291         }
2292         return pt_write;
2293 }
2294
2295 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2296 {
2297         siginfo_t info;
2298
2299         info.si_signo   = SIGBUS;
2300         info.si_errno   = 0;
2301         info.si_code    = BUS_MCEERR_AR;
2302         info.si_addr    = (void __user *)address;
2303         info.si_addr_lsb = PAGE_SHIFT;
2304
2305         send_sig_info(SIGBUS, &info, tsk);
2306 }
2307
2308 static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2309 {
2310         kvm_release_pfn_clean(pfn);
2311         if (is_hwpoison_pfn(pfn)) {
2312                 kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
2313                 return 0;
2314         } else if (is_fault_pfn(pfn))
2315                 return -EFAULT;
2316
2317         return 1;
2318 }
2319
2320 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2321                                         gfn_t *gfnp, pfn_t *pfnp, int *levelp)
2322 {
2323         pfn_t pfn = *pfnp;
2324         gfn_t gfn = *gfnp;
2325         int level = *levelp;
2326
2327         /*
2328          * Check if it's a transparent hugepage. If this were a
2329          * hugetlbfs page, level wouldn't be set to
2330          * PT_PAGE_TABLE_LEVEL and no adjustment would be done
2331          * here.
2332          */
2333         if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
2334             level == PT_PAGE_TABLE_LEVEL &&
2335             PageTransCompound(pfn_to_page(pfn)) &&
2336             !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
2337                 unsigned long mask;
2338                 /*
2339                  * mmu_notifier_retry was successful and we hold the
2340                  * mmu_lock here, so the pmd can't start splitting
2341                  * under us, and in turn
2342                  * __split_huge_page_refcount() can't run under us
2343                  * either, so we can safely transfer the refcount from
2344                  * PG_tail to PG_head as we switch the pfn from the
2345                  * tail page to the head page.
2346                  */
2347                 *levelp = level = PT_DIRECTORY_LEVEL;
2348                 mask = KVM_PAGES_PER_HPAGE(level) - 1;
2349                 VM_BUG_ON((gfn & mask) != (pfn & mask));
2350                 if (pfn & mask) {
2351                         gfn &= ~mask;
2352                         *gfnp = gfn;
2353                         kvm_release_pfn_clean(pfn);
2354                         pfn &= ~mask;
2355                         if (!get_page_unless_zero(pfn_to_page(pfn)))
2356                                 BUG();
2357                         *pfnp = pfn;
2358                 }
2359         }
2360 }
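/*
 * Worked example for the alignment math in transparent_hugepage_adjust()
 * above, assuming the usual x86 value KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)
 * == 512 (2MB huge pages built from 4K pages): mask = 511 = 0x1ff, the
 * VM_BUG_ON requires gfn and pfn to be congruent modulo 512, and clearing
 * the low 9 bits of both moves them back to the 2MB-aligned head page.
 */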
2361
2362 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2363                          gva_t gva, pfn_t *pfn, bool write, bool *writable);
2364
2365 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
2366                          bool prefault)
2367 {
2368         int r;
2369         int level;
2370         int force_pt_level;
2371         pfn_t pfn;
2372         unsigned long mmu_seq;
2373         bool map_writable;
2374
2375         force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
2376         if (likely(!force_pt_level)) {
2377                 level = mapping_level(vcpu, gfn);
2378                 /*
2379                  * This path builds a PAE page table, so we can map
2380                  * 2MB pages at most. Therefore check whether the level
2381                  * is larger than that.
2382                  */
2383                 if (level > PT_DIRECTORY_LEVEL)
2384                         level = PT_DIRECTORY_LEVEL;
2385
2386                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2387         } else
2388                 level = PT_PAGE_TABLE_LEVEL;
2389
2390         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2391         smp_rmb();
2392
2393         if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
2394                 return 0;
2395
2396         /* mmio */
2397         if (is_error_pfn(pfn))
2398                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2399
2400         spin_lock(&vcpu->kvm->mmu_lock);
2401         if (mmu_notifier_retry(vcpu, mmu_seq))
2402                 goto out_unlock;
2403         kvm_mmu_free_some_pages(vcpu);
2404         if (likely(!force_pt_level))
2405                 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
2406         r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
2407                          prefault);
2408         spin_unlock(&vcpu->kvm->mmu_lock);
2409
2410
2411         return r;
2412
2413 out_unlock:
2414         spin_unlock(&vcpu->kvm->mmu_lock);
2415         kvm_release_pfn_clean(pfn);
2416         return 0;
2417 }
2418
2419
2420 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2421 {
2422         int i;
2423         struct kvm_mmu_page *sp;
2424         LIST_HEAD(invalid_list);
2425
2426         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2427                 return;
2428         spin_lock(&vcpu->kvm->mmu_lock);
2429         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2430             (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2431              vcpu->arch.mmu.direct_map)) {
2432                 hpa_t root = vcpu->arch.mmu.root_hpa;
2433
2434                 sp = page_header(root);
2435                 --sp->root_count;
2436                 if (!sp->root_count && sp->role.invalid) {
2437                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2438                         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2439                 }
2440                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2441                 spin_unlock(&vcpu->kvm->mmu_lock);
2442                 return;
2443         }
2444         for (i = 0; i < 4; ++i) {
2445                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2446
2447                 if (root) {
2448                         root &= PT64_BASE_ADDR_MASK;
2449                         sp = page_header(root);
2450                         --sp->root_count;
2451                         if (!sp->root_count && sp->role.invalid)
2452                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2453                                                          &invalid_list);
2454                 }
2455                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2456         }
2457         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2458         spin_unlock(&vcpu->kvm->mmu_lock);
2459         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2460 }
2461
2462 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2463 {
2464         int ret = 0;
2465
2466         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2467                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2468                 ret = 1;
2469         }
2470
2471         return ret;
2472 }
2473
2474 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
2475 {
2476         struct kvm_mmu_page *sp;
2477         unsigned i;
2478
2479         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2480                 spin_lock(&vcpu->kvm->mmu_lock);
2481                 kvm_mmu_free_some_pages(vcpu);
2482                 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
2483                                       1, ACC_ALL, NULL);
2484                 ++sp->root_count;
2485                 spin_unlock(&vcpu->kvm->mmu_lock);
2486                 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
2487         } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
2488                 for (i = 0; i < 4; ++i) {
2489                         hpa_t root = vcpu->arch.mmu.pae_root[i];
2490
2491                         ASSERT(!VALID_PAGE(root));
2492                         spin_lock(&vcpu->kvm->mmu_lock);
2493                         kvm_mmu_free_some_pages(vcpu);
2494                         sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
2495                                               i << 30,
2496                                               PT32_ROOT_LEVEL, 1, ACC_ALL,
2497                                               NULL);
2498                         root = __pa(sp->spt);
2499                         ++sp->root_count;
2500                         spin_unlock(&vcpu->kvm->mmu_lock);
2501                         vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2502                 }
2503                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2504         } else
2505                 BUG();
2506
2507         return 0;
2508 }
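/*
 * Note on the PT32E case above: each of the four pae_root entries covers
 * one 1GB quadrant of the guest space (gfn i << (30 - PAGE_SHIFT), address
 * i << 30), which matches the "(addr >> 30) & 3" indexing used by
 * shadow_walk_init().
 */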
2509
2510 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2511 {
2512         struct kvm_mmu_page *sp;
2513         u64 pdptr, pm_mask;
2514         gfn_t root_gfn;
2515         int i;
2516
2517         root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
2518
2519         if (mmu_check_root(vcpu, root_gfn))
2520                 return 1;
2521
2522         /*
2523          * Do we shadow a long mode page table? If so we need to
2524          * write-protect the guest's page table root.
2525          */
2526         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2527                 hpa_t root = vcpu->arch.mmu.root_hpa;
2528
2529                 ASSERT(!VALID_PAGE(root));
2530
2531                 spin_lock(&vcpu->kvm->mmu_lock);
2532                 kvm_mmu_free_some_pages(vcpu);
2533                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
2534                                       0, ACC_ALL, NULL);
2535                 root = __pa(sp->spt);
2536                 ++sp->root_count;
2537                 spin_unlock(&vcpu->kvm->mmu_lock);
2538                 vcpu->arch.mmu.root_hpa = root;
2539                 return 0;
2540         }
2541
2542         /*
2543          * We shadow a 32 bit page table. This may be a legacy 2-level
2544          * or a PAE 3-level page table. In either case we need to be aware that
2545          * the shadow page table may be a PAE or a long mode page table.
2546          */
2547         pm_mask = PT_PRESENT_MASK;
2548         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
2549                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2550
2551         for (i = 0; i < 4; ++i) {
2552                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2553
2554                 ASSERT(!VALID_PAGE(root));
2555                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2556                         pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
2557                         if (!is_present_gpte(pdptr)) {
2558                                 vcpu->arch.mmu.pae_root[i] = 0;
2559                                 continue;
2560                         }
2561                         root_gfn = pdptr >> PAGE_SHIFT;
2562                         if (mmu_check_root(vcpu, root_gfn))
2563                                 return 1;
2564                 }
2565                 spin_lock(&vcpu->kvm->mmu_lock);
2566                 kvm_mmu_free_some_pages(vcpu);
2567                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2568                                       PT32_ROOT_LEVEL, 0,
2569                                       ACC_ALL, NULL);
2570                 root = __pa(sp->spt);
2571                 ++sp->root_count;
2572                 spin_unlock(&vcpu->kvm->mmu_lock);
2573
2574                 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
2575         }
2576         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2577
2578         /*
2579          * If we shadow a 32 bit page table with a long mode page
2580          * table we enter this path.
2581          */
2582         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2583                 if (vcpu->arch.mmu.lm_root == NULL) {
2584                         /*
2585                          * The additional page necessary for this is only
2586                          * allocated on demand.
2587                          */
2588
2589                         u64 *lm_root;
2590
2591                         lm_root = (void*)get_zeroed_page(GFP_KERNEL);
2592                         if (lm_root == NULL)
2593                                 return 1;
2594
2595                         lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
2596
2597                         vcpu->arch.mmu.lm_root = lm_root;
2598                 }
2599
2600                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
2601         }
2602
2603         return 0;
2604 }
2605
2606 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2607 {
2608         if (vcpu->arch.mmu.direct_map)
2609                 return mmu_alloc_direct_roots(vcpu);
2610         else
2611                 return mmu_alloc_shadow_roots(vcpu);
2612 }
2613
2614 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2615 {
2616         int i;
2617         struct kvm_mmu_page *sp;
2618
2619         if (vcpu->arch.mmu.direct_map)
2620                 return;
2621
2622         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2623                 return;
2624
2625         trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
2626         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2627                 hpa_t root = vcpu->arch.mmu.root_hpa;
2628                 sp = page_header(root);
2629                 mmu_sync_children(vcpu, sp);
2630                 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2631                 return;
2632         }
2633         for (i = 0; i < 4; ++i) {
2634                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2635
2636                 if (root && VALID_PAGE(root)) {
2637                         root &= PT64_BASE_ADDR_MASK;
2638                         sp = page_header(root);
2639                         mmu_sync_children(vcpu, sp);
2640                 }
2641         }
2642         trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2643 }
2644
2645 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2646 {
2647         spin_lock(&vcpu->kvm->mmu_lock);
2648         mmu_sync_roots(vcpu);
2649         spin_unlock(&vcpu->kvm->mmu_lock);
2650 }
2651
2652 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2653                                   u32 access, struct x86_exception *exception)
2654 {
2655         if (exception)
2656                 exception->error_code = 0;
2657         return vaddr;
2658 }
2659
2660 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2661                                          u32 access,
2662                                          struct x86_exception *exception)
2663 {
2664         if (exception)
2665                 exception->error_code = 0;
2666         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2667 }
2668
2669 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2670                                 u32 error_code, bool prefault)
2671 {
2672         gfn_t gfn;
2673         int r;
2674
2675         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2676         r = mmu_topup_memory_caches(vcpu);
2677         if (r)
2678                 return r;
2679
2680         ASSERT(vcpu);
2681         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2682
2683         gfn = gva >> PAGE_SHIFT;
2684
2685         return nonpaging_map(vcpu, gva & PAGE_MASK,
2686                              error_code & PFERR_WRITE_MASK, gfn, prefault);
2687 }
2688
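/*
 * The async page fault token built below packs an incrementing per-vcpu
 * id into the upper bits and the vcpu id into the low 12 bits, which is
 * presumably what lets a completed async fault be matched back to the
 * vcpu and request that triggered it.
 */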
2689 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
2690 {
2691         struct kvm_arch_async_pf arch;
2692
2693         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
2694         arch.gfn = gfn;
2695         arch.direct_map = vcpu->arch.mmu.direct_map;
2696         arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
2697
2698         return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
2699 }
2700
2701 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
2702 {
2703         if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
2704                      kvm_event_needs_reinjection(vcpu)))
2705                 return false;
2706
2707         return kvm_x86_ops->interrupt_allowed(vcpu);
2708 }
2709
2710 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2711                          gva_t gva, pfn_t *pfn, bool write, bool *writable)
2712 {
2713         bool async;
2714
2715         *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
2716
2717         if (!async)
2718                 return false; /* *pfn has correct page already */
2719
2720         put_page(pfn_to_page(*pfn));
2721
2722         if (!prefault && can_do_async_pf(vcpu)) {
2723                 trace_kvm_try_async_get_page(gva, gfn);
2724                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
2725                         trace_kvm_async_pf_doublefault(gva, gfn);
2726                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
2727                         return true;
2728                 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
2729                         return true;
2730         }
2731
2732         *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
2733
2734         return false;
2735 }
2736
2737 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
2738                           bool prefault)
2739 {
2740         pfn_t pfn;
2741         int r;
2742         int level;
2743         int force_pt_level;
2744         gfn_t gfn = gpa >> PAGE_SHIFT;
2745         unsigned long mmu_seq;
2746         int write = error_code & PFERR_WRITE_MASK;
2747         bool map_writable;
2748
2749         ASSERT(vcpu);
2750         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2751
2752         r = mmu_topup_memory_caches(vcpu);
2753         if (r)
2754                 return r;
2755
2756         force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
2757         if (likely(!force_pt_level)) {
2758                 level = mapping_level(vcpu, gfn);
2759                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2760         } else
2761                 level = PT_PAGE_TABLE_LEVEL;
2762
2763         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2764         smp_rmb();
2765
2766         if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
2767                 return 0;
2768
2769         /* mmio */
2770         if (is_error_pfn(pfn))
2771                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2772         spin_lock(&vcpu->kvm->mmu_lock);
2773         if (mmu_notifier_retry(vcpu, mmu_seq))
2774                 goto out_unlock;
2775         kvm_mmu_free_some_pages(vcpu);
2776         if (likely(!force_pt_level))
2777                 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
2778         r = __direct_map(vcpu, gpa, write, map_writable,
2779                          level, gfn, pfn, prefault);
2780         spin_unlock(&vcpu->kvm->mmu_lock);
2781
2782         return r;
2783
2784 out_unlock:
2785         spin_unlock(&vcpu->kvm->mmu_lock);
2786         kvm_release_pfn_clean(pfn);
2787         return 0;
2788 }
2789
2790 static void nonpaging_free(struct kvm_vcpu *vcpu)
2791 {
2792         mmu_free_roots(vcpu);
2793 }
2794
2795 static int nonpaging_init_context(struct kvm_vcpu *vcpu,
2796                                   struct kvm_mmu *context)
2797 {
2798         context->new_cr3 = nonpaging_new_cr3;
2799         context->page_fault = nonpaging_page_fault;
2800         context->gva_to_gpa = nonpaging_gva_to_gpa;
2801         context->free = nonpaging_free;
2802         context->prefetch_page = nonpaging_prefetch_page;
2803         context->sync_page = nonpaging_sync_page;
2804         context->invlpg = nonpaging_invlpg;
2805         context->update_pte = nonpaging_update_pte;
2806         context->root_level = 0;
2807         context->shadow_root_level = PT32E_ROOT_LEVEL;
2808         context->root_hpa = INVALID_PAGE;
2809         context->direct_map = true;
2810         context->nx = false;
2811         return 0;
2812 }
2813
2814 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2815 {
2816         ++vcpu->stat.tlb_flush;
2817         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2818 }
2819
2820 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2821 {
2822         pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
2823         mmu_free_roots(vcpu);
2824 }
2825
2826 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
2827 {
2828         return kvm_read_cr3(vcpu);
2829 }
2830
2831 static void inject_page_fault(struct kvm_vcpu *vcpu,
2832                               struct x86_exception *fault)
2833 {
2834         vcpu->arch.mmu.inject_page_fault(vcpu, fault);
2835 }
2836
2837 static void paging_free(struct kvm_vcpu *vcpu)
2838 {
2839         nonpaging_free(vcpu);
2840 }
2841
2842 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2843 {
2844         int bit7;
2845
2846         bit7 = (gpte >> 7) & 1;
2847         return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2848 }
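/*
 * Note on is_rsvd_bits_set() above: bit 7 of the gpte is the PS bit in
 * directory-level entries, so it selects the "large page" row of
 * rsvd_bits_mask[]. For 4K PTEs (where bit 7 is PAT) the two rows are
 * made identical by reset_rsvds_bits_mask() below, so the shared
 * indexing is still correct.
 */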
2849
2850 #define PTTYPE 64
2851 #include "paging_tmpl.h"
2852 #undef PTTYPE
2853
2854 #define PTTYPE 32
2855 #include "paging_tmpl.h"
2856 #undef PTTYPE
2857
2858 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2859                                   struct kvm_mmu *context,
2860                                   int level)
2861 {
2862         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2863         u64 exb_bit_rsvd = 0;
2864
2865         if (!context->nx)
2866                 exb_bit_rsvd = rsvd_bits(63, 63);
2867         switch (level) {
2868         case PT32_ROOT_LEVEL:
2869                 /* no rsvd bits for 2 level 4K page table entries */
2870                 context->rsvd_bits_mask[0][1] = 0;
2871                 context->rsvd_bits_mask[0][0] = 0;
2872                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2873
2874                 if (!is_pse(vcpu)) {
2875                         context->rsvd_bits_mask[1][1] = 0;
2876                         break;
2877                 }
2878
2879                 if (is_cpuid_PSE36())
2880                         /* 36bits PSE 4MB page */
2881                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2882                 else
2883                         /* 32 bits PSE 4MB page */
2884                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2885                 break;
2886         case PT32E_ROOT_LEVEL:
2887                 context->rsvd_bits_mask[0][2] =
2888                         rsvd_bits(maxphyaddr, 63) |
2889                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2890                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2891                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2892                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2893                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2894                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2895                         rsvd_bits(maxphyaddr, 62) |
2896                         rsvd_bits(13, 20);              /* large page */
2897                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2898                 break;
2899         case PT64_ROOT_LEVEL:
2900                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2901                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2902                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2903                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2904                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2905                         rsvd_bits(maxphyaddr, 51);
2906                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2907                         rsvd_bits(maxphyaddr, 51);
2908                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2909                 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2910                         rsvd_bits(maxphyaddr, 51) |
2911                         rsvd_bits(13, 29);
2912                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2913                         rsvd_bits(maxphyaddr, 51) |
2914                         rsvd_bits(13, 20);              /* large page */
2915                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2916                 break;
2917         }
2918 }
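/*
 * Hedged example of what the rsvd_bits() helper used above is assumed to
 * produce (a mask with bits s..e set, i.e. ((1ULL << (e - s + 1)) - 1) << s
 * under the usual KVM definition): rsvd_bits(13, 20) == 0x1fe000, which is
 * why large-page PDEs reserve bits 13-20 - a 2MB frame's physical address
 * field only starts at bit 21 (and bit 12 is PAT). Likewise rsvd_bits(13, 29)
 * covers the gap below the 1GB frame address at bit 30.
 */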
2919
2920 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
2921                                         struct kvm_mmu *context,
2922                                         int level)
2923 {
2924         context->nx = is_nx(vcpu);
2925
2926         reset_rsvds_bits_mask(vcpu, context, level);
2927
2928         ASSERT(is_pae(vcpu));
2929         context->new_cr3 = paging_new_cr3;
2930         context->page_fault = paging64_page_fault;
2931         context->gva_to_gpa = paging64_gva_to_gpa;
2932         context->prefetch_page = paging64_prefetch_page;
2933         context->sync_page = paging64_sync_page;
2934         context->invlpg = paging64_invlpg;
2935         context->update_pte = paging64_update_pte;
2936         context->free = paging_free;
2937         context->root_level = level;
2938         context->shadow_root_level = level;
2939         context->root_hpa = INVALID_PAGE;
2940         context->direct_map = false;
2941         return 0;
2942 }
2943
2944 static int paging64_init_context(struct kvm_vcpu *vcpu,
2945                                  struct kvm_mmu *context)
2946 {
2947         return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
2948 }
2949
2950 static int paging32_init_context(struct kvm_vcpu *vcpu,
2951                                  struct kvm_mmu *context)
2952 {
2953         context->nx = false;
2954
2955         reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2956
2957         context->new_cr3 = paging_new_cr3;
2958         context->page_fault = paging32_page_fault;
2959         context->gva_to_gpa = paging32_gva_to_gpa;
2960         context->free = paging_free;
2961         context->prefetch_page = paging32_prefetch_page;
2962         context->sync_page = paging32_sync_page;
2963         context->invlpg = paging32_invlpg;
2964         context->update_pte = paging32_update_pte;
2965         context->root_level = PT32_ROOT_LEVEL;
2966         context->shadow_root_level = PT32E_ROOT_LEVEL;
2967         context->root_hpa = INVALID_PAGE;
2968         context->direct_map = false;
2969         return 0;
2970 }
2971
2972 static int paging32E_init_context(struct kvm_vcpu *vcpu,
2973                                   struct kvm_mmu *context)
2974 {
2975         return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
2976 }
2977
2978 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2979 {
2980         struct kvm_mmu *context = vcpu->arch.walk_mmu;
2981
2982         context->base_role.word = 0;
2983         context->new_cr3 = nonpaging_new_cr3;
2984         context->page_fault = tdp_page_fault;
2985         context->free = nonpaging_free;
2986         context->prefetch_page = nonpaging_prefetch_page;
2987         context->sync_page = nonpaging_sync_page;
2988         context->invlpg = nonpaging_invlpg;
2989         context->update_pte = nonpaging_update_pte;
2990         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2991         context->root_hpa = INVALID_PAGE;
2992         context->direct_map = true;
2993         context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
2994         context->get_cr3 = get_cr3;
2995         context->inject_page_fault = kvm_inject_page_fault;
2996         context->nx = is_nx(vcpu);
2997
2998         if (!is_paging(vcpu)) {
2999                 context->nx = false;
3000                 context->gva_to_gpa = nonpaging_gva_to_gpa;
3001                 context->root_level = 0;
3002         } else if (is_long_mode(vcpu)) {
3003                 context->nx = is_nx(vcpu);
3004                 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
3005                 context->gva_to_gpa = paging64_gva_to_gpa;
3006                 context->root_level = PT64_ROOT_LEVEL;
3007         } else if (is_pae(vcpu)) {
3008                 context->nx = is_nx(vcpu);
3009                 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
3010                 context->gva_to_gpa = paging64_gva_to_gpa;
3011                 context->root_level = PT32E_ROOT_LEVEL;
3012         } else {
3013                 context->nx = false;
3014                 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
3015                 context->gva_to_gpa = paging32_gva_to_gpa;
3016                 context->root_level = PT32_ROOT_LEVEL;
3017         }
3018
3019         return 0;
3020 }
3021
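/*
 * Set up a shadow MMU context matching the guest's current paging
 * mode (none, 32-bit, PAE or long mode) and record the cr4.pae and
 * cr0.wp state that the shadow page role depends on.
 */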
3022 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3023 {
3024         int r;
3025         ASSERT(vcpu);
3026         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3027
3028         if (!is_paging(vcpu))
3029                 r = nonpaging_init_context(vcpu, context);
3030         else if (is_long_mode(vcpu))
3031                 r = paging64_init_context(vcpu, context);
3032         else if (is_pae(vcpu))
3033                 r = paging32E_init_context(vcpu, context);
3034         else
3035                 r = paging32_init_context(vcpu, context);
3036
3037         vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
3038         vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
3039
3040         return r;
3041 }
3042 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
3043
3044 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
3045 {
3046         int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
3047
3048         vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
3049         vcpu->arch.walk_mmu->get_cr3           = get_cr3;
3050         vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3051
3052         return r;
3053 }
3054
3055 static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
3056 {
3057         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
3058
3059         g_context->get_cr3           = get_cr3;
3060         g_context->inject_page_fault = kvm_inject_page_fault;
3061
3062         /*
3063          * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa,
3064          * while the translation of l2_gpa to l1_gpa addresses is done
3065          * using the arch.nested_mmu.gva_to_gpa function. In effect, the
3066          * gva_to_gpa roles of mmu and nested_mmu are swapped.
3067          */
3068         if (!is_paging(vcpu)) {
3069                 g_context->nx = false;
3070                 g_context->root_level = 0;
3071                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
3072         } else if (is_long_mode(vcpu)) {
3073                 g_context->nx = is_nx(vcpu);
3074                 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
3075                 g_context->root_level = PT64_ROOT_LEVEL;
3076                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3077         } else if (is_pae(vcpu)) {
3078                 g_context->nx = is_nx(vcpu);
3079                 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
3080                 g_context->root_level = PT32E_ROOT_LEVEL;
3081                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3082         } else {
3083                 g_context->nx = false;
3084                 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
3085                 g_context->root_level = PT32_ROOT_LEVEL;
3086                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
3087         }
3088
3089         return 0;
3090 }
3091
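/*
 * Choose the MMU flavour for this vcpu: the nested walker when the
 * guest runs behind a second layer of guest paging, hardware TDP when
 * EPT/NPT is enabled, and the software shadow MMU otherwise.
 */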
3092 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
3093 {
3094         if (mmu_is_nested(vcpu))
3095                 return init_kvm_nested_mmu(vcpu);
3096         else if (tdp_enabled)
3097                 return init_kvm_tdp_mmu(vcpu);
3098         else
3099                 return init_kvm_softmmu(vcpu);
3100 }
3101
3102 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
3103 {
3104         ASSERT(vcpu);
3105         if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
3106                 /* mmu.free() should set root_hpa = INVALID_PAGE */
3107                 vcpu->arch.mmu.free(vcpu);
3108 }
3109
3110 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
3111 {
3112         destroy_kvm_mmu(vcpu);
3113         return init_kvm_mmu(vcpu);
3114 }
3115 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
3116
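/*
 * Make the vcpu's MMU usable: top up the per-vcpu memory caches,
 * allocate the shadow/TDP root(s), synchronize unsync shadow pages
 * below the root under mmu_lock, and finally point the hardware at
 * the new root via set_cr3().
 */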
3117 int kvm_mmu_load(struct kvm_vcpu *vcpu)
3118 {
3119         int r;
3120
3121         r = mmu_topup_memory_caches(vcpu);
3122         if (r)
3123                 goto out;
3124         r = mmu_alloc_roots(vcpu);
3125         spin_lock(&vcpu->kvm->mmu_lock);
3126         mmu_sync_roots(vcpu);
3127         spin_unlock(&vcpu->kvm->mmu_lock);
3128         if (r)
3129                 goto out;
3130         /* set_cr3() should ensure TLB has been flushed */
3131         vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
3132 out:
3133         return r;
3134 }
3135 EXPORT_SYMBOL_GPL(kvm_mmu_load);
3136
3137 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
3138 {
3139         mmu_free_roots(vcpu);
3140 }
3141 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
3142
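/*
 * Remove the spte that shadows a guest pte which is being written:
 * leaf entries are dropped, while non-leaf entries only have this
 * parent link removed from the child shadow page.
 */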
3143 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
3144                                   struct kvm_mmu_page *sp,
3145                                   u64 *spte)
3146 {
3147         u64 pte;
3148         struct kvm_mmu_page *child;
3149
3150         pte = *spte;
3151         if (is_shadow_present_pte(pte)) {
3152                 if (is_last_spte(pte, sp->role.level))
3153                         drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
3154                 else {
3155                         child = page_header(pte & PT64_BASE_ADDR_MASK);
3156                         mmu_page_remove_parent_pte(child, spte);
3157                 }
3158         }
3159         __set_spte(spte, shadow_trap_nonpresent_pte);
3160         if (is_large_pte(pte))
3161                 --vcpu->kvm->stat.lpages;
3162 }
3163
3164 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
3165                                   struct kvm_mmu_page *sp, u64 *spte,
3166                                   const void *new)
3167 {
3168         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
3169                 ++vcpu->kvm->stat.mmu_pde_zapped;
3170                 return;
3171         }
3172
3173         ++vcpu->kvm->stat.mmu_pte_updated;
3174         vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
3175 }
3176
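/*
 * A remote TLB flush is only needed when other cpus might hold a
 * stale translation that is more permissive than the new spte: the
 * entry was present and is now gone, points at a different frame, or
 * lost a permission bit (with NX inverted, so setting NX counts as a
 * loss of permission).
 */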
3177 static bool need_remote_flush(u64 old, u64 new)
3178 {
3179         if (!is_shadow_present_pte(old))
3180                 return false;
3181         if (!is_shadow_present_pte(new))
3182                 return true;
3183         if ((old ^ new) & PT64_BASE_ADDR_MASK)
3184                 return true;
3185         old ^= PT64_NX_MASK;
3186         new ^= PT64_NX_MASK;
3187         return (old & ~new & PT64_PERM_MASK) != 0;
3188 }
3189
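/*
 * No flush is issued here when a whole shadow page was zapped:
 * kvm_mmu_commit_zap_page() already forces a remote TLB flush for the
 * pages on the invalid list.
 */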
3190 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
3191                                     bool remote_flush, bool local_flush)
3192 {
3193         if (zap_page)
3194                 return;
3195
3196         if (remote_flush)
3197                 kvm_flush_remote_tlbs(vcpu->kvm);
3198         else if (local_flush)
3199                 kvm_mmu_flush_tlb(vcpu);
3200 }
3201
3202 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
3203 {
3204         u64 *spte = vcpu->arch.last_pte_updated;
3205
3206         return !!(spte && (*spte & shadow_accessed_mask));
3207 }
3208
3209 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
3210 {
3211         u64 *spte = vcpu->arch.last_pte_updated;
3212
3213         if (spte
3214             && vcpu->arch.last_pte_gfn == gfn
3215             && shadow_accessed_mask
3216             && !(*spte & shadow_accessed_mask)
3217             && is_shadow_present_pte(*spte))
3218                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
3219 }
3220
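/*
 * Called when the guest writes to memory that is shadowed as a page
 * table.  The written gpte is decoded (re-reading it from guest
 * memory when a PAE guest writes it in 4-byte halves), write-flood
 * detection decides whether the page should stop being shadowed, and
 * the affected sptes are either updated in place or zapped, with TLB
 * flushes as required.
 */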
3221 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3222                        const u8 *new, int bytes,
3223                        bool guest_initiated)
3224 {
3225         gfn_t gfn = gpa >> PAGE_SHIFT;
3226         union kvm_mmu_page_role mask = { .word = 0 };
3227         struct kvm_mmu_page *sp;
3228         struct hlist_node *node;
3229         LIST_HEAD(invalid_list);
3230         u64 entry, gentry, *spte;
3231         unsigned pte_size, page_offset, misaligned, quadrant, offset;
3232         int level, npte, invlpg_counter, r, flooded = 0;
3233         bool remote_flush, local_flush, zap_page;
3234
3235         zap_page = remote_flush = local_flush = false;
3236         offset = offset_in_page(gpa);
3237
3238         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
3239
3240         invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
3241
3242         /*
3243          * Assume that the pte write is on a page table of the same type
3244          * as the current vcpu paging mode, since we only update sptes
3245          * when the modes match.
3246          */
3247         if ((is_pae(vcpu) && bytes == 4) || !new) {
3248                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
3249                 if (is_pae(vcpu)) {
3250                         gpa &= ~(gpa_t)7;
3251                         bytes = 8;
3252                 }
3253                 r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
3254                 if (r)
3255                         gentry = 0;
3256                 new = (const u8 *)&gentry;
3257         }
3258
3259         switch (bytes) {
3260         case 4:
3261                 gentry = *(const u32 *)new;
3262                 break;
3263         case 8:
3264                 gentry = *(const u64 *)new;
3265                 break;
3266         default:
3267                 gentry = 0;
3268                 break;
3269         }
3270
3271         spin_lock(&vcpu->kvm->mmu_lock);
3272         if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
3273                 gentry = 0;
3274         kvm_mmu_free_some_pages(vcpu);
3275         ++vcpu->kvm->stat.mmu_pte_write;
3276         trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
3277         if (guest_initiated) {
3278                 kvm_mmu_access_page(vcpu, gfn);
3279                 if (gfn == vcpu->arch.last_pt_write_gfn
3280                     && !last_updated_pte_accessed(vcpu)) {
3281                         ++vcpu->arch.last_pt_write_count;
3282                         if (vcpu->arch.last_pt_write_count >= 3)
3283                                 flooded = 1;
3284                 } else {
3285                         vcpu->arch.last_pt_write_gfn = gfn;
3286                         vcpu->arch.last_pt_write_count = 1;
3287                         vcpu->arch.last_pte_updated = NULL;
3288                 }
3289         }
3290
3291         mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
3292         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
3293                 pte_size = sp->role.cr4_pae ? 8 : 4;
3294                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
3295                 misaligned |= bytes < 4;
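                /*
                 * Illustrative check, with 8-byte ptes: a 4-byte write at
                 * offset 4 inside a pte gives (4 ^ 7) & ~7 == 0 (aligned),
                 * while one at offset 6 spans two ptes: (6 ^ 9) & ~7 == 8.
                 * Writes shorter than 4 bytes are always treated as
                 * misaligned.
                 */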
3296                 if (misaligned || flooded) {
3297                         /*
3298                          * Misaligned accesses are too much trouble to fix
3299                          * up; also, they usually indicate a page is not used
3300                          * as a page table.
3301                          *
3302                          * If we're seeing too many writes to a page,
3303                          * it may no longer be a page table, or we may be
3304                          * forking, in which case it is better to unmap the
3305                          * page.
3306                          */
3307                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
3308                                  gpa, bytes, sp->role.word);
3309                         zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
3310                                                      &invalid_list);
3311                         ++vcpu->kvm->stat.mmu_flooded;
3312                         continue;
3313                 }
3314                 page_offset = offset;
3315                 level = sp->role.level;
3316                 npte = 1;
3317                 if (!sp->role.cr4_pae) {
3318                         page_offset <<= 1;      /* 32->64 */
3319                         /*
3320                          * A 32-bit pde maps 4MB while the shadow pdes map
3321                          * only 2MB.  So we need to double the offset again
3322                          * and zap two pdes instead of one.
3323                          */
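                        /*
                         * For example, a write to guest pde 5 (offset 0x14)
                         * ends up, after the two shifts, at offset 0x50 in
                         * the shadow page: shadow pdes 10 and 11, which
                         * together cover the same 20MB-24MB range as the
                         * guest's single 4MB pde.
                         */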
3324                         if (level == PT32_ROOT_LEVEL) {
3325                                 page_offset &= ~7; /* kill rounding error */
3326                                 page_offset <<= 1;
3327                                 npte = 2;
3328                         }
3329                         quadrant = page_offset >> PAGE_SHIFT;
3330                         page_offset &= ~PAGE_MASK;
3331                         if (quadrant != sp->role.quadrant)
3332                                 continue;
3333                 }
3334                 local_flush = true;
3335                 spte = &sp->spt[page_offset / sizeof(*spte)];
3336                 while (npte--) {
3337                         entry = *spte;
3338                         mmu_pte_write_zap_pte(vcpu, sp, spte);
3339                         if (gentry &&
3340                               !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
3341                               & mask.word))
3342                                 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
3343                         if (!remote_flush && need_remote_flush(entry, *spte))
3344                                 remote_flush = true;
3345                         ++spte;
3346                 }
3347         }
3348         mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
3349         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3350         trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
3351         spin_unlock(&vcpu->kvm->mmu_lock);
3352 }
3353
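/*
 * Zap any shadow pages for the guest page that gva translates to,
 * typically so a faulting write to a guest page table can be retried
 * without emulation.  With a direct map, guest page tables are not
 * shadowed, so there is nothing to unprotect.
 */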
3354 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
3355 {
3356         gpa_t gpa;
3357         int r;
3358
3359         if (vcpu->arch.mmu.direct_map)
3360                 return 0;
3361
3362         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
3363
3364         spin_lock(&vcpu->kvm->mmu_lock);
3365         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3366         spin_unlock(&vcpu->kvm->mmu_lock);
3367         return r;
3368 }
3369 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
3370
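/*
 * Recycle shadow pages from the tail of the active list (the oldest
 * entries) until at least KVM_REFILL_PAGES are available again.
 */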
3371 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
3372 {
3373         LIST_HEAD(invalid_list);
3374
3375         while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
3376                !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
3377                 struct kvm_mmu_page *sp;
3378
3379                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
3380                                   struct kvm_mmu_page, link);
3381                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
3382                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3383                 ++vcpu->kvm->stat.mmu_recycled;
3384         }
3385 }
3386
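/*
 * Top-level guest page fault handler.  A return value of 1 tells the
 * caller to resume the guest, 0 to exit to userspace, and a negative
 * value signals an error.  Faults the MMU cannot fix (e.g. MMIO or a
 * write to a shadowed page table) fall through to the instruction
 * emulator.
 */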
3387 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
3388                        void *insn, int insn_len)
3389 {
3390         int r;
3391         enum emulation_result er;
3392
3393         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
3394         if (r < 0)
3395                 goto out;
3396
3397         if (!r) {
3398                 r = 1;
3399                 goto out;
3400         }
3401
3402         r = mmu_topup_memory_caches(vcpu);
3403         if (r)
3404                 goto out;
3405
3406         er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
3407
3408         switch (er) {
3409         case EMULATE_DONE:
3410                 return 1;
3411         case EMULATE_DO_MMIO:
3412                 ++vcpu->stat.mmio_exits;
3413                 /* fall through */
3414         case EMULATE_FAIL:
3415                 return 0;
3416         default:
3417                 BUG();
3418         }
3419 out:
3420         return r;
3421 }
3422 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
3423
3424 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
3425 {
3426         vcpu->arch.mmu.invlpg(vcpu, gva);
3427         kvm_mmu_flush_tlb(vcpu);
3428         ++vcpu->stat.invlpg;
3429 }
3430 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
3431
3432 void kvm_enable_tdp(void)
3433 {
3434         tdp_enabled = true;
3435 }
3436 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
3437
3438 void kvm_disable_tdp(void)
3439 {
3440         tdp_enabled = false;
3441 }
3442 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
3443
3444 static void free_mmu_pages(struct kvm_vcpu *vcpu)
3445 {
3446         free_page((unsigned long)vcpu->arch.mmu.pae_root);
3447         if (vcpu->arch.mmu.lm_root != NULL)
3448                 free_page((unsigned long)vcpu->arch.mmu.lm_root);
3449 }
3450
3451 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
3452 {
3453         struct page *page;
3454         int i;
3455
3456         ASSERT(vcpu);
3457
3458         /*
3459          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
3460          * Therefore we need to allocate shadow page tables in the first
3461          * 4GB of memory, which happens to fit the DMA32 zone.
3462          */
3463         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
3464         if (!page)
3465                 return -ENOMEM;
3466
3467         vcpu->arch.mmu.pae_root = page_address(page);
3468         for (i = 0; i < 4; ++i)
3469                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3470
3471         return 0;
3472 }
3473
3474 int kvm_mmu_create(struct kvm_vcpu *vcpu)
3475 {
3476         ASSERT(vcpu);
3477         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3478
3479         return alloc_mmu_pages(vcpu);
3480 }
3481
3482 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
3483 {
3484         ASSERT(vcpu);
3485         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3486
3487         return init_kvm_mmu(vcpu);
3488 }
3489
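/*
 * Write-protect every leaf spte that maps a page in the given memslot,
 * typically when dirty logging is enabled for the slot.  Large sptes
 * are simply dropped so the pages are refaulted at 4k granularity.
 */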
3490 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
3491 {
3492         struct kvm_mmu_page *sp;
3493
3494         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
3495                 int i;
3496                 u64 *pt;
3497
3498                 if (!test_bit(slot, sp->slot_bitmap))
3499                         continue;
3500
3501                 pt = sp->spt;
3502                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3503                         if (!is_shadow_present_pte(pt[i]) ||
3504                               !is_last_spte(pt[i], sp->role.level))
3505                                 continue;
3506
3507                         if (is_large_pte(pt[i])) {
3508                                 drop_spte(kvm, &pt[i],
3509                                           shadow_trap_nonpresent_pte);
3510                                 --kvm->stat.lpages;
3511                                 continue;
3512                         }
3513
3514                         /* avoid RMW */
3515                         if (is_writable_pte(pt[i]))
3516                                 update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
3517                 }
3518         }
3519         kvm_flush_remote_tlbs(kvm);
3520 }
3521
3522 void kvm_mmu_zap_all(struct kvm *kvm)
3523 {
3524         struct kvm_mmu_page *sp, *node;
3525         LIST_HEAD(invalid_list);
3526
3527         spin_lock(&kvm->mmu_lock);
3528 restart:
3529         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
3530                 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
3531                         goto restart;
3532
3533         kvm_mmu_commit_zap_page(kvm, &invalid_list);
3534         spin_unlock(&kvm->mmu_lock);
3535 }
3536
3537 static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
3538                                                struct list_head *invalid_list)
3539 {
3540         struct kvm_mmu_page *page;
3541
3542         page = container_of(kvm->arch.active_mmu_pages.prev,
3543                             struct kvm_mmu_page, link);
3544         return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3545 }
3546
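/*
 * Memory-shrinker callback: zap one shadow page from the first VM on
 * vm_list that still has any, move that VM to the tail of the list so
 * the cost is spread across VMs, and report the global count of
 * shadow pages in use.
 */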
3547 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
3548 {
3549         struct kvm *kvm;
3550         struct kvm *kvm_freed = NULL;
3551         int nr_to_scan = sc->nr_to_scan;
3552
3553         if (nr_to_scan == 0)
3554                 goto out;
3555
3556         raw_spin_lock(&kvm_lock);
3557
3558         list_for_each_entry(kvm, &vm_list, vm_list) {
3559                 int idx, freed_pages;
3560                 LIST_HEAD(invalid_list);
3561
3562                 idx = srcu_read_lock(&kvm->srcu);
3563                 spin_lock(&kvm->mmu_lock);
3564                 if (!kvm_freed && nr_to_scan > 0 &&
3565                     kvm->arch.n_used_mmu_pages > 0) {
3566                         freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
3567                                                           &invalid_list);
3568                         kvm_freed = kvm;
3569                 }
3570                 nr_to_scan--;
3571
3572                 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3573                 spin_unlock(&kvm->mmu_lock);
3574                 srcu_read_unlock(&kvm->srcu, idx);
3575         }
3576         if (kvm_freed)
3577                 list_move_tail(&kvm_freed->vm_list, &vm_list);
3578
3579         raw_spin_unlock(&kvm_lock);
3580
3581 out:
3582         return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
3583 }
3584
3585 static struct shrinker mmu_shrinker = {
3586         .shrink = mmu_shrink,
3587         .seeks = DEFAULT_SEEKS * 10,
3588 };
3589
3590 static void mmu_destroy_caches(void)
3591 {
3592         if (pte_chain_cache)
3593                 kmem_cache_destroy(pte_chain_cache);
3594         if (rmap_desc_cache)
3595                 kmem_cache_destroy(rmap_desc_cache);
3596         if (mmu_page_header_cache)
3597                 kmem_cache_destroy(mmu_page_header_cache);
3598 }
3599
3600 int kvm_mmu_module_init(void)
3601 {
3602         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
3603                                             sizeof(struct kvm_pte_chain),
3604                                             0, 0, NULL);
3605         if (!pte_chain_cache)
3606                 goto nomem;
3607         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
3608                                             sizeof(struct kvm_rmap_desc),
3609                                             0, 0, NULL);
3610         if (!rmap_desc_cache)
3611                 goto nomem;
3612
3613         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
3614                                                   sizeof(struct kvm_mmu_page),
3615                                                   0, 0, NULL);
3616         if (!mmu_page_header_cache)
3617                 goto nomem;
3618
3619         if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
3620                 goto nomem;
3621
3622         register_shrinker(&mmu_shrinker);
3623
3624         return 0;
3625
3626 nomem:
3627         mmu_destroy_caches();
3628         return -ENOMEM;
3629 }
3630
3631 /*
3632  * Calculate the number of mmu pages needed for the VM.
3633  */
3634 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3635 {
3636         int i;
3637         unsigned int nr_mmu_pages;
3638         unsigned int  nr_pages = 0;
3639         struct kvm_memslots *slots;
3640
3641         slots = kvm_memslots(kvm);
3642
3643         for (i = 0; i < slots->nmemslots; i++)
3644                 nr_pages += slots->memslots[i].npages;
3645
3646         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3647         nr_mmu_pages = max(nr_mmu_pages,
3648                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3649
3650         return nr_mmu_pages;
3651 }
3652
3653 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3654                                 unsigned len)
3655 {
3656         if (len > buffer->len)
3657                 return NULL;
3658         return buffer->ptr;
3659 }
3660
3661 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3662                                 unsigned len)
3663 {
3664         void *ret;
3665
3666         ret = pv_mmu_peek_buffer(buffer, len);
3667         if (!ret)
3668                 return ret;
3669         buffer->ptr += len;
3670         buffer->len -= len;
3671         buffer->processed += len;
3672         return ret;
3673 }
3674
3675 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3676                              gpa_t addr, gpa_t value)
3677 {
3678         int bytes = 8;
3679         int r;
3680
3681         if (!is_long_mode(vcpu) && !is_pae(vcpu))
3682                 bytes = 4;
3683
3684         r = mmu_topup_memory_caches(vcpu);
3685         if (r)
3686                 return r;
3687
3688         if (!emulator_write_phys(vcpu, addr, &value, bytes))
3689                 return -EFAULT;
3690
3691         return 1;
3692 }
3693
3694 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3695 {
3696         (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
3697         return 1;
3698 }
3699
3700 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3701 {
3702         spin_lock(&vcpu->kvm->mmu_lock);
3703         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3704         spin_unlock(&vcpu->kvm->mmu_lock);
3705         return 1;
3706 }
3707
3708 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3709                              struct kvm_pv_mmu_op_buffer *buffer)
3710 {
3711         struct kvm_mmu_op_header *header;
3712
3713         header = pv_mmu_peek_buffer(buffer, sizeof *header);
3714         if (!header)
3715                 return 0;
3716         switch (header->op) {
3717         case KVM_MMU_OP_WRITE_PTE: {
3718                 struct kvm_mmu_op_write_pte *wpte;
3719
3720                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3721                 if (!wpte)
3722                         return 0;
3723                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3724                                         wpte->pte_val);
3725         }
3726         case KVM_MMU_OP_FLUSH_TLB: {
3727                 struct kvm_mmu_op_flush_tlb *ftlb;
3728
3729                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3730                 if (!ftlb)
3731                         return 0;
3732                 return kvm_pv_mmu_flush_tlb(vcpu);
3733         }
3734         case KVM_MMU_OP_RELEASE_PT: {
3735                 struct kvm_mmu_op_release_pt *rpt;
3736
3737                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3738                 if (!rpt)
3739                         return 0;
3740                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3741         }
3742         default: return 0;
3743         }
3744 }
3745
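/*
 * Entry point for the paravirtual MMU op hypercall: copy the guest's
 * op buffer (up to sizeof(buffer->buf) bytes), process ops until the
 * buffer is exhausted or an op fails to parse, and report the number
 * of bytes consumed via *ret.
 */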
3746 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3747                   gpa_t addr, unsigned long *ret)
3748 {
3749         int r;
3750         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3751
3752         buffer->ptr = buffer->buf;
3753         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3754         buffer->processed = 0;
3755
3756         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3757         if (r)
3758                 goto out;
3759
3760         while (buffer->len) {
3761                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3762                 if (r < 0)
3763                         goto out;
3764                 if (r == 0)
3765                         break;
3766         }
3767
3768         r = 1;
3769 out:
3770         *ret = buffer->processed;
3771         return r;
3772 }
3773
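/*
 * Record the spte found at each level of the shadow walk for addr in
 * sptes[level - 1], stopping after the first non-present entry, and
 * return how many levels were recorded.
 */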
3774 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3775 {
3776         struct kvm_shadow_walk_iterator iterator;
3777         int nr_sptes = 0;
3778
3779         spin_lock(&vcpu->kvm->mmu_lock);
3780         for_each_shadow_entry(vcpu, addr, iterator) {
3781                 sptes[iterator.level-1] = *iterator.sptep;
3782                 nr_sptes++;
3783                 if (!is_shadow_present_pte(*iterator.sptep))
3784                         break;
3785         }
3786         spin_unlock(&vcpu->kvm->mmu_lock);
3787
3788         return nr_sptes;
3789 }
3790 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3791
3792 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
3793 {
3794         ASSERT(vcpu);
3795
3796         destroy_kvm_mmu(vcpu);
3797         free_mmu_pages(vcpu);
3798         mmu_free_memory_caches(vcpu);
3799 }
3800
3801 #ifdef CONFIG_KVM_MMU_AUDIT
3802 #include "mmu_audit.c"
3803 #else
3804 static void mmu_audit_disable(void) { }
3805 #endif
3806
3807 void kvm_mmu_module_exit(void)
3808 {
3809         mmu_destroy_caches();
3810         percpu_counter_destroy(&kvm_total_used_mmu_pages);
3811         unregister_shrinker(&mmu_shrinker);
3812         mmu_audit_disable();
3813 }