2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
9 * Copyright (C) 2006 Qumranet, Inc.
12 * Yaniv Kamay <yaniv@qumranet.com>
13 * Avi Kivity <avi@qumranet.com>
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
31 #include <asm/cmpxchg.h>
39 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
41 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
46 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
47 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
51 #define pgprintk(x...) do { } while (0)
52 #define rmap_printk(x...) do { } while (0)
56 #if defined(MMU_DEBUG) || defined(AUDIT)
61 #define ASSERT(x) do { } while (0)
65 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
66 __FILE__, __LINE__, #x); \
70 #define PT64_PT_BITS 9
71 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
72 #define PT32_PT_BITS 10
73 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
75 #define PT_WRITABLE_SHIFT 1
77 #define PT_PRESENT_MASK (1ULL << 0)
78 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
79 #define PT_USER_MASK (1ULL << 2)
80 #define PT_PWT_MASK (1ULL << 3)
81 #define PT_PCD_MASK (1ULL << 4)
82 #define PT_ACCESSED_MASK (1ULL << 5)
83 #define PT_DIRTY_MASK (1ULL << 6)
84 #define PT_PAGE_SIZE_MASK (1ULL << 7)
85 #define PT_PAT_MASK (1ULL << 7)
86 #define PT_GLOBAL_MASK (1ULL << 8)
87 #define PT64_NX_MASK (1ULL << 63)
89 #define PT_PAT_SHIFT 7
90 #define PT_DIR_PAT_SHIFT 12
91 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
93 #define PT32_DIR_PSE36_SIZE 4
94 #define PT32_DIR_PSE36_SHIFT 13
95 #define PT32_DIR_PSE36_MASK \
96 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
99 #define PT_FIRST_AVAIL_BITS_SHIFT 9
100 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
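/*
 * Bits 9-11 of a pte are ignored by the hardware page walker
 * ("available to software"), so the shadow MMU uses the first of them
 * to tag shadow ptes that map emulated MMIO; see is_io_pte() below.
 */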
102 #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
104 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
106 #define PT64_LEVEL_BITS 9
108 #define PT64_LEVEL_SHIFT(level) \
109 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
111 #define PT64_LEVEL_MASK(level) \
112 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
114 #define PT64_INDEX(address, level)\
115 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
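/*
 * Worked example for the 64-bit macros: with 4KB pages, level 1 uses
 * address bits 12-20 ((addr >> 12) & 511), level 2 uses bits 21-29,
 * level 3 bits 30-38, and level 4 bits 39-47; each level consumes
 * PT64_LEVEL_BITS (9) bits above the 12-bit page offset.
 */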
118 #define PT32_LEVEL_BITS 10
120 #define PT32_LEVEL_SHIFT(level) \
121 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
123 #define PT32_LEVEL_MASK(level) \
124 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
126 #define PT32_INDEX(address, level)\
127 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
130 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
131 #define PT64_DIR_BASE_ADDR_MASK \
132 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
134 #define PT32_BASE_ADDR_MASK PAGE_MASK
135 #define PT32_DIR_BASE_ADDR_MASK \
136 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
138 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
141 #define PFERR_PRESENT_MASK (1U << 0)
142 #define PFERR_WRITE_MASK (1U << 1)
143 #define PFERR_USER_MASK (1U << 2)
144 #define PFERR_FETCH_MASK (1U << 4)
146 #define PT64_ROOT_LEVEL 4
147 #define PT32_ROOT_LEVEL 2
148 #define PT32E_ROOT_LEVEL 3
150 #define PT_DIRECTORY_LEVEL 2
151 #define PT_PAGE_TABLE_LEVEL 1
155 struct kvm_rmap_desc {
156 u64 *shadow_ptes[RMAP_EXT];
157 struct kvm_rmap_desc *more;
160 static struct kmem_cache *pte_chain_cache;
161 static struct kmem_cache *rmap_desc_cache;
162 static struct kmem_cache *mmu_page_header_cache;
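/*
 * The two not-present encodings below are installed by the vendor
 * module through kvm_mmu_set_nonpresent_ptes().  The "trap" value is
 * used when a fault on the pte must be seen by the MMU; the "notrap"
 * value is used when the corresponding guest pte is itself not
 * present, so that (where the vendor module supports it) the fault can
 * be delivered straight to the guest.
 */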
164 static u64 __read_mostly shadow_trap_nonpresent_pte;
165 static u64 __read_mostly shadow_notrap_nonpresent_pte;
167 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
169 shadow_trap_nonpresent_pte = trap_pte;
170 shadow_notrap_nonpresent_pte = notrap_pte;
172 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
174 static int is_write_protection(struct kvm_vcpu *vcpu)
176 return vcpu->cr0 & X86_CR0_WP;
179 static int is_cpuid_PSE36(void)
184 static int is_nx(struct kvm_vcpu *vcpu)
186 return vcpu->shadow_efer & EFER_NX;
189 static int is_present_pte(unsigned long pte)
191 return pte & PT_PRESENT_MASK;
194 static int is_shadow_present_pte(u64 pte)
196 pte &= ~PT_SHADOW_IO_MARK;
197 return pte != shadow_trap_nonpresent_pte
198 && pte != shadow_notrap_nonpresent_pte;
201 static int is_writeble_pte(unsigned long pte)
203 return pte & PT_WRITABLE_MASK;
206 static int is_dirty_pte(unsigned long pte)
208 return pte & PT_DIRTY_MASK;
211 static int is_io_pte(unsigned long pte)
213 return pte & PT_SHADOW_IO_MARK;
216 static int is_rmap_pte(u64 pte)
218 return pte != shadow_trap_nonpresent_pte
219 && pte != shadow_notrap_nonpresent_pte;
222 static gfn_t pse36_gfn_delta(u32 gpte)
224 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
226 return (gpte & PT32_DIR_PSE36_MASK) << shift;
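/*
 * With PSE-36, bits 13-16 of a 4MB guest pde hold bits 32-35 of the
 * physical frame address; the shift above (32 - 13 - 12 = 7) moves the
 * masked bits up to gfn bits 20-23, i.e. address bits 32-35.
 */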
229 static void set_shadow_pte(u64 *sptep, u64 spte)
232 set_64bit((unsigned long *)sptep, spte);
234 set_64bit((unsigned long long *)sptep, spte);
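/*
 * The per-vcpu memory caches below are topped up with GFP_KERNEL
 * allocations before the MMU starts touching shadow page tables, so
 * that the page-fault and pte-write paths can pop pre-allocated
 * objects without having to handle allocation failure or sleep at an
 * inconvenient point.
 */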
238 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
239 struct kmem_cache *base_cache, int min)
243 if (cache->nobjs >= min)
245 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
246 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
249 cache->objects[cache->nobjs++] = obj;
254 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
257 kfree(mc->objects[--mc->nobjs]);
260 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
265 if (cache->nobjs >= min)
267 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
268 page = alloc_page(GFP_KERNEL);
271 set_page_private(page, 0);
272 cache->objects[cache->nobjs++] = page_address(page);
277 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
280 free_page((unsigned long)mc->objects[--mc->nobjs]);
283 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
287 kvm_mmu_free_some_pages(vcpu);
288 r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
292 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
296 r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
299 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
300 mmu_page_header_cache, 4);
305 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
307 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
308 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
309 mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
310 mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
313 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
319 p = mc->objects[--mc->nobjs];
324 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
326 return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
327 sizeof(struct kvm_pte_chain));
330 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
335 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
337 return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
338 sizeof(struct kvm_rmap_desc));
341 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
347 * Take gfn and return the reverse mapping to it.
348 * Note: gfn must be unaliased before this function gets called
351 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
353 struct kvm_memory_slot *slot;
355 slot = gfn_to_memslot(kvm, gfn);
356 return &slot->rmap[gfn - slot->base_gfn];
360 * Reverse mapping data structures:
362 * If *rmapp bit zero is zero, then *rmapp points to the shadow page table entry
363 * that points to page_address(page).
365 * If *rmapp bit zero is one, then (*rmapp & ~1) points to a struct kvm_rmap_desc
366 * containing more mappings.
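/*
 * A minimal decoding sketch (illustration only; the real walkers are
 * rmap_add() and rmap_next() below):
 *
 *	if (!*rmapp)
 *		...;					(no mappings)
 *	else if (!(*rmapp & 1))
 *		spte = (u64 *)*rmapp;			(single spte)
 *	else
 *		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);	(chain)
 */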
368 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
370 struct kvm_mmu_page *sp;
371 struct kvm_rmap_desc *desc;
372 unsigned long *rmapp;
375 if (!is_rmap_pte(*spte))
377 gfn = unalias_gfn(vcpu->kvm, gfn);
378 sp = page_header(__pa(spte));
379 sp->gfns[spte - sp->spt] = gfn;
380 rmapp = gfn_to_rmap(vcpu->kvm, gfn);
382 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
383 *rmapp = (unsigned long)spte;
384 } else if (!(*rmapp & 1)) {
385 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
386 desc = mmu_alloc_rmap_desc(vcpu);
387 desc->shadow_ptes[0] = (u64 *)*rmapp;
388 desc->shadow_ptes[1] = spte;
389 *rmapp = (unsigned long)desc | 1;
391 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
392 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
393 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
395 if (desc->shadow_ptes[RMAP_EXT-1]) {
396 desc->more = mmu_alloc_rmap_desc(vcpu);
399 for (i = 0; desc->shadow_ptes[i]; ++i)
401 desc->shadow_ptes[i] = spte;
405 static void rmap_desc_remove_entry(unsigned long *rmapp,
406 struct kvm_rmap_desc *desc,
408 struct kvm_rmap_desc *prev_desc)
412 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
414 desc->shadow_ptes[i] = desc->shadow_ptes[j];
415 desc->shadow_ptes[j] = NULL;
418 if (!prev_desc && !desc->more)
419 *rmapp = (unsigned long)desc->shadow_ptes[0];
422 prev_desc->more = desc->more;
424 *rmapp = (unsigned long)desc->more | 1;
425 mmu_free_rmap_desc(desc);
428 static void rmap_remove(struct kvm *kvm, u64 *spte)
430 struct kvm_rmap_desc *desc;
431 struct kvm_rmap_desc *prev_desc;
432 struct kvm_mmu_page *sp;
433 struct page *release_page;
434 unsigned long *rmapp;
437 if (!is_rmap_pte(*spte))
439 sp = page_header(__pa(spte));
440 release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
441 if (is_writeble_pte(*spte))
442 kvm_release_page_dirty(release_page);
444 kvm_release_page_clean(release_page);
445 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
447 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
449 } else if (!(*rmapp & 1)) {
450 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
451 if ((u64 *)*rmapp != spte) {
452 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
458 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
459 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
462 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
463 if (desc->shadow_ptes[i] == spte) {
464 rmap_desc_remove_entry(rmapp,
476 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
478 struct kvm_rmap_desc *desc;
479 struct kvm_rmap_desc *prev_desc;
485 else if (!(*rmapp & 1)) {
487 return (u64 *)*rmapp;
490 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
494 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
495 if (prev_spte == spte)
496 return desc->shadow_ptes[i];
497 prev_spte = desc->shadow_ptes[i];
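/*
 * rmap_write_protect() strips PT_WRITABLE_MASK from every shadow pte
 * that maps @gfn.  It is called when a guest page starts being
 * shadowed as a page table (see kvm_mmu_get_page()), so that guest
 * writes to it fault, get emulated, and are propagated to the shadow
 * copy by kvm_mmu_pte_write().
 */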
504 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
506 unsigned long *rmapp;
509 gfn = unalias_gfn(kvm, gfn);
510 rmapp = gfn_to_rmap(kvm, gfn);
512 spte = rmap_next(kvm, rmapp, NULL);
515 BUG_ON(!(*spte & PT_PRESENT_MASK));
516 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
517 if (is_writeble_pte(*spte))
518 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
519 kvm_flush_remote_tlbs(kvm);
520 spte = rmap_next(kvm, rmapp, spte);
525 static int is_empty_shadow_page(u64 *spt)
530 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
531 if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
532 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
540 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
542 ASSERT(is_empty_shadow_page(sp->spt));
544 __free_page(virt_to_page(sp->spt));
545 __free_page(virt_to_page(sp->gfns));
547 ++kvm->n_free_mmu_pages;
550 static unsigned kvm_page_table_hashfn(gfn_t gfn)
555 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
558 struct kvm_mmu_page *sp;
560 if (!vcpu->kvm->n_free_mmu_pages)
563 sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
564 sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
565 sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
566 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
567 list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
568 ASSERT(is_empty_shadow_page(sp->spt));
571 sp->parent_pte = parent_pte;
572 --vcpu->kvm->n_free_mmu_pages;
576 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
577 struct kvm_mmu_page *sp, u64 *parent_pte)
579 struct kvm_pte_chain *pte_chain;
580 struct hlist_node *node;
585 if (!sp->multimapped) {
586 u64 *old = sp->parent_pte;
589 sp->parent_pte = parent_pte;
593 pte_chain = mmu_alloc_pte_chain(vcpu);
594 INIT_HLIST_HEAD(&sp->parent_ptes);
595 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
596 pte_chain->parent_ptes[0] = old;
598 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
599 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
601 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
602 if (!pte_chain->parent_ptes[i]) {
603 pte_chain->parent_ptes[i] = parent_pte;
607 pte_chain = mmu_alloc_pte_chain(vcpu);
609 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
610 pte_chain->parent_ptes[0] = parent_pte;
613 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
616 struct kvm_pte_chain *pte_chain;
617 struct hlist_node *node;
620 if (!sp->multimapped) {
621 BUG_ON(sp->parent_pte != parent_pte);
622 sp->parent_pte = NULL;
625 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
626 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
627 if (!pte_chain->parent_ptes[i])
629 if (pte_chain->parent_ptes[i] != parent_pte)
631 while (i + 1 < NR_PTE_CHAIN_ENTRIES
632 && pte_chain->parent_ptes[i + 1]) {
633 pte_chain->parent_ptes[i]
634 = pte_chain->parent_ptes[i + 1];
637 pte_chain->parent_ptes[i] = NULL;
639 hlist_del(&pte_chain->link);
640 mmu_free_pte_chain(pte_chain);
641 if (hlist_empty(&sp->parent_ptes)) {
643 sp->parent_pte = NULL;
651 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
654 struct hlist_head *bucket;
655 struct kvm_mmu_page *sp;
656 struct hlist_node *node;
658 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
659 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
660 bucket = &kvm->mmu_page_hash[index];
661 hlist_for_each_entry(sp, node, bucket, hash_link)
662 if (sp->gfn == gfn && !sp->role.metaphysical) {
663 pgprintk("%s: found role %x\n",
664 __FUNCTION__, sp->role.word);
670 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
675 unsigned hugepage_access,
678 union kvm_mmu_page_role role;
681 struct hlist_head *bucket;
682 struct kvm_mmu_page *sp;
683 struct hlist_node *node;
686 role.glevels = vcpu->mmu.root_level;
688 role.metaphysical = metaphysical;
689 role.hugepage_access = hugepage_access;
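/*
 * When the guest uses 32-bit non-PAE paging, a guest page-table page
 * holds 1024 4-byte entries but a shadow page holds only 512 8-byte
 * entries, so each guest page is shadowed by two (or, for the guest
 * page directory, four) shadow pages; role.quadrant, computed below,
 * records which part of the guest page this shadow page covers.
 */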
690 if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
691 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
692 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
693 role.quadrant = quadrant;
695 pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
697 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
698 bucket = &vcpu->kvm->mmu_page_hash[index];
699 hlist_for_each_entry(sp, node, bucket, hash_link)
700 if (sp->gfn == gfn && sp->role.word == role.word) {
701 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
702 pgprintk("%s: found\n", __FUNCTION__);
705 sp = kvm_mmu_alloc_page(vcpu, parent_pte);
708 pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
711 hlist_add_head(&sp->hash_link, bucket);
712 vcpu->mmu.prefetch_page(vcpu, sp);
714 rmap_write_protect(vcpu->kvm, gfn);
718 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
719 struct kvm_mmu_page *sp)
727 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
728 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
729 if (is_shadow_present_pte(pt[i]))
730 rmap_remove(kvm, &pt[i]);
731 pt[i] = shadow_trap_nonpresent_pte;
733 kvm_flush_remote_tlbs(kvm);
737 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
740 pt[i] = shadow_trap_nonpresent_pte;
741 if (!is_shadow_present_pte(ent))
743 ent &= PT64_BASE_ADDR_MASK;
744 mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
746 kvm_flush_remote_tlbs(kvm);
749 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
751 mmu_page_remove_parent_pte(sp, parent_pte);
754 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
758 for (i = 0; i < KVM_MAX_VCPUS; ++i)
760 kvm->vcpus[i]->last_pte_updated = NULL;
763 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
767 ++kvm->stat.mmu_shadow_zapped;
768 while (sp->multimapped || sp->parent_pte) {
769 if (!sp->multimapped)
770 parent_pte = sp->parent_pte;
772 struct kvm_pte_chain *chain;
774 chain = container_of(sp->parent_ptes.first,
775 struct kvm_pte_chain, link);
776 parent_pte = chain->parent_ptes[0];
779 kvm_mmu_put_page(sp, parent_pte);
780 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
782 kvm_mmu_page_unlink_children(kvm, sp);
783 if (!sp->root_count) {
784 hlist_del(&sp->hash_link);
785 kvm_mmu_free_page(kvm, sp);
787 list_move(&sp->link, &kvm->active_mmu_pages);
788 kvm_mmu_reset_last_pte_updated(kvm);
792 * Changing the number of mmu pages allocated to the vm
793 * Note: if kvm_nr_mmu_pages is too small, you will get deadlock
795 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
798 * If we set the number of mmu pages to be smaller than the
799 * number of active pages, we must free some mmu pages before we
803 if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
805 int n_used_mmu_pages = kvm->n_alloc_mmu_pages
806 - kvm->n_free_mmu_pages;
808 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
809 struct kvm_mmu_page *page;
811 page = container_of(kvm->active_mmu_pages.prev,
812 struct kvm_mmu_page, link);
813 kvm_mmu_zap_page(kvm, page);
816 kvm->n_free_mmu_pages = 0;
819 kvm->n_free_mmu_pages += kvm_nr_mmu_pages
820 - kvm->n_alloc_mmu_pages;
822 kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
825 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
828 struct hlist_head *bucket;
829 struct kvm_mmu_page *sp;
830 struct hlist_node *node, *n;
833 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
835 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
836 bucket = &kvm->mmu_page_hash[index];
837 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
838 if (sp->gfn == gfn && !sp->role.metaphysical) {
839 pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
841 kvm_mmu_zap_page(kvm, sp);
847 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
849 struct kvm_mmu_page *sp;
851 while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
852 pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
853 kvm_mmu_zap_page(kvm, sp);
857 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
859 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
860 struct kvm_mmu_page *sp = page_header(__pa(pte));
862 __set_bit(slot, &sp->slot_bitmap);
865 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
867 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
869 if (gpa == UNMAPPED_GVA)
871 return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
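/*
 * The nonpaging_* functions below back a guest that runs with paging
 * disabled: guest "virtual" addresses are guest physical addresses, so
 * nonpaging_map() simply builds a shadow tree mapping them 1:1 onto
 * the corresponding host pages.
 */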
874 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
878 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
880 int level = PT32E_ROOT_LEVEL;
881 hpa_t table_addr = vcpu->mmu.root_hpa;
884 u32 index = PT64_INDEX(v, level);
888 ASSERT(VALID_PAGE(table_addr));
889 table = __va(table_addr);
895 was_rmapped = is_rmap_pte(pte);
896 if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
897 kvm_release_page_clean(page);
900 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
901 page_header_update_slot(vcpu->kvm, table,
903 table[index] = page_to_phys(page)
904 | PT_PRESENT_MASK | PT_WRITABLE_MASK
907 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
909 kvm_release_page_clean(page);
914 if (table[index] == shadow_trap_nonpresent_pte) {
915 struct kvm_mmu_page *new_table;
918 pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
920 new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
922 1, 3, &table[index]);
924 pgprintk("nonpaging_map: ENOMEM\n");
925 kvm_release_page_clean(page);
929 table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
930 | PT_WRITABLE_MASK | PT_USER_MASK;
932 table_addr = table[index] & PT64_BASE_ADDR_MASK;
936 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
937 struct kvm_mmu_page *sp)
941 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
942 sp->spt[i] = shadow_trap_nonpresent_pte;
945 static void mmu_free_roots(struct kvm_vcpu *vcpu)
948 struct kvm_mmu_page *sp;
950 if (!VALID_PAGE(vcpu->mmu.root_hpa))
953 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
954 hpa_t root = vcpu->mmu.root_hpa;
956 sp = page_header(root);
958 vcpu->mmu.root_hpa = INVALID_PAGE;
962 for (i = 0; i < 4; ++i) {
963 hpa_t root = vcpu->mmu.pae_root[i];
966 root &= PT64_BASE_ADDR_MASK;
967 sp = page_header(root);
970 vcpu->mmu.pae_root[i] = INVALID_PAGE;
972 vcpu->mmu.root_hpa = INVALID_PAGE;
975 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
979 struct kvm_mmu_page *sp;
981 root_gfn = vcpu->cr3 >> PAGE_SHIFT;
984 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
985 hpa_t root = vcpu->mmu.root_hpa;
987 ASSERT(!VALID_PAGE(root));
988 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
989 PT64_ROOT_LEVEL, 0, 0, NULL);
990 root = __pa(sp->spt);
992 vcpu->mmu.root_hpa = root;
996 for (i = 0; i < 4; ++i) {
997 hpa_t root = vcpu->mmu.pae_root[i];
999 ASSERT(!VALID_PAGE(root));
1000 if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
1001 if (!is_present_pte(vcpu->pdptrs[i])) {
1002 vcpu->mmu.pae_root[i] = 0;
1005 root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
1006 } else if (vcpu->mmu.root_level == 0)
1008 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1009 PT32_ROOT_LEVEL, !is_paging(vcpu),
1011 root = __pa(sp->spt);
1013 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
1015 vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
1018 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1023 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1029 r = mmu_topup_memory_caches(vcpu);
1034 ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
1036 page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);
1038 if (is_error_page(page)) {
1039 kvm_release_page_clean(page);
1043 return nonpaging_map(vcpu, gva & PAGE_MASK, page);
1046 static void nonpaging_free(struct kvm_vcpu *vcpu)
1048 mmu_free_roots(vcpu);
1051 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1053 struct kvm_mmu *context = &vcpu->mmu;
1055 context->new_cr3 = nonpaging_new_cr3;
1056 context->page_fault = nonpaging_page_fault;
1057 context->gva_to_gpa = nonpaging_gva_to_gpa;
1058 context->free = nonpaging_free;
1059 context->prefetch_page = nonpaging_prefetch_page;
1060 context->root_level = 0;
1061 context->shadow_root_level = PT32E_ROOT_LEVEL;
1062 context->root_hpa = INVALID_PAGE;
1066 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1068 ++vcpu->stat.tlb_flush;
1069 kvm_x86_ops->tlb_flush(vcpu);
1072 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1074 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
1075 mmu_free_roots(vcpu);
1078 static void inject_page_fault(struct kvm_vcpu *vcpu,
1082 kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
1085 static void paging_free(struct kvm_vcpu *vcpu)
1087 nonpaging_free(vcpu);
1091 #include "paging_tmpl.h"
1095 #include "paging_tmpl.h"
1098 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1100 struct kvm_mmu *context = &vcpu->mmu;
1102 ASSERT(is_pae(vcpu));
1103 context->new_cr3 = paging_new_cr3;
1104 context->page_fault = paging64_page_fault;
1105 context->gva_to_gpa = paging64_gva_to_gpa;
1106 context->prefetch_page = paging64_prefetch_page;
1107 context->free = paging_free;
1108 context->root_level = level;
1109 context->shadow_root_level = level;
1110 context->root_hpa = INVALID_PAGE;
1114 static int paging64_init_context(struct kvm_vcpu *vcpu)
1116 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1119 static int paging32_init_context(struct kvm_vcpu *vcpu)
1121 struct kvm_mmu *context = &vcpu->mmu;
1123 context->new_cr3 = paging_new_cr3;
1124 context->page_fault = paging32_page_fault;
1125 context->gva_to_gpa = paging32_gva_to_gpa;
1126 context->free = paging_free;
1127 context->prefetch_page = paging32_prefetch_page;
1128 context->root_level = PT32_ROOT_LEVEL;
1129 context->shadow_root_level = PT32E_ROOT_LEVEL;
1130 context->root_hpa = INVALID_PAGE;
1134 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1136 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1139 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1142 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1144 if (!is_paging(vcpu))
1145 return nonpaging_init_context(vcpu);
1146 else if (is_long_mode(vcpu))
1147 return paging64_init_context(vcpu);
1148 else if (is_pae(vcpu))
1149 return paging32E_init_context(vcpu);
1151 return paging32_init_context(vcpu);
1154 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1157 if (VALID_PAGE(vcpu->mmu.root_hpa)) {
1158 vcpu->mmu.free(vcpu);
1159 vcpu->mmu.root_hpa = INVALID_PAGE;
1163 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1165 destroy_kvm_mmu(vcpu);
1166 return init_kvm_mmu(vcpu);
1168 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1170 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1174 mutex_lock(&vcpu->kvm->lock);
1175 r = mmu_topup_memory_caches(vcpu);
1178 mmu_alloc_roots(vcpu);
1179 kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1180 kvm_mmu_flush_tlb(vcpu);
1182 mutex_unlock(&vcpu->kvm->lock);
1185 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1187 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1189 mmu_free_roots(vcpu);
1192 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1193 struct kvm_mmu_page *sp,
1197 struct kvm_mmu_page *child;
1200 if (is_shadow_present_pte(pte)) {
1201 if (sp->role.level == PT_PAGE_TABLE_LEVEL)
1202 rmap_remove(vcpu->kvm, spte);
1204 child = page_header(pte & PT64_BASE_ADDR_MASK);
1205 mmu_page_remove_parent_pte(child, spte);
1208 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1211 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1212 struct kvm_mmu_page *sp,
1214 const void *new, int bytes,
1217 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
1218 ++vcpu->kvm->stat.mmu_pde_zapped;
1222 ++vcpu->kvm->stat.mmu_pte_updated;
1223 if (sp->role.glevels == PT32_ROOT_LEVEL)
1224 paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
1226 paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
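/*
 * need_remote_flush() decides whether other vcpus' TLBs must be
 * flushed after a shadow pte change: only if the old pte was present
 * and the new one drops the mapping, points at a different frame, or
 * removes a permission (the XOR with PT64_NX_MASK makes a newly set NX
 * bit count as a removed permission as well).
 */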
1229 static bool need_remote_flush(u64 old, u64 new)
1231 if (!is_shadow_present_pte(old))
1233 if (!is_shadow_present_pte(new))
1235 if ((old ^ new) & PT64_BASE_ADDR_MASK)
1237 old ^= PT64_NX_MASK;
1238 new ^= PT64_NX_MASK;
1239 return (old & ~new & PT64_PERM_MASK) != 0;
1242 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1244 if (need_remote_flush(old, new))
1245 kvm_flush_remote_tlbs(vcpu->kvm);
1247 kvm_mmu_flush_tlb(vcpu);
1250 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1252 u64 *spte = vcpu->last_pte_updated;
1254 return !!(spte && (*spte & PT_ACCESSED_MASK));
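/*
 * kvm_mmu_pte_write() runs after an emulated guest write; if the
 * written page is currently shadowed as a page table (such pages are
 * kept write-protected, which is what forces the write to be
 * emulated), the affected shadow ptes are zapped or updated in place.
 */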
1257 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1258 const u8 *new, int bytes)
1260 gfn_t gfn = gpa >> PAGE_SHIFT;
1261 struct kvm_mmu_page *sp;
1262 struct hlist_node *node, *n;
1263 struct hlist_head *bucket;
1267 unsigned offset = offset_in_page(gpa);
1269 unsigned page_offset;
1270 unsigned misaligned;
1276 pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1277 ++vcpu->kvm->stat.mmu_pte_write;
1278 kvm_mmu_audit(vcpu, "pre pte write");
1279 if (gfn == vcpu->last_pt_write_gfn
1280 && !last_updated_pte_accessed(vcpu)) {
1281 ++vcpu->last_pt_write_count;
1282 if (vcpu->last_pt_write_count >= 3)
1285 vcpu->last_pt_write_gfn = gfn;
1286 vcpu->last_pt_write_count = 1;
1287 vcpu->last_pte_updated = NULL;
1289 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
1290 bucket = &vcpu->kvm->mmu_page_hash[index];
1291 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1292 if (sp->gfn != gfn || sp->role.metaphysical)
1294 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1295 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1296 misaligned |= bytes < 4;
1297 if (misaligned || flooded) {
1299 * Misaligned accesses are too much trouble to fix
1300 * up; also, they usually indicate a page is not used
1303 * If we're seeing too many writes to a page,
1304 * it may no longer be a page table, or we may be
1305 * forking, in which case it is better to unmap the
1308 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1309 gpa, bytes, sp->role.word);
1310 kvm_mmu_zap_page(vcpu->kvm, sp);
1311 ++vcpu->kvm->stat.mmu_flooded;
1314 page_offset = offset;
1315 level = sp->role.level;
1317 if (sp->role.glevels == PT32_ROOT_LEVEL) {
1318 page_offset <<= 1; /* 32->64 */
1320 * A 32-bit pde maps 4MB while the shadow pdes map
1321 * only 2MB. So we need to double the offset again
1322 * and zap two pdes instead of one.
1324 if (level == PT32_ROOT_LEVEL) {
1325 page_offset &= ~7; /* kill rounding error */
1329 quadrant = page_offset >> PAGE_SHIFT;
1330 page_offset &= ~PAGE_MASK;
1331 if (quadrant != sp->role.quadrant)
1334 spte = &sp->spt[page_offset / sizeof(*spte)];
1337 mmu_pte_write_zap_pte(vcpu, sp, spte);
1338 mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
1339 page_offset & (pte_size - 1));
1340 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1344 kvm_mmu_audit(vcpu, "post pte write");
1347 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1349 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
1351 return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1354 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1356 while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
1357 struct kvm_mmu_page *sp;
1359 sp = container_of(vcpu->kvm->active_mmu_pages.prev,
1360 struct kvm_mmu_page, link);
1361 kvm_mmu_zap_page(vcpu->kvm, sp);
1362 ++vcpu->kvm->stat.mmu_recycled;
1366 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1369 enum emulation_result er;
1371 mutex_lock(&vcpu->kvm->lock);
1372 r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
1381 r = mmu_topup_memory_caches(vcpu);
1385 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1386 mutex_unlock(&vcpu->kvm->lock);
1391 case EMULATE_DO_MMIO:
1392 ++vcpu->stat.mmio_exits;
1395 kvm_report_emulation_failure(vcpu, "pagetable");
1401 mutex_unlock(&vcpu->kvm->lock);
1404 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1406 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1408 struct kvm_mmu_page *sp;
1410 while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
1411 sp = container_of(vcpu->kvm->active_mmu_pages.next,
1412 struct kvm_mmu_page, link);
1413 kvm_mmu_zap_page(vcpu->kvm, sp);
1415 free_page((unsigned long)vcpu->mmu.pae_root);
1418 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1425 if (vcpu->kvm->n_requested_mmu_pages)
1426 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
1428 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
1430 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1431 * Therefore we need to allocate shadow page tables in the first
1432 * 4GB of memory, which happens to fit the DMA32 zone.
1434 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1437 vcpu->mmu.pae_root = page_address(page);
1438 for (i = 0; i < 4; ++i)
1439 vcpu->mmu.pae_root[i] = INVALID_PAGE;
1444 free_mmu_pages(vcpu);
1448 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1451 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1453 return alloc_mmu_pages(vcpu);
1456 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1459 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1461 return init_kvm_mmu(vcpu);
1464 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1468 destroy_kvm_mmu(vcpu);
1469 free_mmu_pages(vcpu);
1470 mmu_free_memory_caches(vcpu);
1473 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1475 struct kvm_mmu_page *sp;
1477 list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
1481 if (!test_bit(slot, &sp->slot_bitmap))
1485 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1487 if (pt[i] & PT_WRITABLE_MASK)
1488 pt[i] &= ~PT_WRITABLE_MASK;
1492 void kvm_mmu_zap_all(struct kvm *kvm)
1494 struct kvm_mmu_page *sp, *node;
1496 list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
1497 kvm_mmu_zap_page(kvm, sp);
1499 kvm_flush_remote_tlbs(kvm);
1502 void kvm_mmu_module_exit(void)
1504 if (pte_chain_cache)
1505 kmem_cache_destroy(pte_chain_cache);
1506 if (rmap_desc_cache)
1507 kmem_cache_destroy(rmap_desc_cache);
1508 if (mmu_page_header_cache)
1509 kmem_cache_destroy(mmu_page_header_cache);
1512 int kvm_mmu_module_init(void)
1514 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1515 sizeof(struct kvm_pte_chain),
1517 if (!pte_chain_cache)
1519 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1520 sizeof(struct kvm_rmap_desc),
1522 if (!rmap_desc_cache)
1525 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1526 sizeof(struct kvm_mmu_page),
1528 if (!mmu_page_header_cache)
1534 kvm_mmu_module_exit();
1539 * Calculate mmu pages needed for kvm.
1541 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1544 unsigned int nr_mmu_pages;
1545 unsigned int nr_pages = 0;
1547 for (i = 0; i < kvm->nmemslots; i++)
1548 nr_pages += kvm->memslots[i].npages;
1550 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1551 nr_mmu_pages = max(nr_mmu_pages,
1552 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1554 return nr_mmu_pages;
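/*
 * Example (assuming KVM_PERMILLE_MMU_PAGES is 20, i.e. 2%): a guest
 * with 512MB of memory spans 131072 pages, so nr_mmu_pages comes out
 * to 131072 * 20 / 1000 = 2621, which is then raised to
 * KVM_MIN_ALLOC_MMU_PAGES if it is smaller than that minimum.
 */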
1559 static const char *audit_msg;
1561 static gva_t canonicalize(gva_t gva)
1563 #ifdef CONFIG_X86_64
1564 gva = (long long)(gva << 16) >> 16;
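/*
 * The shift pair above sign-extends bit 47 into bits 48-63, e.g.
 * 0x0000800000000000 becomes 0xffff800000000000, matching the
 * canonical-address form for 48-bit virtual addresses.
 */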
1569 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1570 gva_t va, int level)
1572 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
1574 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
1576 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
1579 if (ent == shadow_trap_nonpresent_pte)
1582 va = canonicalize(va);
1584 if (ent == shadow_notrap_nonpresent_pte)
1585 printk(KERN_ERR "audit: (%s) nontrapping pte"
1586 " in nonleaf level: levels %d gva %lx"
1587 " level %d pte %llx\n", audit_msg,
1588 vcpu->mmu.root_level, va, level, ent);
1590 audit_mappings_page(vcpu, ent, va, level - 1);
1592 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
1593 struct page *page = gpa_to_page(vcpu, gpa);
1594 hpa_t hpa = page_to_phys(page);
1596 if (is_shadow_present_pte(ent)
1597 && (ent & PT64_BASE_ADDR_MASK) != hpa)
1598 printk(KERN_ERR "xx audit error: (%s) levels %d"
1599 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
1600 audit_msg, vcpu->mmu.root_level,
1602 is_shadow_present_pte(ent));
1603 else if (ent == shadow_notrap_nonpresent_pte
1604 && !is_error_hpa(hpa))
1605 printk(KERN_ERR "audit: (%s) notrap shadow,"
1606 " valid guest gva %lx\n", audit_msg, va);
1607 kvm_release_page_clean(page);
1613 static void audit_mappings(struct kvm_vcpu *vcpu)
1617 if (vcpu->mmu.root_level == 4)
1618 audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
1620 for (i = 0; i < 4; ++i)
1621 if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
1622 audit_mappings_page(vcpu,
1623 vcpu->mmu.pae_root[i],
1628 static int count_rmaps(struct kvm_vcpu *vcpu)
1633 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1634 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
1635 struct kvm_rmap_desc *d;
1637 for (j = 0; j < m->npages; ++j) {
1638 unsigned long *rmapp = &m->rmap[j];
1642 if (!(*rmapp & 1)) {
1646 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
1648 for (k = 0; k < RMAP_EXT; ++k)
1649 if (d->shadow_ptes[k])
1660 static int count_writable_mappings(struct kvm_vcpu *vcpu)
1663 struct kvm_mmu_page *sp;
1666 list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
1669 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
1672 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1675 if (!(ent & PT_PRESENT_MASK))
1677 if (!(ent & PT_WRITABLE_MASK))
1685 static void audit_rmap(struct kvm_vcpu *vcpu)
1687 int n_rmap = count_rmaps(vcpu);
1688 int n_actual = count_writable_mappings(vcpu);
1690 if (n_rmap != n_actual)
1691 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1692 __FUNCTION__, audit_msg, n_rmap, n_actual);
1695 static void audit_write_protection(struct kvm_vcpu *vcpu)
1697 struct kvm_mmu_page *sp;
1698 struct kvm_memory_slot *slot;
1699 unsigned long *rmapp;
1702 list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
1703 if (sp->role.metaphysical)
1706 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
1707 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
1708 rmapp = &slot->rmap[gfn - slot->base_gfn];
1710 printk(KERN_ERR "%s: (%s) shadow page has writable"
1711 " mappings: gfn %lx role %x\n",
1712 __FUNCTION__, audit_msg, sp->gfn,
1717 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1724 audit_write_protection(vcpu);
1725 audit_mappings(vcpu);