/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "kvm.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG
#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
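
/*
 * PSE-36: a 4MB page directory entry can address memory above 4GB by
 * carrying physical address bits 32-35 in pte bits 13-16; the mask
 * above extracts those four bits (see pse36_gfn_delta() below).
 */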
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
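
/*
 * Each 64-bit paging level translates 9 bits of the address (512
 * eight-byte entries per page): level 1 indexes address bits 12-20,
 * level 2 bits 21-29, and so on up to the root.
 */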
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)
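
/*
 * Root levels by guest paging mode: 4-level paging in long mode,
 * 3 levels for PAE, 2 levels for legacy 32-bit paging.
 */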
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
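
/*
 * The caches below pre-allocate the objects the fault paths need
 * (pte chains, rmap descriptors, shadow pages) while sleeping is
 * still allowed; the fault handlers then just pop objects off a
 * cache instead of calling an allocator.
 */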
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_mmu_free_some_pages(vcpu);
	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}
/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
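/*
 * For example (illustrative): a gfn mapped by exactly one spte stores
 * the spte pointer directly, *rmapp == (unsigned long)spte, with bit
 * zero clear. When a second spte appears, rmap_add() below converts
 * the slot to *rmapp == (unsigned long)desc | 1, where the descriptor
 * holds up to RMAP_EXT sptes and chains to desc->more when full.
 */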
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *page;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	page = page_header(__pa(spte));
	page->gfns[spte - page->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}
static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
	struct page *release_page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(release_page);
	else
		kvm_release_page_clean(release_page);
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}
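
/*
 * rmap_next() above is an iterator: passing spte == NULL yields the
 * first mapping of the gfn, and passing the previously returned spte
 * yields the following one, until it returns NULL.
 */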
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte))
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(kvm);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	__free_page(virt_to_page(page_head->gfns));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}
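
/*
 * For 32-bit guests, a guest page table page maps twice as much
 * address space as its PAE-format shadow, so one guest page is
 * shadowed by multiple shadow pages; role.quadrant records which
 * fraction of the guest page a given shadow page covers.
 */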
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	vcpu->mmu.prefetch_page(vcpu, page);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return page;
}
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->last_pte_updated = NULL;
}
static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}
/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get deadlocks.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before
	 * we can change the limit.
	 */
	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
				       - kvm->n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->n_free_mmu_pages = 0;
	} else
		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->n_alloc_mmu_pages;

	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}
hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	struct page *page;
	hpa_t hpa;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
	hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
	if (is_error_page(page))
		return hpa | HPA_ERR_MASK;
	return hpa;
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu->kvm, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
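
/*
 * Map a page while the guest has paging disabled: walk down from the
 * shadow root, allocating intermediate shadow pages as needed, and
 * install a writable pte at the last level.
 */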
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;
	struct page *page;

	page = pfn_to_page(p >> PAGE_SHIFT);
	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			int was_rmapped;

			pte = table[index];
			was_rmapped = is_rmap_pte(pte);
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
				kvm_release_page_clean(page);
				return 0;
			}
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table,
						v >> PAGE_SHIFT);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			if (!was_rmapped)
				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			else
				kvm_release_page_clean(page);
			return 0;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}
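
/*
 * 64-bit guests use a single shadow root; PAE and legacy 32-bit
 * guests (which are shadowed with PAE-format tables) instead use the
 * four pae_root entries, one per gigabyte of guest address space.
 */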
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr)) {
		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
						   >> PAGE_SHIFT));
		return 1;
	}

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	mutex_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
}
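
/*
 * Other vcpus' TLBs only need flushing when the update takes
 * something away: the mapping vanishes, points at a different frame,
 * or loses a permission bit. NX is inverted below so that setting NX
 * counts as revoking execute permission.
 */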
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}
static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}
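
/*
 * Called on emulated writes to a guest page table. A page that takes
 * repeated writes while its sptes are never accessed is assumed to no
 * longer be used as a page table and is zapped, as is any page hit by
 * a misaligned (non-pte-sized) write.
 */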
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
		vcpu->last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB. So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
					      page_offset & (pte_size - 1));
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
}
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
		++vcpu->kvm->stat.mmu_recycled;
	}
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	mutex_lock(&vcpu->kvm->lock);
	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
	mutex_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->n_requested_mmu_pages)
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
	else
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}
void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}
/*
 * Calculate mmu pages needed for kvm.
 */
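/*
 * As an illustration: if KVM_PERMILLE_MMU_PAGES were 20 (2%), a guest
 * with 131072 pages (512MB) of memory would be sized at 2621 shadow
 * pages, and very small guests would be rounded up to
 * KVM_MIN_ALLOC_MMU_PAGES. (The constants here are examples; see the
 * headers for the real values.)
 */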
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
#ifdef AUDIT

static const char *audit_msg;
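
/*
 * Sign-extend bit 47 so the audit code compares canonical x86-64
 * addresses: gva is shifted left and then arithmetically shifted
 * right by 16 bits.
 */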
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->mmu.root_level, va, level, ent);
			else
				audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
			struct page *page;

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
					   >> PAGE_SHIFT);
			kvm_release_page_clean(page);
		}
	}
}
static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}
static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		if (page->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
		gfn = unalias_gfn(vcpu->kvm, page->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif