KVM: MMU: Make set_cr3 a function pointer in kvm_mmu
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 311f6da..c8acb96 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
  */
 bool tdp_enabled = false;
 
-#undef MMU_DEBUG
+enum {
+       AUDIT_PRE_PAGE_FAULT,
+       AUDIT_POST_PAGE_FAULT,
+       AUDIT_PRE_PTE_WRITE,
+       AUDIT_POST_PTE_WRITE
+};
 
-#undef AUDIT
+char *audit_point_name[] = {
+       "pre page fault",
+       "post page fault",
+       "pre pte write",
+       "post pte write"
+};
 
-#ifdef AUDIT
-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
-#else
-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
-#endif
+#undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
 
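
The compile-time AUDIT hook is replaced by audit points: callers now fire trace_kvm_mmu_audit() with one of the enum values above, and audit_point_name[] maps each point back to the old message text. The tracepoint definition itself is not part of this hunk (mainline puts it in arch/x86/kvm/mmutrace.h); a minimal sketch of a plausible shape using the standard TRACE_EVENT machinery, with the field layout assumed for illustration only:

	/* Illustrative only; the real definition is not in this patch. */
	TRACE_EVENT(kvm_mmu_audit,
		TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
		TP_ARGS(vcpu, audit_point),

		TP_STRUCT__entry(
			__field(int, vcpu_id)
			__field(int, audit_point)
		),

		TP_fast_assign(
			__entry->vcpu_id = vcpu->vcpu_id;
			__entry->audit_point = audit_point;
		),

		/* audit_point_name[] recovers the old human-readable message */
		TP_printk("vcpu:%d %s", __entry->vcpu_id,
			  audit_point_name[__entry->audit_point])
	);
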
@@ -71,7 +77,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 
 #endif
 
-#if defined(MMU_DEBUG) || defined(AUDIT)
+#ifdef MMU_DEBUG
 static int dbg = 0;
 module_param(dbg, bool, 0644);
 #endif
@@ -89,6 +95,8 @@ module_param(oos_shadow, bool, 0644);
        }
 #endif
 
+#define PTE_PREFETCH_NUM               8
+
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
@@ -178,6 +186,7 @@ typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
+static struct percpu_counter kvm_total_used_mmu_pages;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
@@ -299,18 +308,50 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
 #endif
 }
 
+static bool spte_has_volatile_bits(u64 spte)
+{
+       if (!shadow_accessed_mask)
+               return false;
+
+       if (!is_shadow_present_pte(spte))
+               return false;
+
+       if ((spte & shadow_accessed_mask) &&
+             (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
+               return false;
+
+       return true;
+}
+
+static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+       return (old_spte & bit_mask) && !(new_spte & bit_mask);
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
-       u64 old_spte;
+       u64 mask, old_spte = *sptep;
+
+       WARN_ON(!is_rmap_spte(new_spte));
 
-       if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask) ||
-             !is_rmap_spte(*sptep))
+       new_spte |= old_spte & shadow_dirty_mask;
+
+       mask = shadow_accessed_mask;
+       if (is_writable_pte(old_spte))
+               mask |= shadow_dirty_mask;
+
+       if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
                __set_spte(sptep, new_spte);
-       else {
+       else
                old_spte = __xchg_spte(sptep, new_spte);
-               if (old_spte & shadow_accessed_mask)
-                       mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
-       }
+
+       if (!shadow_accessed_mask)
+               return;
+
+       if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
+               kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+       if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
+               kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
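
The rework of update_spte() above hinges on which bits of the old spte are "volatile": the accessed bit (and, for a writable spte, the dirty bit) that the CPU may set concurrently from another VCPU's page walk. A plain __set_spte() can overwrite a just-set bit and lose it; __xchg_spte() swaps atomically and hands back whatever hardware managed to set, so the kvm_set_pfn_accessed()/kvm_set_pfn_dirty() calls at the end still observe it. The (new_spte & mask) == mask shortcut is safe because if the new spte already carries those bits, a concurrent hardware set changes nothing. A sketch of the race the atomic path closes (timeline in comments, names as in this patch):

	/*
	 * CPU 0: update_spte()              CPU 1: hardware page walk
	 * -----------------------------     ------------------------------
	 * old_spte = *sptep;  (A bit = 0)
	 *                                   *sptep |= shadow_accessed_mask;
	 * *sptep = new_spte;  (A bit lost; page never marked accessed)
	 *
	 * versus the atomic variant, which cannot lose the bit:
	 */
	old_spte = __xchg_spte(sptep, new_spte);	/* atomic swap */
	if (old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
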
@@ -367,7 +408,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
-                                  rmap_desc_cache, 4);
+                                  rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -645,18 +686,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        if (!*rmapp) {
-               printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
+               printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
                BUG();
        } else if (!(*rmapp & 1)) {
-               rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
+               rmap_printk("rmap_remove:  %p 1->0\n", spte);
                if ((u64 *)*rmapp != spte) {
-                       printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
-                              spte, *spte);
+                       printk(KERN_ERR "rmap_remove:  %p 1->BUG\n", spte);
                        BUG();
                }
                *rmapp = 0;
        } else {
-               rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
+               rmap_printk("rmap_remove:  %p many->many\n", spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
@@ -670,7 +710,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
                        prev_desc = desc;
                        desc = desc->more;
                }
-               pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
+               pr_err("rmap_remove: %p many->many\n", spte);
                BUG();
        }
 }
@@ -680,18 +720,18 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
        pfn_t pfn;
        u64 old_spte = *sptep;
 
-       if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-             old_spte & shadow_accessed_mask) {
+       if (!spte_has_volatile_bits(old_spte))
                __set_spte(sptep, new_spte);
-       } else
+       else
                old_spte = __xchg_spte(sptep, new_spte);
 
        if (!is_rmap_spte(old_spte))
                return;
+
        pfn = spte_to_pfn(old_spte);
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
-       if (is_writable_pte(old_spte))
+       if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
                kvm_set_pfn_dirty(pfn);
 }
 
@@ -746,13 +786,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
-       if (write_protected) {
-               pfn_t pfn;
-
-               spte = rmap_next(kvm, rmapp, NULL);
-               pfn = spte_to_pfn(*spte);
-               kvm_set_pfn_dirty(pfn);
-       }
 
        /* check for huge page mappings */
        for (i = PT_DIRECTORY_LEVEL;
@@ -947,6 +980,18 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
+/*
+ * This value is the sum of all of the kvm instances'
+ * kvm->arch.n_used_mmu_pages values.  We need a global,
+ * aggregate version in order to make the slab shrinker
+ * faster.
+ */
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+{
+       kvm->arch.n_used_mmu_pages += nr;
+       percpu_counter_add(&kvm_total_used_mmu_pages, nr);
+}
+
 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        ASSERT(is_empty_shadow_page(sp->spt));
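
The n_free_mmu_pages bookkeeping gives way to n_used_mmu_pages plus the global kvm_total_used_mmu_pages percpu counter declared earlier, so the shrinker can report a total without taking kvm_lock and walking vm_list. A minimal sketch of the percpu_counter calls this patch relies on (fragments, error handling elided):

	#include <linux/percpu_counter.h>

	static struct percpu_counter kvm_total_used_mmu_pages;

	/* one-time setup; returns 0 or -ENOMEM, checked in
	 * kvm_mmu_module_init() below */
	percpu_counter_init(&kvm_total_used_mmu_pages, 0);

	/* cheap per-cpu update on every shadow page alloc/free */
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);

	/* approximate, never-negative global read for the shrinker */
	percpu_counter_read_positive(&kvm_total_used_mmu_pages);

The read is approximate because per-cpu deltas below the batch threshold are not folded in; that is fine for a shrinker heuristic and avoids the cost of percpu_counter_sum().
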
@@ -956,7 +1001,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        if (!sp->role.direct)
                __free_page(virt_to_page(sp->gfns));
        kmem_cache_free(mmu_page_header_cache, sp);
-       ++kvm->arch.n_free_mmu_pages;
+       kvm_mod_used_mmu_pages(kvm, -1);
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -979,7 +1024,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
        sp->parent_pte = parent_pte;
-       --vcpu->kvm->arch.n_free_mmu_pages;
+       kvm_mod_used_mmu_pages(vcpu->kvm, +1);
        return sp;
 }
 
@@ -1403,7 +1448,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        if (role.direct)
                role.cr4_pae = 0;
        role.access = access;
-       if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+       if (!vcpu->arch.mmu.direct_map
+           && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
@@ -1665,41 +1711,31 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /*
  * Changing the number of mmu pages allocated to the vm
- * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
+ * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
-       int used_pages;
        LIST_HEAD(invalid_list);
-
-       used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
-       used_pages = max(0, used_pages);
-
        /*
         * If we set the number of mmu pages to be smaller than the
         * number of active pages, we must free some mmu pages before we
         * change the value.
         */
 
-       if (used_pages > kvm_nr_mmu_pages) {
-               while (used_pages > kvm_nr_mmu_pages &&
+       if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
+               while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
                        !list_empty(&kvm->arch.active_mmu_pages)) {
                        struct kvm_mmu_page *page;
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
-                                                              &invalid_list);
+                       kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
+                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
                }
-               kvm_mmu_commit_zap_page(kvm, &invalid_list);
-               kvm_nr_mmu_pages = used_pages;
-               kvm->arch.n_free_mmu_pages = 0;
+               goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
        }
-       else
-               kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-                                        - kvm->arch.n_alloc_mmu_pages;
 
-       kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
+       kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
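
With n_alloc/n_free gone, the accounting reduces to two fields: n_used_mmu_pages counts live shadow pages and n_max_mmu_pages is the ceiling this function enforces. Headroom checks elsewhere, such as kvm_mmu_available_pages() used in __kvm_mmu_free_some_pages() further down, then reduce to a subtraction; a sketch of the helper this implies, assuming it lives in arch/x86/kvm/mmu.h (body reconstructed, not part of this file):

	static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
	{
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;
	}
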
@@ -1709,11 +1745,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
        LIST_HEAD(invalid_list);
        int r;
 
-       pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
+       pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
        r = 0;
 
        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
-               pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
+               pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
                         sp->role.word);
                r = 1;
                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
@@ -1729,7 +1765,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
        LIST_HEAD(invalid_list);
 
        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
-               pgprintk("%s: zap %lx %x\n",
+               pgprintk("%s: zap %llx %x\n",
                         __func__, gfn, sp->role.word);
                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
        }
@@ -1925,7 +1961,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         * whether the guest actually used the pte (in order to detect
         * demand paging).
         */
-       spte = shadow_base_present_pte | shadow_dirty_mask;
+       spte = shadow_base_present_pte;
        if (!speculative)
                spte |= shadow_accessed_mask;
        if (!dirty)
@@ -1938,7 +1974,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= shadow_user_mask;
        if (level > PT_PAGE_TABLE_LEVEL)
                spte |= PT_PAGE_SIZE_MASK;
-       if (tdp_enabled)
+       if (vcpu->arch.mmu.direct_map)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));
 
@@ -1948,8 +1984,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        spte |= (u64)pfn << PAGE_SHIFT;
 
        if ((pte_access & ACC_WRITE_MASK)
-           || (!tdp_enabled && write_fault && !is_write_protection(vcpu)
-               && !user_fault)) {
+           || (!vcpu->arch.mmu.direct_map && write_fault
+               && !is_write_protection(vcpu) && !user_fault)) {
 
                if (level > PT_PAGE_TABLE_LEVEL &&
                    has_wrprotected_page(vcpu->kvm, gfn, level)) {
@@ -1960,7 +1996,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
                spte |= PT_WRITABLE_MASK;
 
-               if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+               if (!vcpu->arch.mmu.direct_map
+                   && !(pte_access & ACC_WRITE_MASK))
                        spte &= ~PT_USER_MASK;
 
                /*
@@ -1973,7 +2010,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        goto set_pte;
 
                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
-                       pgprintk("%s: found shadow page for %lx, marking ro\n",
+                       pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
                        ret = 1;
                        pte_access &= ~ACC_WRITE_MASK;
@@ -1986,8 +2023,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-       if (is_writable_pte(*sptep) && !is_writable_pte(spte))
-               kvm_set_pfn_dirty(pfn);
        update_spte(sptep, spte);
 done:
        return ret;
@@ -2004,7 +2039,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        int rmap_count;
 
        pgprintk("%s: spte %llx access %x write_fault %d"
-                " user_fault %d gfn %lx\n",
+                " user_fault %d gfn %llx\n",
                 __func__, *sptep, pt_access,
                 write_fault, user_fault, gfn);
 
@@ -2023,7 +2058,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        __set_spte(sptep, shadow_trap_nonpresent_pte);
                        kvm_flush_remote_tlbs(vcpu->kvm);
                } else if (pfn != spte_to_pfn(*sptep)) {
-                       pgprintk("hfn old %lx new %lx\n",
+                       pgprintk("hfn old %llx new %llx\n",
                                 spte_to_pfn(*sptep), pfn);
                        drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
                        kvm_flush_remote_tlbs(vcpu->kvm);
@@ -2040,7 +2075,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        }
 
        pgprintk("%s: setting spte %llx\n", __func__, *sptep);
-       pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
+       pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
                 is_large_pte(*sptep)? "2MB" : "4kB",
                 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
                 *sptep, sptep);
@@ -2064,6 +2099,105 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
+static struct kvm_memory_slot *
+pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
+{
+       struct kvm_memory_slot *slot;
+
+       slot = gfn_to_memslot(vcpu->kvm, gfn);
+       if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
+             (no_dirty_log && slot->dirty_bitmap))
+               slot = NULL;
+
+       return slot;
+}
+
+static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                    bool no_dirty_log)
+{
+       struct kvm_memory_slot *slot;
+       unsigned long hva;
+
+       slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log);
+       if (!slot) {
+               get_page(bad_page);
+               return page_to_pfn(bad_page);
+       }
+
+       hva = gfn_to_hva_memslot(slot, gfn);
+
+       return hva_to_pfn_atomic(vcpu->kvm, hva);
+}
+
+static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
+                                   struct kvm_mmu_page *sp,
+                                   u64 *start, u64 *end)
+{
+       struct page *pages[PTE_PREFETCH_NUM];
+       unsigned access = sp->role.access;
+       int i, ret;
+       gfn_t gfn;
+
+       gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
+       if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK))
+               return -1;
+
+       ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
+       if (ret <= 0)
+               return -1;
+
+       for (i = 0; i < ret; i++, gfn++, start++)
+               mmu_set_spte(vcpu, start, ACC_ALL,
+                            access, 0, 0, 1, NULL,
+                            sp->role.level, gfn,
+                            page_to_pfn(pages[i]), true, true);
+
+       return 0;
+}
+
+static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *sp, u64 *sptep)
+{
+       u64 *spte, *start = NULL;
+       int i;
+
+       WARN_ON(!sp->role.direct);
+
+       i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
+       spte = sp->spt + i;
+
+       for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
+               if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
+                       if (!start)
+                               continue;
+                       if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
+                               break;
+                       start = NULL;
+               } else if (!start)
+                       start = spte;
+       }
+}
+
+static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
+{
+       struct kvm_mmu_page *sp;
+
+       /*
+        * Since there is no accessed bit on EPT, there is no way to
+        * distinguish between actually accessed translations
+        * and prefetched ones, so disable pte prefetch if EPT is
+        * enabled.
+        */
+       if (!shadow_accessed_mask)
+               return;
+
+       sp = page_header(__pa(sptep));
+       if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+               return;
+
+       __direct_pte_prefetch(vcpu, sp, sptep);
+}
+
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                        int level, gfn_t gfn, pfn_t pfn)
 {
@@ -2077,6 +2211,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
                                     level, gfn, pfn, false, true);
+                       direct_pte_prefetch(vcpu, iterator.sptep);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
@@ -2238,7 +2373,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                ASSERT(!VALID_PAGE(root));
                if (mmu_check_root(vcpu, root_gfn))
                        return 1;
-               if (tdp_enabled) {
+               if (vcpu->arch.mmu.direct_map) {
                        direct = 1;
                        root_gfn = 0;
                }
@@ -2254,6 +2389,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                return 0;
        }
        direct = !is_paging(vcpu);
+
+       if (mmu_check_root(vcpu, root_gfn))
+               return 1;
+
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -2265,11 +2404,11 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                                continue;
                        }
                        root_gfn = pdptr >> PAGE_SHIFT;
+                       if (mmu_check_root(vcpu, root_gfn))
+                               return 1;
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
-               if (mmu_check_root(vcpu, root_gfn))
-                       return 1;
-               if (tdp_enabled) {
+               if (vcpu->arch.mmu.direct_map) {
                        direct = 1;
                        root_gfn = i << 30;
                }
@@ -2407,6 +2546,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = true;
        return 0;
 }
 
@@ -2526,6 +2666,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
        context->root_level = level;
        context->shadow_root_level = level;
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = false;
        return 0;
 }
 
@@ -2550,6 +2691,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = false;
        return 0;
 }
 
@@ -2571,6 +2713,8 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        context->invlpg = nonpaging_invlpg;
        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = true;
+       context->set_cr3 = kvm_x86_ops->set_cr3;
 
        if (!is_paging(vcpu)) {
                context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2609,7 +2753,8 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
                r = paging32_init_context(vcpu);
 
        vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
-       vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+       vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
+       vcpu->arch.mmu.set_cr3           = kvm_x86_ops->set_cr3;
 
        return r;
 }
@@ -2653,7 +2798,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        /* set_cr3() should ensure TLB has been flushed */
-       kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+       vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 out:
        return r;
 }
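
This hunk is what the commit title refers to: kvm_mmu_load() now reaches set_cr3 through the mmu context instead of calling kvm_x86_ops->set_cr3 directly, and both init_kvm_tdp_mmu() and init_kvm_softmmu() install the vendor callback into the new pointer. Together with the direct_map flag threaded through the rest of this patch, that lets a later series give each mmu context its own root loader (e.g. for nested paging). A sketch of the members this implies for struct kvm_mmu in kvm_host.h, with the set_cr3 signature taken from kvm_x86_ops and surrounding fields elided:

	struct kvm_mmu {
		void (*new_cr3)(struct kvm_vcpu *vcpu);
		void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); /* new */
		/* ... page_fault, gva_to_gpa, free, prefetch_page, ... */
		hpa_t root_hpa;
		int root_level;
		int shadow_root_level;
		bool direct_map;                                            /* new */
		/* ... */
	};
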
@@ -2837,7 +2982,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
-       kvm_mmu_audit(vcpu, "pre pte write");
+       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
@@ -2910,7 +3055,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-       kvm_mmu_audit(vcpu, "post pte write");
+       trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
                kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
@@ -2923,7 +3068,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        gpa_t gpa;
        int r;
 
-       if (tdp_enabled)
+       if (vcpu->arch.mmu.direct_map)
                return 0;
 
        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -2937,21 +3082,18 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-       int free_pages;
        LIST_HEAD(invalid_list);
 
-       free_pages = vcpu->kvm->arch.n_free_mmu_pages;
-       while (free_pages < KVM_REFILL_PAGES &&
+       while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
                struct kvm_mmu_page *sp;
 
                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
-               free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-                                                      &invalid_list);
+               kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
+               kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
-       kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -3112,23 +3254,22 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        struct kvm *kvm;
        struct kvm *kvm_freed = NULL;
-       int cache_count = 0;
+
+       if (nr_to_scan == 0)
+               goto out;
 
        spin_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               int npages, idx, freed_pages;
+               int idx, freed_pages;
                LIST_HEAD(invalid_list);
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
-               npages = kvm->arch.n_alloc_mmu_pages -
-                        kvm->arch.n_free_mmu_pages;
-               cache_count += npages;
-               if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+               if (!kvm_freed && nr_to_scan > 0 &&
+                   kvm->arch.n_used_mmu_pages > 0) {
                        freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
                                                          &invalid_list);
-                       cache_count -= freed_pages;
                        kvm_freed = kvm;
                }
                nr_to_scan--;
@@ -3142,7 +3283,8 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 
        spin_unlock(&kvm_lock);
 
-       return cache_count;
+out:
+       return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
 static struct shrinker mmu_shrinker = {
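
Under the old shrinker ABI used here, one callback serves both roles: shrink_slab() first calls it with nr_to_scan == 0 purely to ask how many objects exist, then again with a real count to reclaim. This patch makes the count query essentially free, returning the percpu counter instead of walking vm_list under kvm_lock. Reduced to its skeleton (comments mine, body abridged from the function above):

	static int mmu_shrink(struct shrinker *shrink, int nr_to_scan,
			      gfp_t gfp_mask)
	{
		if (nr_to_scan == 0)	/* pure "how many?" query */
			goto out;

		/* ... zap up to nr_to_scan shadow pages across VMs ... */
	out:
		/* approximate global count; no locks, never negative */
		return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
	}
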
@@ -3163,6 +3305,7 @@ static void mmu_destroy_caches(void)
 void kvm_mmu_module_exit(void)
 {
        mmu_destroy_caches();
+       percpu_counter_destroy(&kvm_total_used_mmu_pages);
        unregister_shrinker(&mmu_shrinker);
 }
 
@@ -3185,6 +3328,9 @@ int kvm_mmu_module_init(void)
        if (!mmu_page_header_cache)
                goto nomem;
 
+       if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
+               goto nomem;
+
        register_shrinker(&mmu_shrinker);
 
        return 0;
@@ -3355,271 +3501,6 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef AUDIT
-
-static const char *audit_msg;
-
-static gva_t canonicalize(gva_t gva)
-{
-#ifdef CONFIG_X86_64
-       gva = (long long)(gva << 16) >> 16;
-#endif
-       return gva;
-}
-
-
-typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
-
-static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
-                           inspect_spte_fn fn)
-{
-       int i;
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-               u64 ent = sp->spt[i];
-
-               if (is_shadow_present_pte(ent)) {
-                       if (!is_last_spte(ent, sp->role.level)) {
-                               struct kvm_mmu_page *child;
-                               child = page_header(ent & PT64_BASE_ADDR_MASK);
-                               __mmu_spte_walk(kvm, child, fn);
-                       } else
-                               fn(kvm, &sp->spt[i]);
-               }
-       }
-}
-
-static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
-{
-       int i;
-       struct kvm_mmu_page *sp;
-
-       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-               return;
-       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->arch.mmu.root_hpa;
-               sp = page_header(root);
-               __mmu_spte_walk(vcpu->kvm, sp, fn);
-               return;
-       }
-       for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->arch.mmu.pae_root[i];
-
-               if (root && VALID_PAGE(root)) {
-                       root &= PT64_BASE_ADDR_MASK;
-                       sp = page_header(root);
-                       __mmu_spte_walk(vcpu->kvm, sp, fn);
-               }
-       }
-       return;
-}
-
-static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
-                               gva_t va, int level)
-{
-       u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
-       int i;
-       gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
-               u64 ent = pt[i];
-
-               if (ent == shadow_trap_nonpresent_pte)
-                       continue;
-
-               va = canonicalize(va);
-               if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
-                       audit_mappings_page(vcpu, ent, va, level - 1);
-               else {
-                       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
-                       gfn_t gfn = gpa >> PAGE_SHIFT;
-                       pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
-                       hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
-
-                       if (is_error_pfn(pfn)) {
-                               kvm_release_pfn_clean(pfn);
-                               continue;
-                       }
-
-                       if (is_shadow_present_pte(ent)
-                           && (ent & PT64_BASE_ADDR_MASK) != hpa)
-                               printk(KERN_ERR "xx audit error: (%s) levels %d"
-                                      " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-                                      audit_msg, vcpu->arch.mmu.root_level,
-                                      va, gpa, hpa, ent,
-                                      is_shadow_present_pte(ent));
-                       else if (ent == shadow_notrap_nonpresent_pte
-                                && !is_error_hpa(hpa))
-                               printk(KERN_ERR "audit: (%s) notrap shadow,"
-                                      " valid guest gva %lx\n", audit_msg, va);
-                       kvm_release_pfn_clean(pfn);
-
-               }
-       }
-}
-
-static void audit_mappings(struct kvm_vcpu *vcpu)
-{
-       unsigned i;
-
-       if (vcpu->arch.mmu.root_level == 4)
-               audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
-       else
-               for (i = 0; i < 4; ++i)
-                       if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
-                               audit_mappings_page(vcpu,
-                                                   vcpu->arch.mmu.pae_root[i],
-                                                   i << 30,
-                                                   2);
-}
-
-static int count_rmaps(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_memslots *slots;
-       int nmaps = 0;
-       int i, j, k, idx;
-
-       idx = srcu_read_lock(&kvm->srcu);
-       slots = kvm_memslots(kvm);
-       for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-               struct kvm_memory_slot *m = &slots->memslots[i];
-               struct kvm_rmap_desc *d;
-
-               for (j = 0; j < m->npages; ++j) {
-                       unsigned long *rmapp = &m->rmap[j];
-
-                       if (!*rmapp)
-                               continue;
-                       if (!(*rmapp & 1)) {
-                               ++nmaps;
-                               continue;
-                       }
-                       d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-                       while (d) {
-                               for (k = 0; k < RMAP_EXT; ++k)
-                                       if (d->sptes[k])
-                                               ++nmaps;
-                                       else
-                                               break;
-                               d = d->more;
-                       }
-               }
-       }
-       srcu_read_unlock(&kvm->srcu, idx);
-       return nmaps;
-}
-
-void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
-{
-       unsigned long *rmapp;
-       struct kvm_mmu_page *rev_sp;
-       gfn_t gfn;
-
-       if (is_writable_pte(*sptep)) {
-               rev_sp = page_header(__pa(sptep));
-               gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
-
-               if (!gfn_to_memslot(kvm, gfn)) {
-                       if (!printk_ratelimit())
-                               return;
-                       printk(KERN_ERR "%s: no memslot for gfn %ld\n",
-                                        audit_msg, gfn);
-                       printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
-                              audit_msg, (long int)(sptep - rev_sp->spt),
-                                       rev_sp->gfn);
-                       dump_stack();
-                       return;
-               }
-
-               rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
-               if (!*rmapp) {
-                       if (!printk_ratelimit())
-                               return;
-                       printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
-                                        audit_msg, *sptep);
-                       dump_stack();
-               }
-       }
-
-}
-
-void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
-{
-       mmu_spte_walk(vcpu, inspect_spte_has_rmap);
-}
-
-static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mmu_page *sp;
-       int i;
-
-       list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-               u64 *pt = sp->spt;
-
-               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-                       continue;
-
-               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                       u64 ent = pt[i];
-
-                       if (!(ent & PT_PRESENT_MASK))
-                               continue;
-                       if (!is_writable_pte(ent))
-                               continue;
-                       inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
-               }
-       }
-       return;
-}
-
-static void audit_rmap(struct kvm_vcpu *vcpu)
-{
-       check_writable_mappings_rmap(vcpu);
-       count_rmaps(vcpu);
-}
-
-static void audit_write_protection(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mmu_page *sp;
-       struct kvm_memory_slot *slot;
-       unsigned long *rmapp;
-       u64 *spte;
-       gfn_t gfn;
-
-       list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-               if (sp->role.direct)
-                       continue;
-               if (sp->unsync)
-                       continue;
-
-               slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
-               rmapp = &slot->rmap[gfn - slot->base_gfn];
-
-               spte = rmap_next(vcpu->kvm, rmapp, NULL);
-               while (spte) {
-                       if (is_writable_pte(*spte))
-                               printk(KERN_ERR "%s: (%s) shadow page has "
-                               "writable mappings: gfn %lx role %x\n",
-                              __func__, audit_msg, sp->gfn,
-                              sp->role.word);
-                       spte = rmap_next(vcpu->kvm, rmapp, spte);
-               }
-       }
-}
-
-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
-{
-       int olddbg = dbg;
-
-       dbg = 0;
-       audit_msg = msg;
-       audit_rmap(vcpu);
-       audit_write_protection(vcpu);
-       if (strcmp("pre pte write", audit_msg) != 0)
-               audit_mappings(vcpu);
-       audit_writable_sptes_have_rmaps(vcpu);
-       dbg = olddbg;
-}
-
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
 #endif