KVM: Portability: Introduce kvm_vcpu_arch
[pandora-kernel.git] / drivers/kvm/mmu.c
index 23965aa..da1dedb 100644
 
 #include "vmx.h"
 #include "kvm.h"
+#include "x86.h"
 
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/swap.h>
 
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
+#include <asm/io.h>
 
 #undef MMU_DEBUG
 
@@ -82,7 +85,8 @@ static int dbg = 1;
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
-#define PT64_NX_MASK (1ULL << 63)
+#define PT64_NX_SHIFT 63
+#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
 
 #define PT_PAT_SHIFT 7
 #define PT_DIR_PAT_SHIFT 12
@@ -90,7 +94,8 @@ static int dbg = 1;
 
 #define PT32_DIR_PSE36_SIZE 4
 #define PT32_DIR_PSE36_SHIFT 13
-#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+#define PT32_DIR_PSE36_MASK \
+       (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
 
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
@@ -103,7 +108,7 @@ static int dbg = 1;
 #define PT64_LEVEL_BITS 9
 
 #define PT64_LEVEL_SHIFT(level) \
-               ( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
+               (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
 
 #define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
@@ -115,7 +120,7 @@ static int dbg = 1;
 #define PT32_LEVEL_BITS 10
 
 #define PT32_LEVEL_SHIFT(level) \
-               ( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
+               (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
 
 #define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
@@ -132,6 +137,8 @@ static int dbg = 1;
 #define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
 
+#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
+                       | PT64_NX_MASK)
 
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
@@ -147,6 +154,11 @@ static int dbg = 1;
 
 #define RMAP_EXT 4
 
+#define ACC_EXEC_MASK    1
+#define ACC_WRITE_MASK   PT_WRITABLE_MASK
+#define ACC_USER_MASK    PT_USER_MASK
+#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+
 struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
@@ -156,9 +168,19 @@ static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
 
+static u64 __read_mostly shadow_trap_nonpresent_pte;
+static u64 __read_mostly shadow_notrap_nonpresent_pte;
+
+void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
+{
+       shadow_trap_nonpresent_pte = trap_pte;
+       shadow_notrap_nonpresent_pte = notrap_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr0 & CR0_WP_MASK;
+       return vcpu->arch.cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -168,7 +190,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-       return vcpu->shadow_efer & EFER_NX;
+       return vcpu->arch.shadow_efer & EFER_NX;
 }
 
 static int is_present_pte(unsigned long pte)
@@ -176,11 +198,23 @@ static int is_present_pte(unsigned long pte)
        return pte & PT_PRESENT_MASK;
 }
 
+static int is_shadow_present_pte(u64 pte)
+{
+       pte &= ~PT_SHADOW_IO_MARK;
+       return pte != shadow_trap_nonpresent_pte
+               && pte != shadow_notrap_nonpresent_pte;
+}
+
 static int is_writeble_pte(unsigned long pte)
 {
        return pte & PT_WRITABLE_MASK;
 }
 
+static int is_dirty_pte(unsigned long pte)
+{
+       return pte & PT_DIRTY_MASK;
+}
+
 static int is_io_pte(unsigned long pte)
 {
        return pte & PT_SHADOW_IO_MARK;
@@ -188,8 +222,15 @@ static int is_io_pte(unsigned long pte)
 
 static int is_rmap_pte(u64 pte)
 {
-       return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
-               == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
+       return pte != shadow_trap_nonpresent_pte
+               && pte != shadow_notrap_nonpresent_pte;
+}
+
+static gfn_t pse36_gfn_delta(u32 gpte)
+{
+       int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
+
+       return (gpte & PT32_DIR_PSE36_MASK) << shift;
 }
 
 static void set_shadow_pte(u64 *sptep, u64 spte)
@@ -202,15 +243,14 @@ static void set_shadow_pte(u64 *sptep, u64 spte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                 struct kmem_cache *base_cache, int min,
-                                 gfp_t gfp_flags)
+                                 struct kmem_cache *base_cache, int min)
 {
        void *obj;
 
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               obj = kmem_cache_zalloc(base_cache, gfp_flags);
+               obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
@@ -225,14 +265,14 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 }
 
 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
-                                      int min, gfp_t gfp_flags)
+                                      int min)
 {
        struct page *page;
 
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = alloc_page(gfp_flags);
+               page = alloc_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                set_page_private(page, 0);
@@ -247,50 +287,34 @@ static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
 }
 
-static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 {
        int r;
 
-       r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-                                  pte_chain_cache, 4, gfp_flags);
+       kvm_mmu_free_some_pages(vcpu);
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
+                                  pte_chain_cache, 4);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-                                  rmap_desc_cache, 1, gfp_flags);
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
+                                  rmap_desc_cache, 1);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
+       r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
-                                  mmu_page_header_cache, 4, gfp_flags);
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
+                                  mmu_page_header_cache, 4);
 out:
        return r;
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
-{
-       int r;
-
-       r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
-       kvm_mmu_free_some_pages(vcpu);
-       if (r < 0) {
-               spin_unlock(&vcpu->kvm->lock);
-               kvm_arch_ops->vcpu_put(vcpu);
-               r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
-               kvm_arch_ops->vcpu_load(vcpu);
-               spin_lock(&vcpu->kvm->lock);
-               kvm_mmu_free_some_pages(vcpu);
-       }
-       return r;
-}
-
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-       mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-       mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-       mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+       mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -306,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+       return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
 }
 
@@ -317,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+       return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
 }
 
@@ -326,36 +350,53 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
        kfree(rd);
 }
 
+/*
+ * Take gfn and return the reverse mapping to it.
+ * Note: gfn must be unaliased before this function get called
+ */
+
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot;
+
+       slot = gfn_to_memslot(kvm, gfn);
+       return &slot->rmap[gfn - slot->base_gfn];
+}
+
 /*
  * Reverse mapping data structures:
  *
- * If page->private bit zero is zero, then page->private points to the
- * shadow page table entry that points to page_address(page).
+ * If rmapp bit zero is zero, then rmapp points to the shadow page table
+ * entry that points to page_address(page).
  *
- * If page->private bit zero is one, (then page->private & ~1) points
- * to a struct kvm_rmap_desc containing more mappings.
+ * If rmapp bit zero is one, then (*rmapp & ~1) points to a struct
+ * kvm_rmap_desc containing more mappings.
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
-       struct page *page;
+       struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
+       unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
-       page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       if (!page_private(page)) {
+       gfn = unalias_gfn(vcpu->kvm, gfn);
+       sp = page_header(__pa(spte));
+       sp->gfns[spte - sp->spt] = gfn;
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+       if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-               set_page_private(page,(unsigned long)spte);
-       } else if (!(page_private(page) & 1)) {
+               *rmapp = (unsigned long)spte;
+       } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
-               desc->shadow_ptes[0] = (u64 *)page_private(page);
+               desc->shadow_ptes[0] = (u64 *)*rmapp;
                desc->shadow_ptes[1] = spte;
-               set_page_private(page,(unsigned long)desc | 1);
+               *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-               desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+               desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
@@ -368,7 +409,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
        }
 }
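
For reference, the tagged-pointer encoding documented above can be walked as in the following minimal sketch (not part of the patch; count_rmap_sptes() is a hypothetical helper built on the unalias_gfn()/gfn_to_rmap() interfaces introduced here):

static int count_rmap_sptes(struct kvm *kvm, gfn_t gfn)
{
        unsigned long *rmapp = gfn_to_rmap(kvm, unalias_gfn(kvm, gfn));
        struct kvm_rmap_desc *desc;
        int i, n = 0;

        if (!*rmapp)
                return 0;       /* no shadow ptes map this gfn */
        if (!(*rmapp & 1))
                return 1;       /* bit zero clear: *rmapp is a single spte pointer */
        /* bit zero set: chain of kvm_rmap_desc structs, tag masked off */
        for (desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); desc; desc = desc->more)
                for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                        ++n;
        return n;
}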
 
-static void rmap_desc_remove_entry(struct page *page,
+static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
@@ -382,44 +423,53 @@ static void rmap_desc_remove_entry(struct page *page,
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
-               set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
+               *rmapp = (unsigned long)desc->shadow_ptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
-                       set_page_private(page,(unsigned long)desc->more | 1);
+                       *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
 }
 
-static void rmap_remove(u64 *spte)
+static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
-       struct page *page;
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
+       struct kvm_mmu_page *sp;
+       struct page *page;
+       unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
+       sp = page_header(__pa(spte));
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       if (!page_private(page)) {
+       mark_page_accessed(page);
+       if (is_writeble_pte(*spte))
+               kvm_release_page_dirty(page);
+       else
+               kvm_release_page_clean(page);
+       rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
+       if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
-       } else if (!(page_private(page) & 1)) {
+       } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
-               if ((u64 *)page_private(page) != spte) {
+               if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
-               set_page_private(page,0);
+               *rmapp = 0;
        } else {
                rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
-               desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
+               desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
-                                       rmap_desc_remove_entry(page,
+                                       rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
@@ -431,32 +481,51 @@ static void rmap_remove(u64 *spte)
        }
 }
 
-static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 {
-       struct kvm *kvm = vcpu->kvm;
-       struct page *page;
        struct kvm_rmap_desc *desc;
+       struct kvm_rmap_desc *prev_desc;
+       u64 *prev_spte;
+       int i;
+
+       if (!*rmapp)
+               return NULL;
+       else if (!(*rmapp & 1)) {
+               if (!spte)
+                       return (u64 *)*rmapp;
+               return NULL;
+       }
+       desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
+       prev_desc = NULL;
+       prev_spte = NULL;
+       while (desc) {
+               for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
+                       if (prev_spte == spte)
+                               return desc->shadow_ptes[i];
+                       prev_spte = desc->shadow_ptes[i];
+               }
+               desc = desc->more;
+       }
+       return NULL;
+}
+
+static void rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+       unsigned long *rmapp;
        u64 *spte;
 
-       page = gfn_to_page(kvm, gfn);
-       BUG_ON(!page);
+       gfn = unalias_gfn(kvm, gfn);
+       rmapp = gfn_to_rmap(kvm, gfn);
 
-       while (page_private(page)) {
-               if (!(page_private(page) & 1))
-                       spte = (u64 *)page_private(page);
-               else {
-                       desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
-                       spte = desc->shadow_ptes[0];
-               }
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
                BUG_ON(!spte);
-               BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
-                      != page_to_pfn(page));
                BUG_ON(!(*spte & PT_PRESENT_MASK));
-               BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-               rmap_remove(spte);
-               set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               if (is_writeble_pte(*spte))
+                       set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+               kvm_flush_remote_tlbs(kvm);
+               spte = rmap_next(kvm, rmapp, spte);
        }
 }
 
@@ -467,7 +536,7 @@ static int is_empty_shadow_page(u64 *spt)
        u64 *end;
 
        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-               if (*pos != 0) {
+               if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
                        return 0;
@@ -476,13 +545,13 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm *kvm,
-                             struct kvm_mmu_page *page_head)
+static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-       ASSERT(is_empty_shadow_page(page_head->spt));
-       list_del(&page_head->link);
-       __free_page(virt_to_page(page_head->spt));
-       kfree(page_head);
+       ASSERT(is_empty_shadow_page(sp->spt));
+       list_del(&sp->link);
+       __free_page(virt_to_page(sp->spt));
+       __free_page(virt_to_page(sp->gfns));
+       kfree(sp);
        ++kvm->n_free_mmu_pages;
 }
 
@@ -494,26 +563,26 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
 {
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
 
        if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;
 
-       page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
-                                     sizeof *page);
-       page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-       set_page_private(virt_to_page(page->spt), (unsigned long)page);
-       list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-       ASSERT(is_empty_shadow_page(page->spt));
-       page->slot_bitmap = 0;
-       page->multimapped = 0;
-       page->parent_pte = parent_pte;
+       sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+       sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+       sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+       list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
+       ASSERT(is_empty_shadow_page(sp->spt));
+       sp->slot_bitmap = 0;
+       sp->multimapped = 0;
+       sp->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
-       return page;
+       return sp;
 }
 
 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
-                                   struct kvm_mmu_page *page, u64 *parent_pte)
+                                   struct kvm_mmu_page *sp, u64 *parent_pte)
 {
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
@@ -521,20 +590,20 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 
        if (!parent_pte)
                return;
-       if (!page->multimapped) {
-               u64 *old = page->parent_pte;
+       if (!sp->multimapped) {
+               u64 *old = sp->parent_pte;
 
                if (!old) {
-                       page->parent_pte = parent_pte;
+                       sp->parent_pte = parent_pte;
                        return;
                }
-               page->multimapped = 1;
+               sp->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
-               INIT_HLIST_HEAD(&page->parent_ptes);
-               hlist_add_head(&pte_chain->link, &page->parent_ptes);
+               INIT_HLIST_HEAD(&sp->parent_ptes);
+               hlist_add_head(&pte_chain->link, &sp->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
-       hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
+       hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
@@ -545,23 +614,23 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
-       hlist_add_head(&pte_chain->link, &page->parent_ptes);
+       hlist_add_head(&pte_chain->link, &sp->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
                                       u64 *parent_pte)
 {
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;
 
-       if (!page->multimapped) {
-               BUG_ON(page->parent_pte != parent_pte);
-               page->parent_pte = NULL;
+       if (!sp->multimapped) {
+               BUG_ON(sp->parent_pte != parent_pte);
+               sp->parent_pte = NULL;
                return;
        }
-       hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
+       hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
@@ -577,9 +646,9 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
-                               if (hlist_empty(&page->parent_ptes)) {
-                                       page->multimapped = 0;
-                                       page->parent_pte = NULL;
+                               if (hlist_empty(&sp->parent_ptes)) {
+                                       sp->multimapped = 0;
+                                       sp->parent_pte = NULL;
                                }
                        }
                        return;
@@ -587,22 +656,21 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
        BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
-                                               gfn_t gfn)
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
        struct hlist_head *bucket;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
        struct hlist_node *node;
 
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
-       hlist_for_each_entry(page, node, bucket, hash_link)
-               if (page->gfn == gfn && !page->role.metaphysical) {
+       bucket = &kvm->mmu_page_hash[index];
+       hlist_for_each_entry(sp, node, bucket, hash_link)
+               if (sp->gfn == gfn && !sp->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
-                                __FUNCTION__, page->role.word);
-                       return page;
+                                __FUNCTION__, sp->role.word);
+                       return sp;
                }
        return NULL;
 }
@@ -612,22 +680,23 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
-                                            unsigned hugepage_access,
-                                            u64 *parent_pte)
+                                            unsigned access,
+                                            u64 *parent_pte,
+                                            bool *new_page)
 {
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
        struct hlist_node *node;
 
        role.word = 0;
-       role.glevels = vcpu->mmu.root_level;
+       role.glevels = vcpu->arch.mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
-       role.hugepage_access = hugepage_access;
-       if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+       role.access = access;
+       if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
@@ -636,38 +705,41 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
-       hlist_for_each_entry(page, node, bucket, hash_link)
-               if (page->gfn == gfn && page->role.word == role.word) {
-                       mmu_page_add_parent_pte(vcpu, page, parent_pte);
+       hlist_for_each_entry(sp, node, bucket, hash_link)
+               if (sp->gfn == gfn && sp->role.word == role.word) {
+                       mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        pgprintk("%s: found\n", __FUNCTION__);
-                       return page;
+                       return sp;
                }
-       page = kvm_mmu_alloc_page(vcpu, parent_pte);
-       if (!page)
-               return page;
+       sp = kvm_mmu_alloc_page(vcpu, parent_pte);
+       if (!sp)
+               return sp;
        pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
-       page->gfn = gfn;
-       page->role = role;
-       hlist_add_head(&page->hash_link, bucket);
+       sp->gfn = gfn;
+       sp->role = role;
+       hlist_add_head(&sp->hash_link, bucket);
+       vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
-               rmap_write_protect(vcpu, gfn);
-       return page;
+               rmap_write_protect(vcpu->kvm, gfn);
+       if (new_page)
+               *new_page = 1;
+       return sp;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
-                                        struct kvm_mmu_page *page)
+                                        struct kvm_mmu_page *sp)
 {
        unsigned i;
        u64 *pt;
        u64 ent;
 
-       pt = page->spt;
+       pt = sp->spt;
 
-       if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+       if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                       if (pt[i] & PT_PRESENT_MASK)
-                               rmap_remove(&pt[i]);
-                       pt[i] = 0;
+                       if (is_shadow_present_pte(pt[i]))
+                               rmap_remove(kvm, &pt[i]);
+                       pt[i] = shadow_trap_nonpresent_pte;
                }
                kvm_flush_remote_tlbs(kvm);
                return;
@@ -676,8 +748,8 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];
 
-               pt[i] = 0;
-               if (!(ent & PT_PRESENT_MASK))
+               pt[i] = shadow_trap_nonpresent_pte;
+               if (!is_shadow_present_pte(ent))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
                mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
@@ -685,147 +757,238 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
        kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_mmu_page *page,
-                            u64 *parent_pte)
+static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 {
-       mmu_page_remove_parent_pte(page, parent_pte);
+       mmu_page_remove_parent_pte(sp, parent_pte);
+}
+
+static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
+{
+       int i;
+
+       for (i = 0; i < KVM_MAX_VCPUS; ++i)
+               if (kvm->vcpus[i])
+                       kvm->vcpus[i]->arch.last_pte_updated = NULL;
 }
 
-static void kvm_mmu_zap_page(struct kvm *kvm,
-                            struct kvm_mmu_page *page)
+static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        u64 *parent_pte;
 
-       while (page->multimapped || page->parent_pte) {
-               if (!page->multimapped)
-                       parent_pte = page->parent_pte;
+       ++kvm->stat.mmu_shadow_zapped;
+       while (sp->multimapped || sp->parent_pte) {
+               if (!sp->multimapped)
+                       parent_pte = sp->parent_pte;
                else {
                        struct kvm_pte_chain *chain;
 
-                       chain = container_of(page->parent_ptes.first,
+                       chain = container_of(sp->parent_ptes.first,
                                             struct kvm_pte_chain, link);
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
-               kvm_mmu_put_page(page, parent_pte);
-               set_shadow_pte(parent_pte, 0);
+               kvm_mmu_put_page(sp, parent_pte);
+               set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
        }
-       kvm_mmu_page_unlink_children(kvm, page);
-       if (!page->root_count) {
-               hlist_del(&page->hash_link);
-               kvm_mmu_free_page(kvm, page);
+       kvm_mmu_page_unlink_children(kvm, sp);
+       if (!sp->root_count) {
+               hlist_del(&sp->hash_link);
+               kvm_mmu_free_page(kvm, sp);
        } else
-               list_move(&page->link, &kvm->active_mmu_pages);
+               list_move(&sp->link, &kvm->active_mmu_pages);
+       kvm_mmu_reset_last_pte_updated(kvm);
 }
 
-static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+/*
+ * Change the number of mmu pages allocated to the vm.
+ * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
+ */
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+{
+       /*
+        * If we set the number of mmu pages to be smaller than the
+        * number of active pages, we must free some mmu pages before we
+        * change the value.
+        */
+
+       if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+           kvm_nr_mmu_pages) {
+               int n_used_mmu_pages = kvm->n_alloc_mmu_pages
+                                      - kvm->n_free_mmu_pages;
+
+               while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+                       struct kvm_mmu_page *page;
+
+                       page = container_of(kvm->active_mmu_pages.prev,
+                                           struct kvm_mmu_page, link);
+                       kvm_mmu_zap_page(kvm, page);
+                       n_used_mmu_pages--;
+               }
+               kvm->n_free_mmu_pages = 0;
+       } else
+               kvm->n_free_mmu_pages += kvm_nr_mmu_pages
+                                        - kvm->n_alloc_mmu_pages;
+
+       kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+}
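
A worked example of the accounting above (illustrative numbers only): with n_alloc_mmu_pages = 1024 and n_free_mmu_pages = 200, 824 pages are in use. Shrinking the limit to 700 zaps pages from the tail of active_mmu_pages until only 700 remain in use and leaves n_free_mmu_pages at 0; growing it to 1200 instead simply adds 1200 - 1024 = 176 to n_free_mmu_pages. In both cases n_alloc_mmu_pages ends up equal to the new kvm_nr_mmu_pages.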
+
+static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
        struct hlist_head *bucket;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
        struct hlist_node *node, *n;
        int r;
 
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
-       hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
-               if (page->gfn == gfn && !page->role.metaphysical) {
+       bucket = &kvm->mmu_page_hash[index];
+       hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
+               if (sp->gfn == gfn && !sp->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
-                                page->role.word);
-                       kvm_mmu_zap_page(vcpu->kvm, page);
+                                sp->role.word);
+                       kvm_mmu_zap_page(kvm, sp);
                        r = 1;
                }
        return r;
 }
 
-static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
 
-       while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
-               pgprintk("%s: zap %lx %x\n",
-                        __FUNCTION__, gfn, page->role.word);
-               kvm_mmu_zap_page(vcpu->kvm, page);
+       while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
+               pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+               kvm_mmu_zap_page(kvm, sp);
        }
 }
 
-static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
+static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
-       int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
-       struct kvm_mmu_page *page_head = page_header(__pa(pte));
+       int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+       struct kvm_mmu_page *sp = page_header(__pa(pte));
 
-       __set_bit(slot, &page_head->slot_bitmap);
+       __set_bit(slot, &sp->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
-       return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
+       if (gpa == UNMAPPED_GVA)
+               return NULL;
+       return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+                        unsigned pt_access, unsigned pte_access,
+                        int user_fault, int write_fault, int dirty,
+                        int *ptwrite, gfn_t gfn)
 {
+       u64 spte;
+       int was_rmapped = is_rmap_pte(*shadow_pte);
        struct page *page;
 
-       ASSERT((gpa & HPA_ERR_MASK) == 0);
-       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (!page)
-               return gpa | HPA_ERR_MASK;
-       return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
-               | (gpa & (PAGE_SIZE-1));
-}
+       pgprintk("%s: spte %llx access %x write_fault %d"
+                " user_fault %d gfn %lx\n",
+                __FUNCTION__, *shadow_pte, pt_access,
+                write_fault, user_fault, gfn);
 
-hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
-{
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       /*
+        * We don't set the accessed bit, since we sometimes want to see
+        * whether the guest actually used the pte (in order to detect
+        * demand paging).
+        */
+       spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+       if (!dirty)
+               pte_access &= ~ACC_WRITE_MASK;
+       if (!(pte_access & ACC_EXEC_MASK))
+               spte |= PT64_NX_MASK;
+
+       page = gfn_to_page(vcpu->kvm, gfn);
+
+       spte |= PT_PRESENT_MASK;
+       if (pte_access & ACC_USER_MASK)
+               spte |= PT_USER_MASK;
+
+       if (is_error_page(page)) {
+               set_shadow_pte(shadow_pte,
+                              shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+               kvm_release_page_clean(page);
+               return;
+       }
 
-       if (gpa == UNMAPPED_GVA)
-               return UNMAPPED_GVA;
-       return gpa_to_hpa(vcpu, gpa);
-}
+       spte |= page_to_phys(page);
 
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
-{
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       if ((pte_access & ACC_WRITE_MASK)
+           || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+               struct kvm_mmu_page *shadow;
 
-       if (gpa == UNMAPPED_GVA)
-               return NULL;
-       return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+               spte |= PT_WRITABLE_MASK;
+               if (user_fault) {
+                       mmu_unshadow(vcpu->kvm, gfn);
+                       goto unshadowed;
+               }
+
+               shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
+               if (shadow) {
+                       pgprintk("%s: found shadow page for %lx, marking ro\n",
+                                __FUNCTION__, gfn);
+                       pte_access &= ~ACC_WRITE_MASK;
+                       if (is_writeble_pte(spte)) {
+                               spte &= ~PT_WRITABLE_MASK;
+                               kvm_x86_ops->tlb_flush(vcpu);
+                       }
+                       if (write_fault)
+                               *ptwrite = 1;
+               }
+       }
+
+unshadowed:
+
+       if (pte_access & ACC_WRITE_MASK)
+               mark_page_dirty(vcpu->kvm, gfn);
+
+       pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+       set_shadow_pte(shadow_pte, spte);
+       page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+       if (!was_rmapped) {
+               rmap_add(vcpu, shadow_pte, gfn);
+               if (!is_rmap_pte(*shadow_pte))
+                       kvm_release_page_clean(page);
+       } else
+               kvm_release_page_clean(page);
+       if (!ptwrite || !*ptwrite)
+               vcpu->arch.last_pte_updated = shadow_pte;
 }
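
The accessed bit deliberately left clear here (see the comment above the spte initialization) is what last_updated_pte_accessed(), added further down in this patch, later tests via (*spte & PT_ACCESSED_MASK): if the guest actually touched the pte, repeated writes to it are treated as demand paging rather than page-table flooding.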
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
        int level = PT32E_ROOT_LEVEL;
-       hpa_t table_addr = vcpu->mmu.root_hpa;
+       hpa_t table_addr = vcpu->arch.mmu.root_hpa;
+       int pt_write = 0;
 
        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
-               u64 pte;
 
                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);
 
                if (level == 1) {
-                       pte = table[index];
-                       if (is_present_pte(pte) && is_writeble_pte(pte))
-                               return 0;
-                       mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
-                       page_header_update_slot(vcpu->kvm, table, v);
-                       table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
-                                                               PT_USER_MASK;
-                       rmap_add(vcpu, &table[index]);
-                       return 0;
+                       mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+                                    0, write, 1, &pt_write, gfn);
+                       return pt_write || is_io_pte(table[index]);
                }
 
-               if (table[index] == 0) {
+               if (table[index] == shadow_trap_nonpresent_pte) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;
 
@@ -833,7 +996,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, 0, &table[index]);
+                                                    1, ACC_ALL, &table[index],
+                                                    NULL);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
@@ -846,77 +1010,86 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
        }
 }
 
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+                                   struct kvm_mmu_page *sp)
+{
+       int i;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
        int i;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
 
-       if (!VALID_PAGE(vcpu->mmu.root_hpa))
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
 #ifdef CONFIG_X86_64
-       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->mmu.root_hpa;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
 
-               page = page_header(root);
-               --page->root_count;
-               vcpu->mmu.root_hpa = INVALID_PAGE;
+               sp = page_header(root);
+               --sp->root_count;
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                return;
        }
 #endif
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->mmu.pae_root[i];
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                if (root) {
                        root &= PT64_BASE_ADDR_MASK;
-                       page = page_header(root);
-                       --page->root_count;
+                       sp = page_header(root);
+                       --sp->root_count;
                }
-               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+               vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
-       vcpu->mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
        int i;
        gfn_t root_gfn;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
 
-       root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+       root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
-       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->mmu.root_hpa;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
 
                ASSERT(!VALID_PAGE(root));
-               page = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                       PT64_ROOT_LEVEL, 0, 0, NULL);
-               root = __pa(page->spt);
-               ++page->root_count;
-               vcpu->mmu.root_hpa = root;
+               sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
+                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+               root = __pa(sp->spt);
+               ++sp->root_count;
+               vcpu->arch.mmu.root_hpa = root;
                return;
        }
 #endif
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->mmu.pae_root[i];
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                ASSERT(!VALID_PAGE(root));
-               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-                       if (!is_present_pte(vcpu->pdptrs[i])) {
-                               vcpu->mmu.pae_root[i] = 0;
+               if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+                       if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+                               vcpu->arch.mmu.pae_root[i] = 0;
                                continue;
                        }
-                       root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-               } else if (vcpu->mmu.root_level == 0)
+                       root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+               } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
-               page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-                                       PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                       0, NULL);
-               root = __pa(page->spt);
-               ++page->root_count;
-               vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+                                     PT32_ROOT_LEVEL, !is_paging(vcpu),
+                                     ACC_ALL, NULL, NULL);
+               root = __pa(sp->spt);
+               ++sp->root_count;
+               vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
-       vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+       vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -925,26 +1098,23 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-                              u32 error_code)
+                               u32 error_code)
 {
-       gpa_t addr = gva;
-       hpa_t paddr;
+       gfn_t gfn;
        int r;
 
+       pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
 
        ASSERT(vcpu);
-       ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
-
+       ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-       paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+       gfn = gva >> PAGE_SHIFT;
 
-       if (is_error_hpa(paddr))
-               return 1;
-
-       return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
+       return nonpaging_map(vcpu, gva & PAGE_MASK,
+                            error_code & PFERR_WRITE_MASK, gfn);
 }
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -954,22 +1124,23 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
+       context->prefetch_page = nonpaging_prefetch_page;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        return 0;
 }
 
-static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
+void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.tlb_flush;
-       kvm_arch_ops->tlb_flush(vcpu);
+       kvm_x86_ops->tlb_flush(vcpu);
 }
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
@@ -982,7 +1153,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
 {
-       kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
+       kvm_inject_page_fault(vcpu, addr, err_code);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -1000,12 +1171,13 @@ static void paging_free(struct kvm_vcpu *vcpu)
 
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
+       context->prefetch_page = paging64_prefetch_page;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
@@ -1020,12 +1192,13 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
+       context->prefetch_page = paging32_prefetch_page;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
@@ -1040,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
@@ -1055,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-               vcpu->mmu.free(vcpu);
-               vcpu->mmu.root_hpa = INVALID_PAGE;
+       if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               vcpu->arch.mmu.free(vcpu);
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        }
 }
 
@@ -1066,20 +1239,21 @@ int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
 
 int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
        int r;
 
-       spin_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->lock);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;
        mmu_alloc_roots(vcpu);
-       kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+       kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
-       spin_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->lock);
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_load);
@@ -1090,47 +1264,79 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 }
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
-                                 struct kvm_mmu_page *page,
+                                 struct kvm_mmu_page *sp,
                                  u64 *spte)
 {
        u64 pte;
        struct kvm_mmu_page *child;
 
        pte = *spte;
-       if (is_present_pte(pte)) {
-               if (page->role.level == PT_PAGE_TABLE_LEVEL)
-                       rmap_remove(spte);
+       if (is_shadow_present_pte(pte)) {
+               if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+                       rmap_remove(vcpu->kvm, spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);
                }
        }
-       *spte = 0;
-       kvm_flush_remote_tlbs(vcpu->kvm);
+       set_shadow_pte(spte, shadow_trap_nonpresent_pte);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-                                 struct kvm_mmu_page *page,
+                                 struct kvm_mmu_page *sp,
                                  u64 *spte,
-                                 const void *new, int bytes)
+                                 const void *new, int bytes,
+                                 int offset_in_pte)
 {
-       if (page->role.level != PT_PAGE_TABLE_LEVEL)
+       if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+               ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
+       }
 
-       if (page->role.glevels == PT32_ROOT_LEVEL)
-               paging32_update_pte(vcpu, page, spte, new, bytes);
+       ++vcpu->kvm->stat.mmu_pte_updated;
+       if (sp->role.glevels == PT32_ROOT_LEVEL)
+               paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
        else
-               paging64_update_pte(vcpu, page, spte, new, bytes);
+               paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+}
+
+static bool need_remote_flush(u64 old, u64 new)
+{
+       if (!is_shadow_present_pte(old))
+               return false;
+       if (!is_shadow_present_pte(new))
+               return true;
+       if ((old ^ new) & PT64_BASE_ADDR_MASK)
+               return true;
+       old ^= PT64_NX_MASK;
+       new ^= PT64_NX_MASK;
+       return (old & ~new & PT64_PERM_MASK) != 0;
+}
+
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+{
+       if (need_remote_flush(old, new))
+               kvm_flush_remote_tlbs(vcpu->kvm);
+       else
+               kvm_mmu_flush_tlb(vcpu);
+}
+
+static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+{
+       u64 *spte = vcpu->arch.last_pte_updated;
+
+       return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *old, const u8 *new, int bytes)
+                      const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
+       u64 entry;
        u64 *spte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
@@ -1142,20 +1348,24 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int npte;
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
-       if (gfn == vcpu->last_pt_write_gfn) {
-               ++vcpu->last_pt_write_count;
-               if (vcpu->last_pt_write_count >= 3)
+       ++vcpu->kvm->stat.mmu_pte_write;
+       kvm_mmu_audit(vcpu, "pre pte write");
+       if (gfn == vcpu->arch.last_pt_write_gfn
+           && !last_updated_pte_accessed(vcpu)) {
+               ++vcpu->arch.last_pt_write_count;
+               if (vcpu->arch.last_pt_write_count >= 3)
                        flooded = 1;
        } else {
-               vcpu->last_pt_write_gfn = gfn;
-               vcpu->last_pt_write_count = 1;
+               vcpu->arch.last_pt_write_gfn = gfn;
+               vcpu->arch.last_pt_write_count = 1;
+               vcpu->arch.last_pte_updated = NULL;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
-       hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
-               if (page->gfn != gfn || page->role.metaphysical)
+       hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
+               if (sp->gfn != gfn || sp->role.metaphysical)
                        continue;
-               pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+               pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                misaligned |= bytes < 4;
                if (misaligned || flooded) {
@@ -1170,14 +1380,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
-                                gpa, bytes, page->role.word);
-                       kvm_mmu_zap_page(vcpu->kvm, page);
+                                gpa, bytes, sp->role.word);
+                       kvm_mmu_zap_page(vcpu->kvm, sp);
+                       ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
                page_offset = offset;
-               level = page->role.level;
+               level = sp->role.level;
                npte = 1;
-               if (page->role.glevels == PT32_ROOT_LEVEL) {
+               if (sp->role.glevels == PT32_ROOT_LEVEL) {
                        page_offset <<= 1;      /* 32->64 */
                        /*
                         * A 32-bit pde maps 4MB while the shadow pdes map
@@ -1191,46 +1402,91 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        }
                        quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
-                       if (quadrant != page->role.quadrant)
+                       if (quadrant != sp->role.quadrant)
                                continue;
                }
-               spte = &page->spt[page_offset / sizeof(*spte)];
+               spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
-                       mmu_pte_write_zap_pte(vcpu, page, spte);
-                       mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
+                       entry = *spte;
+                       mmu_pte_write_zap_pte(vcpu, sp, spte);
+                       mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
+                                             page_offset & (pte_size - 1));
+                       mmu_pte_write_flush_tlb(vcpu, entry, *spte);
                        ++spte;
                }
        }
+       kvm_mmu_audit(vcpu, "post pte write");
 }
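/*
 * Hypothetical caller sketch (the helper name and the use of
 * kvm_write_guest() are illustrative assumptions, not part of this
 * file): the instruction emulator first commits the write to guest
 * memory and then lets kvm_mmu_pte_write() fold it into any shadow page
 * tables that map the gfn.  The caller is assumed to hold kvm->lock.
 */
static int emulator_commit_pt_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				    const void *val, int bytes)
{
	if (kvm_write_guest(vcpu->kvm, gpa, val, bytes) < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}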
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
-       return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+       return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
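/*
 * kvm_mmu_unprotect_page_virt() above translates a guest virtual address
 * through the current paging mode and drops any shadow page covering the
 * resulting gfn; the typical use is to let a write that could not be
 * emulated simply be retried against an unshadowed page.
 */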
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
        while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
-               struct kvm_mmu_page *page;
+               struct kvm_mmu_page *sp;
+
+               sp = container_of(vcpu->kvm->active_mmu_pages.prev,
+                                 struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu->kvm, sp);
+               ++vcpu->kvm->stat.mmu_recycled;
+       }
+}
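/*
 * __kvm_mmu_free_some_pages() above recycles shadow pages from the tail
 * of kvm->active_mmu_pages (the oldest entries) until KVM_REFILL_PAGES
 * pages are free again, counting each zapped page in mmu_recycled.
 */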
+
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+{
+       int r;
+       enum emulation_result er;
+
+       mutex_lock(&vcpu->kvm->lock);
+       r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+       if (r < 0)
+               goto out;
 
-               page = container_of(vcpu->kvm->active_mmu_pages.prev,
-                                   struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu->kvm, page);
+       if (!r) {
+               r = 1;
+               goto out;
+       }
+
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               goto out;
+
+       er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
+       mutex_unlock(&vcpu->kvm->lock);
+
+       switch (er) {
+       case EMULATE_DONE:
+               return 1;
+       case EMULATE_DO_MMIO:
+               ++vcpu->stat.mmio_exits;
+               return 0;
+       case EMULATE_FAIL:
+               kvm_report_emulation_failure(vcpu, "pagetable");
+               return 1;
+       default:
+               BUG();
        }
+out:
+       mutex_unlock(&vcpu->kvm->lock);
+       return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
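/*
 * Minimal sketch of how an arch-specific exit handler (vmx.c/svm.c) is
 * expected to use the export above; the function name and locals are
 * assumed for illustration only.
 */
static int handle_shadow_page_fault(struct kvm_vcpu *vcpu,
				    gva_t cr2, u32 error_code)
{
	int r = kvm_mmu_page_fault(vcpu, cr2, error_code);

	/*
	 * r == 1: fault fixed or access emulated, resume the guest;
	 * r == 0: hand the exit to userspace (e.g. MMIO);
	 * r  < 0: internal error, propagate it.
	 */
	return r;
}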
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
 
        while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-               page = container_of(vcpu->kvm->active_mmu_pages.next,
-                                   struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu->kvm, page);
+               sp = container_of(vcpu->kvm->active_mmu_pages.next,
+                                 struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu->kvm, sp);
        }
-       free_page((unsigned long)vcpu->mmu.pae_root);
+       free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1240,8 +1496,10 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
        ASSERT(vcpu);
 
-       vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
-
+       if (vcpu->kvm->n_requested_mmu_pages)
+               vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+       else
+               vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
@@ -1250,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
-       vcpu->mmu.pae_root = page_address(page);
+       vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
-               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+               vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 
        return 0;
 
@@ -1264,7 +1522,7 @@ error_1:
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        return alloc_mmu_pages(vcpu);
 }
@@ -1272,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        return init_kvm_mmu(vcpu);
 }
@@ -1288,31 +1546,29 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
 
-       list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+       list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
                int i;
                u64 *pt;
 
-               if (!test_bit(slot, &page->slot_bitmap))
+               if (!test_bit(slot, &sp->slot_bitmap))
                        continue;
 
-               pt = page->spt;
+               pt = sp->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
-                       if (pt[i] & PT_WRITABLE_MASK) {
-                               rmap_remove(&pt[i]);
+                       if (pt[i] & PT_WRITABLE_MASK)
                                pt[i] &= ~PT_WRITABLE_MASK;
-                       }
        }
 }
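/*
 * kvm_mmu_slot_remove_write_access() above backs dirty-page logging:
 * every spte belonging to the given memory slot loses PT_WRITABLE_MASK,
 * so the next guest write faults and can be recorded in the dirty
 * bitmap.  Note the sptes are only made read-only here; they stay in
 * the rmap.
 */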
 
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
-       struct kvm_mmu_page *page, *node;
+       struct kvm_mmu_page *sp, *node;
 
-       list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
-               kvm_mmu_zap_page(kvm, page);
+       list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
+               kvm_mmu_zap_page(kvm, sp);
 
        kvm_flush_remote_tlbs(kvm);
 }
@@ -1353,6 +1609,25 @@ nomem:
        return -ENOMEM;
 }
 
+/*
+ * Calculate mmu pages needed for kvm.
+ */
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+{
+       int i;
+       unsigned int nr_mmu_pages;
+       unsigned int  nr_pages = 0;
+
+       for (i = 0; i < kvm->nmemslots; i++)
+               nr_pages += kvm->memslots[i].npages;
+
+       nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
+       nr_mmu_pages = max(nr_mmu_pages,
+                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+
+       return nr_mmu_pages;
+}
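/*
 * Worked example for the sizing above, assuming the usual values
 * KVM_PERMILLE_MMU_PAGES == 20 and KVM_MIN_ALLOC_MMU_PAGES == 64: a
 * guest with 512MB of memory registers 131072 pages, so
 *
 *	nr_mmu_pages = 131072 * 20 / 1000 = 2621
 *
 * shadow pages are reserved; only very small guests fall back to the
 * 64-page minimum.
 */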
+
 #ifdef AUDIT
 
 static const char *audit_msg;
@@ -1375,22 +1650,36 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];
 
-               if (!(ent & PT_PRESENT_MASK))
+               if (ent == shadow_trap_nonpresent_pte)
                        continue;
 
                va = canonicalize(va);
-               if (level > 1)
+               if (level > 1) {
+                       if (ent == shadow_notrap_nonpresent_pte)
+                               printk(KERN_ERR "audit: (%s) nontrapping pte"
+                                      " in nonleaf level: levels %d gva %lx"
+                                      " level %d pte %llx\n", audit_msg,
+                                      vcpu->arch.mmu.root_level, va, level, ent);
+
                        audit_mappings_page(vcpu, ent, va, level - 1);
-               else {
-                       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
-                       hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+               } else {
+                       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
+                       struct page *page = gpa_to_page(vcpu, gpa);
+                       hpa_t hpa = page_to_phys(page);
 
-                       if ((ent & PT_PRESENT_MASK)
+                       if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
-                               printk(KERN_ERR "audit error: (%s) levels %d"
-                                      " gva %lx gpa %llx hpa %llx ent %llx\n",
-                                      audit_msg, vcpu->mmu.root_level,
-                                      va, gpa, hpa, ent);
+                               printk(KERN_ERR "audit error: (%s) levels %d"
+                                      " gva %lx gpa %llx hpa %llx ent %llx %d\n",
+                                      audit_msg, vcpu->arch.mmu.root_level,
+                                      va, gpa, hpa, ent,
+                                      is_shadow_present_pte(ent));
+                       else if (ent == shadow_notrap_nonpresent_pte
+                                && !is_error_hpa(hpa))
+                               printk(KERN_ERR "audit: (%s) notrap shadow,"
+                                      " valid guest gva %lx\n", audit_msg, va);
+                       kvm_release_page_clean(page);
                }
        }
 }
@@ -1399,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 {
        unsigned i;
 
-       if (vcpu->mmu.root_level == 4)
-               audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+       if (vcpu->arch.mmu.root_level == 4)
+               audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
-                       if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+                       if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
-                                                   vcpu->mmu.pae_root[i],
+                                                   vcpu->arch.mmu.pae_root[i],
                                                    i << 30,
                                                    2);
 }
@@ -1420,15 +1709,15 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
                struct kvm_rmap_desc *d;
 
                for (j = 0; j < m->npages; ++j) {
-                       struct page *page = m->phys_mem[j];
+                       unsigned long *rmapp = &m->rmap[j];
 
-                       if (!page->private)
+                       if (!*rmapp)
                                continue;
-                       if (!(page->private & 1)) {
+                       if (!(*rmapp & 1)) {
                                ++nmaps;
                                continue;
                        }
-                       d = (struct kvm_rmap_desc *)(page->private & ~1ul);
+                       d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                        while (d) {
                                for (k = 0; k < RMAP_EXT; ++k)
                                        if (d->shadow_ptes[k])
@@ -1445,13 +1734,13 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 static int count_writable_mappings(struct kvm_vcpu *vcpu)
 {
        int nmaps = 0;
-       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *sp;
        int i;
 
-       list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-               u64 *pt = page->spt;
+       list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+               u64 *pt = sp->spt;
 
-               if (page->role.level != PT_PAGE_TABLE_LEVEL)
+               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                        continue;
 
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -1479,23 +1768,23 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 
 static void audit_write_protection(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu_page *page;
-
-       list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-               hfn_t hfn;
-               struct page *pg;
+       struct kvm_mmu_page *sp;
+       struct kvm_memory_slot *slot;
+       unsigned long *rmapp;
+       gfn_t gfn;
 
-               if (page->role.metaphysical)
+       list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+               if (sp->role.metaphysical)
                        continue;
 
-               hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
-                       >> PAGE_SHIFT;
-               pg = pfn_to_page(hfn);
-               if (pg->private)
+               slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
+               gfn = unalias_gfn(vcpu->kvm, sp->gfn);
+               rmapp = &slot->rmap[gfn - slot->base_gfn];
+               if (*rmapp)
                        printk(KERN_ERR "%s: (%s) shadow page has writable"
                               " mappings: gfn %lx role %x\n",
-                              __FUNCTION__, audit_msg, page->gfn,
-                              page->role.word);
+                              __FUNCTION__, audit_msg, sp->gfn,
+                              sp->role.word);
        }
 }