KVM: Portability: Introduce kvm_vcpu_arch
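
In mmu.c, this patch moves the per-vcpu x86 state that the shadow MMU touches (cr0, cr3, shadow_efer, the pdptrs, the mmu context, the four MMU memory caches, and the last_pt_write/last_pte_updated write-flood tracking) out of struct kvm_vcpu and into the new struct kvm_vcpu_arch, reached as vcpu->arch. Along the way it introduces the ACC_* permission masks, widens the page role's hugepage_access field into a general access mask, consolidates shadow pte construction into mmu_set_spte(), changes nonpaging_map() to take a write flag and a gfn instead of a struct page, adds a new_page out-parameter to kvm_mmu_get_page(), and routes page fault injection through kvm_inject_page_fault().

As a rough sketch only (the authoritative definition lives in the x86 header added by the full patch, which is not shown on this page, and the field types below are inferred from how mmu.c uses them), the part of the new per-vcpu container referenced here looks approximately like:

struct kvm_vcpu_arch {
	unsigned long cr0;
	unsigned long cr3;
	u64 shadow_efer;
	u64 pdptrs[4];			/* PAE page-directory pointers */

	struct kvm_mmu mmu;
	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;	/* write-flood detection */
	int last_pt_write_count;
	u64 *last_pte_updated;
	/* ... further x86 state moved by the rest of the patch ... */
};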
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9b9d1b6..da1dedb 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -85,7 +85,8 @@ static int dbg = 1;
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
-#define PT64_NX_MASK (1ULL << 63)
+#define PT64_NX_SHIFT 63
+#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
 
 #define PT_PAT_SHIFT 7
 #define PT_DIR_PAT_SHIFT 12
@@ -153,6 +154,11 @@ static int dbg = 1;
 
 #define RMAP_EXT 4
 
+#define ACC_EXEC_MASK    1
+#define ACC_WRITE_MASK   PT_WRITABLE_MASK
+#define ACC_USER_MASK    PT_USER_MASK
+#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+
 struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
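
The ACC_* masks deliberately alias the hardware pte permission bits: ACC_WRITE_MASK and ACC_USER_MASK are PT_WRITABLE_MASK and PT_USER_MASK themselves, so write and user permission can be taken straight from a guest pte, while execute permission gets its own bit because it is the inverse of NX. The conversion itself lives in the guest page table walker (paging_tmpl.h, not part of this hunk); as an illustrative sketch only, with a hypothetical helper name, it amounts to:

static unsigned gpte_to_access(u64 gpte)
{
	/* Write/user permissions alias the hardware bits, so copy them directly. */
	unsigned access = gpte & (ACC_WRITE_MASK | ACC_USER_MASK);

	/* Executable unless the guest pte has the NX bit set. */
	if (!(gpte & PT64_NX_MASK))
		access |= ACC_EXEC_MASK;
	return access;
}

mmu_set_spte() below then tests the resulting mask against ACC_WRITE_MASK, ACC_USER_MASK and ACC_EXEC_MASK when it builds the shadow pte.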
@@ -174,7 +180,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr0 & X86_CR0_WP;
+       return vcpu->arch.cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -184,7 +190,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-       return vcpu->shadow_efer & EFER_NX;
+       return vcpu->arch.shadow_efer & EFER_NX;
 }
 
 static int is_present_pte(unsigned long pte)
@@ -286,18 +292,18 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
        int r;
 
        kvm_mmu_free_some_pages(vcpu);
-       r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                                   rmap_desc_cache, 1);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
+       r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
 out:
        return r;
@@ -305,10 +311,10 @@ out:
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-       mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-       mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-       mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+       mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -324,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+       return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
 }
 
@@ -335,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+       return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
 }
 
@@ -562,9 +568,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;
 
-       sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
-       sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-       sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+       sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+       sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+       sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(sp->spt));
@@ -674,8 +680,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
-                                            unsigned hugepage_access,
-                                            u64 *parent_pte)
+                                            unsigned access,
+                                            u64 *parent_pte,
+                                            bool *new_page)
 {
        union kvm_mmu_page_role role;
        unsigned index;
@@ -685,11 +692,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        struct hlist_node *node;
 
        role.word = 0;
-       role.glevels = vcpu->mmu.root_level;
+       role.glevels = vcpu->arch.mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
-       role.hugepage_access = hugepage_access;
-       if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+       role.access = access;
+       if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
@@ -711,9 +718,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
-       vcpu->mmu.prefetch_page(vcpu, sp);
+       vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
+       if (new_page)
+               *new_page = 1;
        return sp;
 }
 
@@ -759,7 +768,7 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
-                       kvm->vcpus[i]->last_pte_updated = NULL;
+                       kvm->vcpus[i]->arch.last_pte_updated = NULL;
 }
 
 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -866,51 +875,117 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        if (gpa == UNMAPPED_GVA)
                return NULL;
        return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+                        unsigned pt_access, unsigned pte_access,
+                        int user_fault, int write_fault, int dirty,
+                        int *ptwrite, gfn_t gfn)
+{
+       u64 spte;
+       int was_rmapped = is_rmap_pte(*shadow_pte);
+       struct page *page;
+
+       pgprintk("%s: spte %llx access %x write_fault %d"
+                " user_fault %d gfn %lx\n",
+                __FUNCTION__, *shadow_pte, pt_access,
+                write_fault, user_fault, gfn);
+
+       /*
+        * We don't set the accessed bit, since we sometimes want to see
+        * whether the guest actually used the pte (in order to detect
+        * demand paging).
+        */
+       spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+       if (!dirty)
+               pte_access &= ~ACC_WRITE_MASK;
+       if (!(pte_access & ACC_EXEC_MASK))
+               spte |= PT64_NX_MASK;
+
+       page = gfn_to_page(vcpu->kvm, gfn);
+
+       spte |= PT_PRESENT_MASK;
+       if (pte_access & ACC_USER_MASK)
+               spte |= PT_USER_MASK;
+
+       if (is_error_page(page)) {
+               set_shadow_pte(shadow_pte,
+                              shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
+               kvm_release_page_clean(page);
+               return;
+       }
+
+       spte |= page_to_phys(page);
+
+       if ((pte_access & ACC_WRITE_MASK)
+           || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+               struct kvm_mmu_page *shadow;
+
+               spte |= PT_WRITABLE_MASK;
+               if (user_fault) {
+                       mmu_unshadow(vcpu->kvm, gfn);
+                       goto unshadowed;
+               }
+
+               shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
+               if (shadow) {
+                       pgprintk("%s: found shadow page for %lx, marking ro\n",
+                                __FUNCTION__, gfn);
+                       pte_access &= ~ACC_WRITE_MASK;
+                       if (is_writeble_pte(spte)) {
+                               spte &= ~PT_WRITABLE_MASK;
+                               kvm_x86_ops->tlb_flush(vcpu);
+                       }
+                       if (write_fault)
+                               *ptwrite = 1;
+               }
+       }
+
+unshadowed:
+
+       if (pte_access & ACC_WRITE_MASK)
+               mark_page_dirty(vcpu->kvm, gfn);
+
+       pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+       set_shadow_pte(shadow_pte, spte);
+       page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+       if (!was_rmapped) {
+               rmap_add(vcpu, shadow_pte, gfn);
+               if (!is_rmap_pte(*shadow_pte))
+                       kvm_release_page_clean(page);
+       }
+       else
+               kvm_release_page_clean(page);
+       if (!ptwrite || !*ptwrite)
+               vcpu->arch.last_pte_updated = shadow_pte;
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
        int level = PT32E_ROOT_LEVEL;
-       hpa_t table_addr = vcpu->mmu.root_hpa;
+       hpa_t table_addr = vcpu->arch.mmu.root_hpa;
+       int pt_write = 0;
 
        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
-               u64 pte;
 
                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);
 
                if (level == 1) {
-                       int was_rmapped;
-
-                       pte = table[index];
-                       was_rmapped = is_rmap_pte(pte);
-                       if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-                               kvm_release_page_clean(page);
-                               return 0;
-                       }
-                       mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
-                       page_header_update_slot(vcpu->kvm, table,
-                                               v >> PAGE_SHIFT);
-                       table[index] = page_to_phys(page)
-                               | PT_PRESENT_MASK | PT_WRITABLE_MASK
-                               | PT_USER_MASK;
-                       if (!was_rmapped)
-                               rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
-                       else
-                               kvm_release_page_clean(page);
-
-                       return 0;
+                       mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+                                    0, write, 1, &pt_write, gfn);
+                       return pt_write || is_io_pte(table[index]);
                }
 
                if (table[index] == shadow_trap_nonpresent_pte) {
@@ -921,10 +996,10 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, 3, &table[index]);
+                                                    1, ACC_ALL, &table[index],
+                                                    NULL);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
-                               kvm_release_page_clean(page);
                                return -ENOMEM;
                        }
 
@@ -949,29 +1024,29 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        int i;
        struct kvm_mmu_page *sp;
 
-       if (!VALID_PAGE(vcpu->mmu.root_hpa))
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
 #ifdef CONFIG_X86_64
-       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->mmu.root_hpa;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
 
                sp = page_header(root);
                --sp->root_count;
-               vcpu->mmu.root_hpa = INVALID_PAGE;
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                return;
        }
 #endif
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->mmu.pae_root[i];
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                if (root) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        --sp->root_count;
                }
-               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+               vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
-       vcpu->mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
@@ -980,41 +1055,41 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
        gfn_t root_gfn;
        struct kvm_mmu_page *sp;
 
-       root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+       root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
-       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->mmu.root_hpa;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
 
                ASSERT(!VALID_PAGE(root));
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                     PT64_ROOT_LEVEL, 0, 0, NULL);
+                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
-               vcpu->mmu.root_hpa = root;
+               vcpu->arch.mmu.root_hpa = root;
                return;
        }
 #endif
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->mmu.pae_root[i];
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                ASSERT(!VALID_PAGE(root));
-               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-                       if (!is_present_pte(vcpu->pdptrs[i])) {
-                               vcpu->mmu.pae_root[i] = 0;
+               if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+                       if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+                               vcpu->arch.mmu.pae_root[i] = 0;
                                continue;
                        }
-                       root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-               } else if (vcpu->mmu.root_level == 0)
+                       root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+               } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                     0, NULL);
+                                     ACC_ALL, NULL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
-               vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+               vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
-       vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+       vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -1025,24 +1100,21 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code)
 {
-       struct page *page;
+       gfn_t gfn;
        int r;
 
+       pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
 
        ASSERT(vcpu);
-       ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-       page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);
-
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
-               return 1;
-       }
+       gfn = gva >> PAGE_SHIFT;
 
-       return nonpaging_map(vcpu, gva & PAGE_MASK, page);
+       return nonpaging_map(vcpu, gva & PAGE_MASK,
+                            error_code & PFERR_WRITE_MASK, gfn);
 }
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1052,7 +1124,7 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
@@ -1081,7 +1153,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
 {
-       kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
+       kvm_inject_page_fault(vcpu, addr, err_code);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -1099,7 +1171,7 @@ static void paging_free(struct kvm_vcpu *vcpu)
 
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
@@ -1120,7 +1192,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
@@ -1141,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
@@ -1156,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-               vcpu->mmu.free(vcpu);
-               vcpu->mmu.root_hpa = INVALID_PAGE;
+       if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               vcpu->arch.mmu.free(vcpu);
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        }
 }
 
@@ -1178,7 +1250,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        mmu_alloc_roots(vcpu);
-       kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+       kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
        mutex_unlock(&vcpu->kvm->lock);
@@ -1251,7 +1323,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
 
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
-       u64 *spte = vcpu->last_pte_updated;
+       u64 *spte = vcpu->arch.last_pte_updated;
 
        return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
@@ -1278,15 +1350,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, "pre pte write");
-       if (gfn == vcpu->last_pt_write_gfn
+       if (gfn == vcpu->arch.last_pt_write_gfn
            && !last_updated_pte_accessed(vcpu)) {
-               ++vcpu->last_pt_write_count;
-               if (vcpu->last_pt_write_count >= 3)
+               ++vcpu->arch.last_pt_write_count;
+               if (vcpu->arch.last_pt_write_count >= 3)
                        flooded = 1;
        } else {
-               vcpu->last_pt_write_gfn = gfn;
-               vcpu->last_pt_write_count = 1;
-               vcpu->last_pte_updated = NULL;
+               vcpu->arch.last_pt_write_gfn = gfn;
+               vcpu->arch.last_pt_write_count = 1;
+               vcpu->arch.last_pte_updated = NULL;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
@@ -1348,7 +1420,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
@@ -1371,7 +1443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
        enum emulation_result er;
 
        mutex_lock(&vcpu->kvm->lock);
-       r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+       r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
        if (r < 0)
                goto out;
 
@@ -1414,7 +1486,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                                  struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, sp);
        }
-       free_page((unsigned long)vcpu->mmu.pae_root);
+       free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1436,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
-       vcpu->mmu.pae_root = page_address(page);
+       vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
-               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+               vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 
        return 0;
 
@@ -1450,7 +1522,7 @@ error_1:
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        return alloc_mmu_pages(vcpu);
 }
@@ -1458,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        return init_kvm_mmu(vcpu);
 }
@@ -1587,11 +1659,11 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                printk(KERN_ERR "audit: (%s) nontrapping pte"
                                       " in nonleaf level: levels %d gva %lx"
                                       " level %d pte %llx\n", audit_msg,
-                                      vcpu->mmu.root_level, va, level, ent);
+                                      vcpu->arch.mmu.root_level, va, level, ent);
 
                        audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
-                       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+                       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
                        struct page *page = gpa_to_page(vcpu, gpa);
                        hpa_t hpa = page_to_phys(page);
 
@@ -1599,7 +1671,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
                                printk(KERN_ERR "xx audit error: (%s) levels %d"
                                       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-                                      audit_msg, vcpu->mmu.root_level,
+                                      audit_msg, vcpu->arch.mmu.root_level,
                                       va, gpa, hpa, ent,
                                       is_shadow_present_pte(ent));
                        else if (ent == shadow_notrap_nonpresent_pte
@@ -1616,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 {
        unsigned i;
 
-       if (vcpu->mmu.root_level == 4)
-               audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+       if (vcpu->arch.mmu.root_level == 4)
+               audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
-                       if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+                       if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
-                                                   vcpu->mmu.pae_root[i],
+                                                   vcpu->arch.mmu.pae_root[i],
                                                    i << 30,
                                                    2);
 }