Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh...
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 81eab9a..89d66ca 100644
@@ -170,7 +170,7 @@ walk:
                        goto access_error;
 
 #if PTTYPE == 64
-               if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
+               if (fetch_fault && (pte & PT64_NX_MASK))
                        goto access_error;
 #endif
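
Dropping the is_nx() guard is safe because of the walker's reserved-bit
checking: with EFER.NX clear, bit 63 of a pte is a reserved bit, so a pte
carrying PT64_NX_MASK is rejected before the fetch-fault test is ever
reached. A minimal sketch of that reasoning (the bit positions are
architectural; the helper name is invented for illustration):

#include <stdint.h>

#define EFER_NX      (1ULL << 11)  /* IA32_EFER.NXE enable bit */
#define PT64_NX_MASK (1ULL << 63)  /* no-execute bit of a 64-bit pte */

/* With EFER.NX clear, bit 63 is reserved; such a pte already failed
 * the reserved-bit check earlier in the walk, so the fetch-fault test
 * can rely on the bit alone. */
static int nx_bit_is_reserved(uint64_t efer, uint64_t pte)
{
        return !(efer & EFER_NX) && (pte & PT64_NX_MASK);
}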
 
@@ -190,10 +190,10 @@ walk:
 
                if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
                    ((walker->level == PT_DIRECTORY_LEVEL) &&
-                               (pte & PT_PAGE_SIZE_MASK)  &&
+                               is_large_pte(pte) &&
                                (PTTYPE == 64 || is_pse(vcpu))) ||
                    ((walker->level == PT_PDPE_LEVEL) &&
-                               (pte & PT_PAGE_SIZE_MASK)  &&
+                               is_large_pte(pte) &&
                                is_long_mode(vcpu))) {
                        int lvl = walker->level;
 
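The two open-coded PT_PAGE_SIZE_MASK tests become calls to the existing
helper; no behavioral change, purely readability. In this era's
arch/x86/kvm/mmu.c the helper is essentially:

static int is_large_pte(u64 pte)
{
        return pte & PT_PAGE_SIZE_MASK;   /* the PS bit, bit 7 of the pte */
}
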
@@ -258,11 +258,17 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
        pt_element_t gpte;
        unsigned pte_access;
        pfn_t pfn;
+       u64 new_spte;
 
        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-               if (!is_present_gpte(gpte))
-                       __set_spte(spte, shadow_notrap_nonpresent_pte);
+               if (!is_present_gpte(gpte)) {
+                       if (page->unsync)
+                               new_spte = shadow_trap_nonpresent_pte;
+                       else
+                               new_spte = shadow_notrap_nonpresent_pte;
+                       __set_spte(spte, new_spte);
+               }
                return;
        }
        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
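
The new branch matters for unsync shadow pages. A notrap spte encodes
"the guest pte really is not present", so the fault can be reflected to
the guest without re-walking the guest tables; but an unsync page's
gptes can change behind KVM's back, making that cached answer stale.
Using shadow_trap_nonpresent_pte instead forces the next access to
vmexit and re-read the gpte. A sketch of the selection (the constant
values here are illustrative assumptions, not the kernel's):

#include <stdint.h>
#include <stdbool.h>

/* Illustrative placeholders; mmu.c derives the real encodings. */
static const uint64_t shadow_trap_nonpresent_pte   = 0;         /* every access faults into KVM */
static const uint64_t shadow_notrap_nonpresent_pte = 1ULL << 1; /* assumed "guest-not-present" marker */

static uint64_t nonpresent_spte_for(bool page_unsync)
{
        /* unsync: the gpte may have changed, so the spte must trap */
        return page_unsync ? shadow_trap_nonpresent_pte
                           : shadow_notrap_nonpresent_pte;
}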
@@ -457,6 +463,7 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
+       gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
        int need_flush = 0;
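
The pte_gpa = -1 initialization is a "no leaf spte found" sentinel:
gpa_t is unsigned, so -1 becomes the all-ones gpa, which the prefetch
code at the end of the function uses to bail out when the walk never
reached a last-level entry.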
@@ -467,9 +474,16 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                level = iterator.level;
                sptep = iterator.sptep;
 
-               if (level == PT_PAGE_TABLE_LEVEL  ||
-                   ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
-                   ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+               if (is_last_spte(*sptep, level)) {
+                       struct kvm_mmu_page *sp = page_header(__pa(sptep));
+                       int offset, shift;
+
+                       shift = PAGE_SHIFT -
+                                 (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
+                       offset = sp->role.quadrant << shift;
+
+                       pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+                       pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
                        if (is_shadow_present_pte(*sptep)) {
                                rmap_remove(vcpu->kvm, sptep);
@@ -487,7 +501,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
        if (need_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
+
+       atomic_inc(&vcpu->kvm->arch.invlpg_counter);
+
        spin_unlock(&vcpu->kvm->mmu_lock);
+
+       if (pte_gpa == -1)
+               return;
+
+       if (mmu_topup_memory_caches(vcpu))
+               return;
+       kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
 }
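
The invlpg rewrite turns the flush into a prefetch opportunity.
is_last_spte() replaces the hand-rolled level/large-page test for
finding the leaf, and the quadrant arithmetic recovers the gpa of the
guest pte that maps gva; after mmu_lock is dropped,
kvm_mmu_pte_write() can refresh the shadow entry from the current
guest pte instead of leaving it nonpresent. The invlpg_counter
increment lets a concurrent pte-write path detect that an invlpg raced
with it, and a failed mmu_topup_memory_caches() simply skips the
optional prefetch. The offset math handles 32-bit guests, where one
1024-entry guest table is shadowed by two 512-entry shadow pages and
role.quadrant picks the half; a sketch of the byte-offset computation
(function and parameter names are illustrative):

#include <stdint.h>

#define PAGE_SHIFT      12
#define PT_LEVEL_BITS   10      /* 32-bit guest: 1024 entries per table */
#define PT64_LEVEL_BITS 9       /* shadow pages: 512 entries per table */

/* Byte address of the guest pte backing a given shadow-pte slot.
 * For PTTYPE == 64 the level bits match, so shift == PAGE_SHIFT and
 * quadrant is always 0: the offset term vanishes. */
static uint64_t guest_pte_gpa(uint64_t gfn, unsigned int quadrant,
                              int level, unsigned int index,
                              unsigned int pte_size)
{
        int shift = PAGE_SHIFT - (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;

        return (gfn << PAGE_SHIFT) + ((uint64_t)quadrant << shift)
                                   + index * pte_size;
}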
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -551,12 +575,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
        int i, offset, nr_present;
        bool reset_host_protection;
+       gpa_t first_pte_gpa;
 
        offset = nr_present = 0;
 
        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
+       first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
@@ -566,8 +593,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                if (!is_shadow_present_pte(sp->spt[i]))
                        continue;
 
-               pte_gpa = gfn_to_gpa(sp->gfn);
-               pte_gpa += (i+offset) * sizeof(pt_element_t);
+               pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
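
The sync_page change is loop-invariant hoisting, not a semantic change:
gfn_to_gpa(sp->gfn) + (i + offset) * sizeof(pt_element_t) is identical
to (gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t)) +
i * sizeof(pt_element_t), so the invariant part is computed once as
first_pte_gpa instead of on every one of the PT64_ENT_PER_PAGE loop
iterations.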