KVM: MMU: retry #PF for softmmu
author Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Tue, 7 Dec 2010 02:35:25 +0000 (10:35 +0800)
committer Avi Kivity <avi@redhat.com>
Wed, 12 Jan 2011 09:30:41 +0000 (11:30 +0200)
Retry a #PF for softmmu only when the vcpu's current cr3 matches the
cr3 that was in effect when the #PF occurred.  The gva recorded for an
async #PF is only meaningful under the original address space; if the
guest has switched cr3 since the fault was queued, replaying it could
instantiate a mapping in the wrong page tables.  Record cr3 in
kvm_arch_async_pf at setup time and check it before retrying, and do
not inject a guest page fault when the guest walk fails during a
prefault retry.
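
In effect, the completion path replays the fault only when the mmu mode
is unchanged and, for softmmu, the vcpu still runs under the recorded
cr3.  A minimal userspace model of that decision, condensed from the
x86.c hunks below (struct and function names here are illustrative, not
kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Captured when the async #PF is queued (cf. kvm_arch_setup_async_pf). */
struct async_pf_arch {
	unsigned long cr3;
	bool direct_map;
};

/* Current vcpu mmu state at async page-ready time. */
struct vcpu_mmu {
	unsigned long cr3;
	bool direct_map;
};

/* Mirrors the guards added to kvm_arch_async_page_ready(). */
static bool should_retry_pf(const struct vcpu_mmu *mmu,
			    const struct async_pf_arch *arch)
{
	if (mmu->direct_map != arch->direct_map)
		return false;		/* mmu mode changed meanwhile */
	if (!mmu->direct_map && arch->cr3 != mmu->cr3)
		return false;		/* softmmu: guest switched cr3 */
	return true;
}

int main(void)
{
	struct vcpu_mmu mmu = { .cr3 = 0x1000, .direct_map = false };
	struct async_pf_arch arch = { .cr3 = 0x2000, .direct_map = false };

	/* 0: the gva was translated under another cr3, do not replay */
	printf("%d\n", should_retry_pf(&mmu, &arch));

	arch.cr3 = 0x1000;
	/* 1: same address space, safe to replay the fault */
	printf("%d\n", should_retry_pf(&mmu, &arch));
	return 0;
}

Note that the tdp (direct_map) case needs no cr3 check: the fault is
keyed by gfn, which does not depend on the guest's cr3.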

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa1518d..4461429 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -593,6 +593,7 @@ struct kvm_x86_ops {
 struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
+       unsigned long cr3;
        bool direct_map;
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d475b6b..abda57f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2608,9 +2608,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 {
        struct kvm_arch_async_pf arch;
+
        arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu.direct_map;
+       arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
        return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 52b3e91..146b681 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -438,7 +438,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int user_fault, int write_fault, int hlevel,
-                        int *ptwrite, pfn_t pfn, bool map_writable)
+                        int *ptwrite, pfn_t pfn, bool map_writable,
+                        bool prefault)
 {
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *sp = NULL;
@@ -512,7 +513,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
        mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
                     user_fault, write_fault, dirty, ptwrite, it.level,
-                    gw->gfn, pfn, false, map_writable);
+                    gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
        return it.sptep;
@@ -568,8 +569,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
-               inject_page_fault(vcpu, &walker.fault);
-               vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+               if (!prefault) {
+                       inject_page_fault(vcpu, &walker.fault);
+                       /* reset fork detector */
+                       vcpu->arch.last_pt_write_count = 0;
+               }
                return 0;
        }
 
@@ -599,7 +603,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-                            level, &write_pt, pfn, map_writable);
+                            level, &write_pt, pfn, map_writable, prefault);
        (void)sptep;
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 sptep, *sptep, write_pt);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8b4d5fc..cd71d21 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6182,7 +6182,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
        int r;
 
-       if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map ||
+       if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
              is_error_page(work->page))
                return;
 
@@ -6190,6 +6190,10 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
        if (unlikely(r))
                return;
 
+       if (!vcpu->arch.mmu.direct_map &&
+             work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+               return;
+
        vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
 }