KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Fri, 10 Feb 2012 06:28:31 +0000 (15:28 +0900)
committerBen Hutchings <ben@decadent.org.uk>
Wed, 30 May 2012 23:43:09 +0000 (00:43 +0100)
(cherry picked from commit 565f3be2174611f364405bbea2d86e153c2e7e78)

Other threads may process the same page in the small window between
releasing mmu_lock and the TLB flush, skip the TLB flush themselves,
and then return before these functions actually do the flush.
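
To make the ordering concrete, here is a minimal user-space model of
the fixed path (an illustrative sketch only, not kernel code: the
fake_kvm structure and notifier_invalidate() are invented names, and a
pthread mutex stands in for mmu_lock):

/*
 * Minimal model of the corrected ordering: the TLB flush happens while
 * mmu_lock is still held, so no other thread can observe the flush as
 * already accounted and free the page before the flush really runs.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_kvm {
	pthread_mutex_t mmu_lock;
	int tlbs_dirty;		/* flushes owed by deferred writers */
	bool tlb_flushed;	/* stands in for the remote TLB state */
};

static void notifier_invalidate(struct fake_kvm *kvm)
{
	int need_tlb_flush;

	pthread_mutex_lock(&kvm->mmu_lock);
	need_tlb_flush = 1 | kvm->tlbs_dirty;	/* "unmap" one page */
	if (need_tlb_flush) {
		kvm->tlb_flushed = true;	/* kvm_flush_remote_tlbs() */
		kvm->tlbs_dirty = 0;
	}
	pthread_mutex_unlock(&kvm->mmu_lock);
	/*
	 * Before the patch the flush sat here, after the unlock: another
	 * thread could take mmu_lock in that window, see the flush as
	 * already accounted, skip its own flush, and let the page be
	 * freed while a stale TLB entry still pointed at it.
	 */
}

int main(void)
{
	struct fake_kvm kvm = { .mmu_lock = PTHREAD_MUTEX_INITIALIZER };

	notifier_invalidate(&kvm);
	printf("tlb flushed before unlock: %s\n",
	       kvm.tlb_flushed ? "yes" : "no");
	return 0;
}

The same "flush before unlock" pattern is applied to all three notifier
paths in the diff below.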

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
virt/kvm/kvm_main.c

index e401c1b..9ffac2e 100644
@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
         */
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
+
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
-
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
 
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        need_tlb_flush |= kvm->tlbs_dirty;
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
-
        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
-       young = kvm_age_hva(kvm, address);
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
 
+       young = kvm_age_hva(kvm, address);
        if (young)
                kvm_flush_remote_tlbs(kvm);
 
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+
        return young;
 }