KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
author Marcelo Tosatti <mtosatti@redhat.com>
Thu, 6 Aug 2009 17:40:07 +0000 (14:40 -0300)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 9 Sep 2009 03:17:29 +0000 (20:17 -0700)
(cherry picked from commit 7c8a83b75a38a807d37f5a4398eca2a42c8cf513)

kvm_handle_hva, called by the MMU notifiers, manipulates MMU data only
under the protection of mmu_lock.

Update the kvm_mmu_change_mmu_pages callers to take mmu_lock as well, thus
protecting against a concurrent kvm_handle_hva.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
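
The patch establishes a convention: callers of kvm_mmu_change_mmu_pages()
and kvm_mmu_slot_remove_write_access() hold mmu_lock, just as the MMU
notifier path does. A minimal sketch of that convention follows;
example_resize_mmu is a hypothetical caller used only for illustration and
is not part of this patch (the real callers below also hold slots_lock
around the critical section and flush remote TLBs afterwards):

	/*
	 * Illustrative only: any path that resizes the shadow-page pool
	 * must now take mmu_lock itself, so that it is serialized against
	 * kvm_handle_hva() running from an MMU notifier.
	 */
	static void example_resize_mmu(struct kvm *kvm, unsigned int nr_pages)
	{
		spin_lock(&kvm->mmu_lock);
		kvm_mmu_change_mmu_pages(kvm, nr_pages);
		spin_unlock(&kvm->mmu_lock);
	}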
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ca65ef5..95c65e3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2059,7 +2059,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
 
-       spin_lock(&kvm->mmu_lock);
        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;
@@ -2074,7 +2073,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
-       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9ead469..f7c7142 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1454,10 +1454,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                return -EINVAL;
 
        down_write(&kvm->slots_lock);
+       spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+       spin_unlock(&kvm->mmu_lock);
        up_write(&kvm->slots_lock);
        return 0;
 }
@@ -1624,7 +1626,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
+               spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
+               spin_unlock(&kvm->mmu_lock);
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots[log->slot];
                n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4059,12 +4063,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                }
        }
 
+       spin_lock(&kvm->mmu_lock);
        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+       spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
 
        return 0;