X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=virt%2Fkvm%2Fkvm_main.c;h=d83aa5e700d599ae8184db5a68ca7998da748195;hp=d9cfb782cb81a87fc5ef4f1734df1a308a55539b;hb=7a99657f9dc3c00998e6313295832687b70ed52f;hpb=1583171492eb514e267c7b51e96846b38de0f544

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d9cfb782cb81..d83aa5e700d5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 	 */
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
+
 	kvm->mmu_notifier_seq++;
 	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
 		kvm_flush_remote_tlbs(kvm);
 
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
 	need_tlb_flush |= kvm->tlbs_dirty;
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
 		kvm_flush_remote_tlbs(kvm);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
-	young = kvm_age_hva(kvm, address);
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
 
+	young = kvm_age_hva(kvm, address);
 	if (young)
 		kvm_flush_remote_tlbs(kvm);
 
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
 	return young;
 }
 
@@ -773,7 +774,7 @@ skip_lpage:
 		new.userspace_addr = mem->userspace_addr;
 #endif /* not defined CONFIG_S390 */
 
-	if (!npages) {
+	if (!npages || base_gfn != old.base_gfn) {
 		r = -ENOMEM;
 		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 		if (!slots)
@@ -787,8 +788,10 @@ skip_lpage:
 		old_memslots = kvm->memslots;
 		rcu_assign_pointer(kvm->memslots, slots);
 		synchronize_srcu_expedited(&kvm->srcu);
-		/* From this point no new shadow pages pointing to a deleted
-		 * memslot will be created.
+		/* slot was deleted or moved, clear iommu mapping */
+		kvm_iommu_unmap_pages(kvm, &old);
+		/* From this point no new shadow pages pointing to a deleted,
+		 * or moved, memslot will be created.
 		 *
 		 * validation of sp->gfn happens in:
 		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
@@ -802,13 +805,6 @@ skip_lpage:
 	if (r)
 		goto out_free;
 
-	/* map the pages in iommu page table */
-	if (npages) {
-		r = kvm_iommu_map_pages(kvm, &new);
-		if (r)
-			goto out_free;
-	}
-
 	r = -ENOMEM;
 	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!slots)
@@ -818,6 +814,13 @@ skip_lpage:
 		slots->nmemslots = mem->slot + 1;
 	slots->generation++;
 
+	/* map new memory slot into the iommu */
+	if (npages) {
+		r = kvm_iommu_map_pages(kvm, &new);
+		if (r)
+			goto out_slots;
+	}
+
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
 		new.rmap = NULL;
@@ -845,6 +848,8 @@ skip_lpage:
 
 	return 0;
 
+out_slots:
+	kfree(slots);
 out_free:
 	kvm_free_physmem_slot(&new, &old);
 out:
@@ -1399,21 +1404,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa)
+			      gpa_t gpa, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int offset = offset_in_page(gpa);
-	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t start_gfn = gpa >> PAGE_SHIFT;
+	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+	gfn_t nr_pages_avail;
 
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
-	ghc->memslot = __gfn_to_memslot(slots, gfn);
-	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-	if (!kvm_is_error_hva(ghc->hva))
+	ghc->len = len;
+	ghc->memslot = __gfn_to_memslot(slots, start_gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
 		ghc->hva += offset;
-	else
-		return -EFAULT;
-
+	} else {
+		/*
+		 * If the requested region crosses two memslots, we still
+		 * verify that the entire region is valid here.
+		 */
+		while (start_gfn <= end_gfn) {
+			ghc->memslot = __gfn_to_memslot(slots, start_gfn);
+			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+						   &nr_pages_avail);
+			if (kvm_is_error_hva(ghc->hva))
+				return -EFAULT;
+			start_gfn += nr_pages_avail;
+		}
+		/* Use the slow path for cross page reads and writes. */
+		ghc->memslot = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
@@ -1424,8 +1446,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -1445,8 +1472,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -1654,6 +1686,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	int r;
 	struct kvm_vcpu *vcpu, *v;
 
+	if (id >= KVM_MAX_VCPUS)
+		return -EINVAL;
+
 	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
@@ -1665,6 +1700,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 		goto vcpu_destroy;
 
 	mutex_lock(&kvm->lock);
+	if (!kvm_vcpu_compatible(vcpu)) {
+		r = -EINVAL;
+		goto unlock_vcpu_destroy;
+	}
 	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
 		r = -EINVAL;
 		goto unlock_vcpu_destroy;
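
A minimal usage sketch, not part of the patch above: the function name and the u64 payload are hypothetical, and only calls that appear in this diff (kvm_gfn_to_hva_cache_init, kvm_write_guest_cached, kvm_write_guest) are assumed. It shows how a caller drives the reworked gfn_to_hva_cache API: the cache is now initialised with the length it will serve, and a region that crosses a memslot boundary leaves ghc->memslot NULL so the cached helper falls back to the uncached slow path.

/*
 * Hypothetical caller, for illustration only.
 */
static int example_write_guest_u64(struct kvm *kvm, gpa_t gpa, u64 val)
{
	struct gfn_to_hva_cache ghc;
	int r;

	/* Validate the whole [gpa, gpa + sizeof(val)) range up front. */
	r = kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val));
	if (r)
		return r;

	/*
	 * Fast path when the range sits inside one memslot; if it crosses
	 * a memslot boundary, ghc.memslot is NULL and this falls back to
	 * kvm_write_guest().  A write longer than the cached length would
	 * trip the BUG_ON() added by this patch.
	 */
	return kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
}

Keeping the length in the cache is what lets the fast path stay a single bounds check, while the cross-slot case trades the cached translation for correctness rather than failing outright.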