KVM: fix searching async gfn in kvm_async_pf_gfn_slot
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fff70b5..ab10a6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3208,18 +3208,15 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_memslots *slots, *old_slots;
                unsigned long *dirty_bitmap;
 
-               r = -ENOMEM;
-               dirty_bitmap = vmalloc(n);
-               if (!dirty_bitmap)
-                       goto out;
+               dirty_bitmap = memslot->dirty_bitmap_head;
+               if (memslot->dirty_bitmap == dirty_bitmap)
+                       dirty_bitmap += n / sizeof(long);
                memset(dirty_bitmap, 0, n);
 
                r = -ENOMEM;
                slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-               if (!slots) {
-                       vfree(dirty_bitmap);
+               if (!slots)
                        goto out;
-               }
                memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
                slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
                slots->generation++;
@@ -3235,11 +3232,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                spin_unlock(&kvm->mmu_lock);
 
                r = -EFAULT;
-               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-                       vfree(dirty_bitmap);
+               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
                        goto out;
-               }
-               vfree(dirty_bitmap);
        } else {
                r = -EFAULT;
                if (clear_user(log->dirty_bitmap, n))
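Note on the two hunks above: this removes the per-call vmalloc() from KVM_GET_DIRTY_LOG. The memslot now carries two n-byte bitmaps allocated back to back, starting at dirty_bitmap_head, and each call flips to whichever half is not currently published through memslot->dirty_bitmap; that is also why every vfree() error path disappears. A minimal userspace sketch of the flip, assuming that simplified layout (demo_memslot and pick_spare_half are illustrative names, not kernel identifiers):

    #include <string.h>

    struct demo_memslot {
            unsigned long *dirty_bitmap;      /* half currently live */
            unsigned long *dirty_bitmap_head; /* start of both halves */
    };

    /* Return the idle half, zeroed and ready to be swapped in;
     * n is the size of one bitmap in bytes, as in the hunk above. */
    static unsigned long *pick_spare_half(struct demo_memslot *slot, size_t n)
    {
            unsigned long *spare = slot->dirty_bitmap_head;

            if (slot->dirty_bitmap == spare)   /* head half is live... */
                    spare += n / sizeof(long); /* ...so take the other */
            memset(spare, 0, n);
            return spare;
    }

The copied kvm_memslots then gets its dirty_bitmap pointed at the zeroed spare half and its generation bumped before being swapped in, so userspace reads a stable snapshot while the guest dirties the other half.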
@@ -4013,13 +4007,15 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
                return X86EMUL_CONTINUE;
 
        if (kvm_x86_ops->has_wbinvd_exit()) {
-               preempt_disable();
+               int cpu = get_cpu();
+
+               cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
                smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
                                wbinvd_ipi, NULL, 1);
-               preempt_enable();
+               put_cpu();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-       }
-       wbinvd();
+       } else
+               wbinvd();
        return X86EMUL_CONTINUE;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
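The wbinvd hunk reworks cache-flush emulation for guests with assigned devices. wbinvd_dirty_mask collects the physical CPUs whose caches the vCPU may have dirtied; get_cpu() disables preemption exactly as preempt_disable() did but also returns the current CPU id, so the local CPU is tracked through the same mask, and the formerly unconditional trailing wbinvd() becomes the fallback for hosts without a WBINVD exit. For context, the IPI handler and the point where the mask is filled look roughly like this elsewhere in x86.c (reconstructed for this era of the file, so treat the details as approximate):

    static void wbinvd_ipi(void *garbage)
    {
            wbinvd();               /* write back and invalidate caches */
    }

    /* in kvm_arch_vcpu_load(): remember each pCPU this vCPU touches */
    if (need_emulate_wbinvd(vcpu) && kvm_x86_ops->has_wbinvd_exit())
            cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);

Since smp_call_function_many() is invoked with wait = 1, all remote flushes have completed before cpumask_clear() runs.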
@@ -4693,7 +4689,6 @@ int kvm_arch_init(void *opaque)
 
        kvm_x86_ops = ops;
        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-       kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
                        PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
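The dropped kvm_mmu_set_base_ptes() used to hand the MMU a mask OR'ed into every shadow PTE (just PT_PRESENT_MASK here); that duty moved into the MMU itself, leaving only the per-bit masks to configure at init. Judging from the five arguments at the surviving call site, the remaining setter in mmu.c has this shape (prototype reconstructed, parameter names approximate):

    void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                               u64 dirty_mask, u64 nx_mask, u64 x_mask);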
@@ -6214,8 +6209,8 @@ static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
        u32 key = kvm_async_pf_hash_fn(gfn);
 
        for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
-                    (vcpu->arch.apf.gfns[key] != gfn ||
-                     vcpu->arch.apf.gfns[key] == ~0); i++)
+                    (vcpu->arch.apf.gfns[key] != gfn &&
+                     vcpu->arch.apf.gfns[key] != ~0); i++)
                key = kvm_async_pf_next_probe(key);
 
        return key;
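This hunk is the fix named in the subject line. The gfns[] array is an open-addressed hash table probed linearly, with ~0 marking a free slot. The old condition kept probing whenever the slot merely differed from gfn, so it walked straight past free slots and a lookup for an absent gfn always scanned the whole table; the corrected condition stops at the first match or the first free slot, the standard linear-probing lookup. A self-contained userspace model of the fixed loop (the stand-in hash and the names next_probe/gfn_slot are illustrative; the kernel uses kvm_async_pf_hash_fn() and kvm_async_pf_next_probe()):

    #include <stdint.h>

    #define NSLOTS 64   /* roundup_pow_of_two(ASYNC_PF_PER_VCPU) */

    static uint32_t next_probe(uint32_t key)
    {
            return (key + 1) & (NSLOTS - 1);   /* wrap around the table */
    }

    static uint32_t gfn_slot(const uint64_t *gfns, uint64_t gfn)
    {
            uint32_t key = (uint32_t)gfn & (NSLOTS - 1);   /* stand-in hash */
            int i;

            /* Stop at the first match or the first empty (~0) slot;
             * the old '||' condition probed past empty slots instead. */
            for (i = 0; i < NSLOTS &&
                        gfns[key] != gfn && gfns[key] != ~0ULL; i++)
                    key = next_probe(key);
            return key;
    }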
@@ -6263,7 +6258,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
        kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
        if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
-           kvm_x86_ops->get_cpl(vcpu) == 0)
+           (vcpu->arch.apf.send_user_only &&
+            kvm_x86_ops->get_cpl(vcpu) == 0))
                kvm_make_request(KVM_REQ_APF_HALT, vcpu);
        else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
                vcpu->arch.fault.error_code = 0;
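The final hunk ties async page fault delivery to the new send_user_only flag: a "page not present" notification is now withheld at CPL 0 only when the guest asked for user-only delivery, rather than always. The flag is derived from the guest's MSR_KVM_ASYNC_PF_EN write in kvm_pv_enable_async_pf() in this same file, roughly:

    /* guest opts in to CPL-0 delivery by setting KVM_ASYNC_PF_SEND_ALWAYS */
    vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);

When delivery is withheld, KVM_REQ_APF_HALT parks the vCPU until the page is resident, the same fallback used when async PF is disabled altogether.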