KVM: fix searching async gfn in kvm_async_pf_gfn_slot
[pandora-kernel.git] arch/x86/kvm/x86.c
index 3cd4d09..ab10a6c 100644
@@ -783,12 +783,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    7
+#define KVM_SAVE_MSRS_BEGIN    8
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-       HV_X64_MSR_APIC_ASSIST_PAGE,
+       HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1425,6 +1425,30 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        return 0;
 }
 
+static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+{
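+       /*
+        * Low 6 bits of the MSR value are control flags (bit 0 enables
+        * async PF, bit 1 asks for delivery in kernel mode as well); the
+        * remaining bits are the 64-byte aligned gpa of the shared reason
+        * word.
+        */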
+       gpa_t gpa = data & ~0x3f;
+
+       /* Bits 2:5 are reserved, should be zero */
+       if (data & 0x3c)
+               return 1;
+
+       vcpu->arch.apf.msr_val = data;
+
+       if (!(data & KVM_ASYNC_PF_ENABLED)) {
+               kvm_clear_async_pf_completion_queue(vcpu);
+               kvm_async_pf_hash_reset(vcpu);
+               return 0;
+       }
+
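+       /*
+        * Pre-translate the shared area's gpa so apf_put_user() can use
+        * kvm_write_guest_cached() from the fault injection path.
+        */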
+       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+               return 1;
+
+       vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+       kvm_async_pf_wakeup_all(vcpu);
+       return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
@@ -1506,6 +1530,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                }
                break;
        }
+       case MSR_KVM_ASYNC_PF_EN:
+               if (kvm_pv_enable_async_pf(vcpu, data))
+                       return 1;
+               break;
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1782,6 +1810,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_KVM_SYSTEM_TIME_NEW:
                data = vcpu->arch.time;
                break;
+       case MSR_KVM_ASYNC_PF_EN:
+               data = vcpu->arch.apf.msr_val;
+               break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
@@ -1929,6 +1960,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
        case KVM_CAP_XSAVE:
+       case KVM_CAP_ASYNC_PF:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -3176,20 +3208,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_memslots *slots, *old_slots;
                unsigned long *dirty_bitmap;
 
-               r = -ENOMEM;
-               dirty_bitmap = vmalloc(n);
-               if (!dirty_bitmap)
-                       goto out;
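+               /*
+                * Switch to the currently inactive half of the preallocated
+                * double-sized bitmap instead of vmalloc()ing a fresh one on
+                * every GET_DIRTY_LOG call.
+                */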
+               dirty_bitmap = memslot->dirty_bitmap_head;
+               if (memslot->dirty_bitmap == dirty_bitmap)
+                       dirty_bitmap += n / sizeof(long);
                memset(dirty_bitmap, 0, n);
 
                r = -ENOMEM;
                slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-               if (!slots) {
-                       vfree(dirty_bitmap);
+               if (!slots)
                        goto out;
-               }
                memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
                slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
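+               /*
+                * Bump the memslots generation so cached gfn->hva
+                * translations are revalidated against the new slots array.
+                */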
+               slots->generation++;
 
                old_slots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
@@ -3202,11 +3232,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                spin_unlock(&kvm->mmu_lock);
 
                r = -EFAULT;
-               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-                       vfree(dirty_bitmap);
+               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
                        goto out;
-               }
-               vfree(dirty_bitmap);
        } else {
                r = -EFAULT;
                if (clear_user(log->dirty_bitmap, n))
@@ -3980,13 +4007,15 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
                return X86EMUL_CONTINUE;
 
        if (kvm_x86_ops->has_wbinvd_exit()) {
-               preempt_disable();
+               int cpu = get_cpu();
+
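+               /*
+                * get_cpu() keeps preemption disabled for the duration of
+                * the IPI broadcast; we stay on this CPU until put_cpu().
+                */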
+               cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
                smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
                                wbinvd_ipi, NULL, 1);
-               preempt_enable();
+               put_cpu();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-       }
-       wbinvd();
+       } else
+               wbinvd();
        return X86EMUL_CONTINUE;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -4660,7 +4689,6 @@ int kvm_arch_init(void *opaque)
 
        kvm_x86_ops = ops;
        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-       kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
                        PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
@@ -5791,6 +5819,8 @@ free_vcpu:
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.apf.msr_val = 0;
+
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
@@ -5810,6 +5840,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.dr7 = DR7_FIXED_1;
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
+       vcpu->arch.apf.msr_val = 0;
 
        kvm_clear_async_pf_completion_queue(vcpu);
        kvm_async_pf_hash_reset(vcpu);
@@ -6138,6 +6169,20 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+       int r;
+
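+       /*
+        * Only prefault when the MMU maps guest memory directly (tdp) and
+        * the page was successfully brought in; otherwise let the guest
+        * take the fault again.
+        */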
+       if (!vcpu->arch.mmu.direct_map || is_error_page(work->page))
+               return;
+
+       r = kvm_mmu_reload(vcpu);
+       if (unlikely(r))
+               return;
+
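+       /* Resolve the fault for work->gva now that the page is resident. */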
+       vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
        return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
@@ -6164,8 +6209,8 @@ static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
        u32 key = kvm_async_pf_hash_fn(gfn);
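+       /*
+        * Probe linearly from the hash slot; stop as soon as we find the
+        * gfn or an empty slot (~0), since an empty slot means the gfn is
+        * not in the table.
+        */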
 
        for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
-                    (vcpu->arch.apf.gfns[key] != gfn ||
-                     vcpu->arch.apf.gfns[key] == ~0); i++)
+                    (vcpu->arch.apf.gfns[key] != gfn &&
+                     vcpu->arch.apf.gfns[key] != ~0); i++)
                key = kvm_async_pf_next_probe(key);
 
        return key;
@@ -6199,20 +6244,54 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
        }
 }
 
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
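+       /*
+        * Write the fault reason into the guest's shared async PF word,
+        * through the gpa->hva mapping cached by kvm_pv_enable_async_pf().
+        */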
+       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+                                     sizeof(val));
+}
+
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
 {
-       trace_kvm_async_pf_not_present(work->gva);
-
-       kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+       trace_kvm_async_pf_not_present(work->arch.token, work->gva);
        kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
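+       /*
+        * If the guest has not enabled async PF, or asked for user mode
+        * only notifications while it is currently at CPL0, halt the vcpu
+        * until the page is available.  Otherwise report "page not present"
+        * through the shared word and inject a page fault carrying the
+        * token that identifies this request.
+        */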
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+           (vcpu->arch.apf.send_user_only &&
+            kvm_x86_ops->get_cpl(vcpu) == 0))
+               kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+       else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+               vcpu->arch.fault.error_code = 0;
+               vcpu->arch.fault.address = work->arch.token;
+               kvm_inject_page_fault(vcpu);
+       }
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
 {
-       trace_kvm_async_pf_ready(work->gva);
-       kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+       trace_kvm_async_pf_ready(work->arch.token, work->gva);
+       if (is_error_page(work->page))
+               work->arch.token = ~0; /* broadcast wakeup */
+       else
+               kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
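+       /*
+        * If the guest is using async PF, tell it the page is ready by
+        * writing PAGE_READY into the shared word and injecting a fault
+        * with the token as its address.
+        */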
+       if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+               vcpu->arch.fault.error_code = 0;
+               vcpu->arch.fault.address = work->arch.token;
+               kvm_inject_page_fault(vcpu);
+       }
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
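+       /*
+        * With async PF disabled the "page ready" event is never delivered
+        * to the guest, so completion can be processed at any time.
+        * Otherwise an exception must be injectable right now.
+        */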
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+               return true;
+       else
+               return !kvm_event_needs_reinjection(vcpu) &&
+                       kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);