KVM: x86: Fix wrong masking on relative jump/call
[pandora-kernel.git] / arch/x86/kvm/x86.c
index f4063fd..2d7d0df 100644
@@ -92,6 +92,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 int ignore_msrs = 0;
 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+unsigned int min_timer_period_us = 500;
+module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+
 bool kvm_has_tsc_control;
 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
 u32  kvm_max_guest_tsc_khz;
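
Note: the new min_timer_period_us knob puts a floor under the local APIC timer period a guest may program, so a guest cannot flood the host with timer interrupts. A minimal sketch of the consumer side, assuming the clamp sits in the lapic.c timer setup path (the function name here is hypothetical, not part of this diff):

/* Sketch (assumption): clamp a periodic APIC timer to min_timer_period_us. */
extern unsigned int min_timer_period_us;

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	s64 min_period = min_timer_period_us * 1000LL;	/* us -> ns */

	if (apic->lapic_timer.period < min_period) {
		pr_info_ratelimited(
			"kvm: vcpu %i: lapic timer period limited to %lld ns\n",
			apic->vcpu->vcpu_id, min_period);
		apic->lapic_timer.period = min_period;
	}
}
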
@@ -551,8 +554,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        xcr0 = xcr;
-       if (kvm_x86_ops->get_cpl(vcpu) != 0)
-               return 1;
        if (!(xcr0 & XSTATE_FP))
                return 1;
        if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -566,7 +567,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-       if (__kvm_set_xcr(vcpu, index, xcr)) {
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+           __kvm_set_xcr(vcpu, index, xcr)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
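
Note: the CPL check moves out of __kvm_set_xcr() and into kvm_set_xcr() so that only the path reached from a guest XSETBV exit enforces it, while host-initiated state loads (e.g. the KVM_SET_XCRS ioctl, which calls __kvm_set_xcr() directly) are no longer rejected because of the guest's current CPL. Roughly how the guest path reaches this code; a sketch of the VMX exit handler, which lives in vmx.c and is not part of this diff:

/* Sketch of the XSETBV exit handler (assumption): kvm_set_xcr() now
 * refuses the write and injects #GP when the guest ran XSETBV outside
 * ring 0. */
static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
	u64 new_bv = kvm_read_edx_eax(vcpu);
	u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
		skip_emulated_instruction(vcpu);
	return 1;
}
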
@@ -891,7 +893,6 @@ void kvm_enable_efer_bits(u64 mask)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
-
 /*
  * Writes msr value into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
@@ -899,8 +900,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
+       switch (msr_index) {
+       case MSR_FS_BASE:
+       case MSR_GS_BASE:
+       case MSR_KERNEL_GS_BASE:
+       case MSR_CSTAR:
+       case MSR_LSTAR:
+               if (is_noncanonical_address(data))
+                       return 1;
+               break;
+       case MSR_IA32_SYSENTER_EIP:
+       case MSR_IA32_SYSENTER_ESP:
+               /*
+                * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
+                * non-canonical address is written on Intel but not on
+                * AMD (which ignores the top 32-bits, because it does
+                * not implement 64-bit SYSENTER).
+                *
+                * 64-bit code should hence be able to write a non-canonical
+                * value on AMD.  Making the address canonical ensures that
+                * vmentry does not fail on Intel after writing a non-canonical
+                * value, and that something deterministic happens if the guest
+                * invokes 64-bit SYSENTER.
+                */
+               data = get_canonical(data);
+       }
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
 }
+EXPORT_SYMBOL_GPL(kvm_set_msr);
 
 /*
  * Adapt set_msr() to msr_io()'s calling convention
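
Note: the canonical-address checks added to kvm_set_msr() rely on two small helpers that are not visible in this hunk. A sketch of how they are typically defined in arch/x86/kvm/x86.h, assuming 48-bit virtual addresses (sign-extend from bit 47 and compare):

/* Sketch of the helpers assumed by the WRMSR checks above. A canonical
 * 64-bit address has bits 63:48 equal to bit 47. */
static inline u64 get_canonical(u64 la)
{
	return ((int64_t)la << 16) >> 16;	/* sign-extend from bit 47 */
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
	return get_canonical(la) != la;
#else
	return false;	/* no canonical form on 32-bit hosts */
#endif
}
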
@@ -1105,7 +1132,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
-       void *shared_kaddr;
        unsigned long this_tsc_khz;
        s64 kernel_ns, max_kernel_ns;
        u64 tsc_timestamp;
@@ -1141,7 +1167,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
        local_irq_restore(flags);
 
-       if (!vcpu->time_page)
+       if (!vcpu->pv_time_enabled)
                return 0;
 
        /*
@@ -1199,14 +1225,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         */
        vcpu->hv_clock.version += 2;
 
-       shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
-
-       memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-              sizeof(vcpu->hv_clock));
-
-       kunmap_atomic(shared_kaddr, KM_USER0);
-
-       mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock));
        return 0;
 }
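
Note: kvm_write_guest_cached() copies the whole pvclock_vcpu_time_info structure through the gfn_to_hva cache instead of kmapping a pinned time_page. The even/odd version protocol the guest relies on is unchanged: the host bumps version by 2 around each update so it is odd while the structure is inconsistent. A hypothetical guest-side reader, to illustrate what the version bump above pairs with:

/* Hypothetical guest-side reader of the shared pvclock structure: retry
 * while the version is odd (update in progress) or changes mid-read. */
static u64 pvclock_read_system_time(volatile struct pvclock_vcpu_time_info *ti)
{
	u32 version;
	u64 system_time;

	do {
		version = ti->version;
		smp_rmb();			/* version before payload */
		system_time = ti->system_time;
		smp_rmb();			/* payload before re-check */
	} while ((version & 1) || version != ti->version);

	return system_time;		/* TSC delta scaling omitted */
}
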
 
@@ -1486,7 +1507,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
                return 0;
        }
 
-       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+                                       sizeof(u32)))
                return 1;
 
        vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
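
Note: kvm_gfn_to_hva_cache_init() grows a length argument here and in the kvmclock and steal-time hunks below, so the cache records how many bytes later cached reads and writes may touch. The assumed updated prototype (declared in include/linux/kvm_host.h):

/* Assumed updated prototype: @len is stored in the cache so that
 * kvm_read_guest_cached()/kvm_write_guest_cached() can verify the access
 * stays inside the area registered by the guest. */
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
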
@@ -1496,10 +1518,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.time_page) {
-               kvm_release_page_dirty(vcpu->arch.time_page);
-               vcpu->arch.time_page = NULL;
-       }
+       vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1591,6 +1610,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
+               u64 gpa_offset;
                kvmclock_reset(vcpu);
 
                vcpu->arch.time = data;
@@ -1600,16 +1620,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (!(data & 1))
                        break;
 
-               /* ...but clean it before doing the actual write */
-               vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+               gpa_offset = data & ~(PAGE_MASK | 1);
 
-               vcpu->arch.time_page =
-                               gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
-               if (is_error_page(vcpu->arch.time_page)) {
-                       kvm_release_page_clean(vcpu->arch.time_page);
-                       vcpu->arch.time_page = NULL;
-               }
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                    &vcpu->arch.pv_time, data & ~1ULL,
+                    sizeof(struct pvclock_vcpu_time_info)))
+                       vcpu->arch.pv_time_enabled = false;
+               else
+                       vcpu->arch.pv_time_enabled = true;
                break;
        }
        case MSR_KVM_ASYNC_PF_EN:
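
Note, on the MSR_KVM_SYSTEM_TIME handling just above: bit 0 of the written value is the enable flag and the remaining bits are the guest physical address of the pvclock_vcpu_time_info area, which is why the code masks off bit 0 (and the page offset) before initializing the cache. A hypothetical guest-side enablement, for illustration only:

/* Hypothetical guest code enabling kvmclock: the MSR payload is the
 * physical address of the shared time structure with bit 0 set. */
static struct pvclock_vcpu_time_info kvm_clock;

static void kvmclock_init(void)
{
	wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(&kvm_clock) | 1);
}
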
@@ -1625,7 +1643,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                        return 1;
 
                if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-                                                       data & KVM_STEAL_VALID_BITS))
+                                               data & KVM_STEAL_VALID_BITS,
+                                               sizeof(struct kvm_steal_time)))
                        return 1;
 
                vcpu->arch.st.msr_val = data;
@@ -3149,8 +3168,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EFAULT;
                if (copy_from_user(&va, argp, sizeof va))
                        goto out;
-               r = 0;
-               kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+               r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
                break;
        }
        case KVM_X86_SETUP_MCE: {
@@ -5546,33 +5564,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
                        !kvm_event_needs_reinjection(vcpu);
 }
 
-static void vapic_enter(struct kvm_vcpu *vcpu)
-{
-       struct kvm_lapic *apic = vcpu->arch.apic;
-       struct page *page;
-
-       if (!apic || !apic->vapic_addr)
-               return;
-
-       page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-
-       vcpu->arch.apic->vapic_page = page;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
-       struct kvm_lapic *apic = vcpu->arch.apic;
-       int idx;
-
-       if (!apic || !apic->vapic_addr)
-               return;
-
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-       kvm_release_page_dirty(apic->vapic_page);
-       mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 {
        int max_irr, tpr;
@@ -5845,7 +5836,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        }
 
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-       vapic_enter(vcpu);
 
        r = 1;
        while (r > 0) {
@@ -5902,8 +5892,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 
        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
-       vapic_exit(vcpu);
-
        return r;
 }
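
Note: vapic_enter()/vapic_exit() pinned the vapic page with gfn_to_page() for the whole run loop. With kvm_lapic_set_vapic_addr() now returning an error (see the ioctl hunk above), the vapic area is instead expected to be reached through a gfn_to_hva cache set up at KVM_SET_VAPIC_ADDR time. A sketch of that lapic.c counterpart; the vapic_cache field name is an assumption, not shown in this diff:

/* Sketch (assumption): register a gfn_to_hva cache for the vapic area
 * instead of pinning the page, and report failure to the ioctl. */
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr &&
	    kvm_gfn_to_hva_cache_init(vcpu->kvm,
				      &vcpu->arch.apic->vapic_cache,
				      vapic_addr, sizeof(u32)))
		return -EINVAL;

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
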
 
@@ -6549,6 +6537,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
                goto fail_free_mce_banks;
 
+       vcpu->arch.pv_time_enabled = false;
        kvm_async_pf_hash_reset(vcpu);
 
        return 0;