Merge branch 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[pandora-kernel.git] / arch/x86/kvm/x86.c
index 3101060..2288ad8 100644
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Qumranet, Inc.
  * Copyright (C) 2008 Qumranet, Inc.
  * Copyright IBM Corporation, 2008
- * Copyright 2010 Red Hat, Inc. and/or its affilates.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Authors:
  *   Avi Kivity   <avi@qumranet.com>
@@ -73,7 +73,7 @@
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
 #define KVM_MAX_MCE_BANKS 32
-#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
+#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
 
 /* EFER defaults:
  * - enable syscall by default because it's emulated by KVM
@@ -284,6 +284,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
        u32 prev_nr;
        int class1, class2;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        if (!vcpu->arch.exception.pending) {
        queue:
                vcpu->arch.exception.pending = true;
@@ -340,22 +342,17 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
 
 void kvm_propagate_fault(struct kvm_vcpu *vcpu)
 {
-       u32 nested, error;
-
-       error   = vcpu->arch.fault.error_code;
-       nested  = error &  PFERR_NESTED_MASK;
-       error   = error & ~PFERR_NESTED_MASK;
-
-       vcpu->arch.fault.error_code = error;
-
-       if (mmu_is_nested(vcpu) && !nested)
+       if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
                vcpu->arch.nested_mmu.inject_page_fault(vcpu);
        else
                vcpu->arch.mmu.inject_page_fault(vcpu);
+
+       vcpu->arch.fault.nested = false;
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        vcpu->arch.nmi_pending = 1;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
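
The KVM_REQ_EVENT requests added throughout this patch use the generic per-vcpu request bitmap; vcpu_enter_guest() further down only walks the event-injection path when the bit is set. For context (not part of this diff), the helpers from include/linux/kvm_host.h of this period look roughly like:

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);	/* raise the request bit */
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	/* consume the request: test and clear it in one pass */
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	}
	return false;
}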
@@ -418,17 +415,17 @@ int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 /*
  * Load the pae pdptrs.  Return true if they are all valid.
  */
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
 
-       ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte,
-                                        offset * sizeof(u64), sizeof(pdpte),
-                                        PFERR_USER_MASK|PFERR_WRITE_MASK);
+       ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
+                                     offset * sizeof(u64), sizeof(pdpte),
+                                     PFERR_USER_MASK|PFERR_WRITE_MASK);
        if (ret < 0) {
                ret = 0;
                goto out;
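
For reference, under PAE paging the PDPT is the 32-byte-aligned table addressed by CR3 bits 31:5, so the offset arithmetic above can be unpacked with a made-up CR3 value:

	/* Illustrative only: cr3 = 0x12345fe0 with PAE enabled. */
	unsigned long cr3 = 0x12345fe0;
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;                   /* page 0x12345 */
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;  /* 0x1fc u64 slots */
	/* the read above starts at offset * sizeof(u64) == 0xfe0 == cr3 & 0xfe0
	 * and covers the four 8-byte PDPTEs */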
@@ -442,7 +439,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        }
        ret = 1;
 
-       memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+       memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
@@ -455,7 +452,7 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
        bool changed = true;
        int offset;
        gfn_t gfn;
@@ -474,7 +471,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
                                       PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
                goto out;
-       changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
+       changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
 out:
 
        return changed;
@@ -513,7 +510,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                                return 1;
                } else
 #endif
-               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
+               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+                                                vcpu->arch.cr3))
                        return 1;
        }
 
@@ -602,7 +600,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
-                  && !load_pdptrs(vcpu, vcpu->arch.cr3))
+                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
                return 1;
 
        if (cr4 & X86_CR4_VMXE)
@@ -635,7 +633,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS)
                                return 1;
-                       if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
+                       if (is_paging(vcpu) &&
+                           !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                                return 1;
                }
                /*
@@ -893,7 +892,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
        /*
         * The guest calculates current wall clock time by adding
-        * system time (updated by kvm_write_guest_time below) to the
+        * system time (updated by kvm_guest_time_update below) to the
  * wall clock specified here.  Guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
@@ -921,31 +920,35 @@ static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
        return quotient;
 }
 
-static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+                              s8 *pshift, u32 *pmultiplier)
 {
-       uint64_t nsecs = 1000000000LL;
+       uint64_t scaled64;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;
 
-       tps64 = tsc_khz * 1000LL;
-       while (tps64 > nsecs*2) {
+       tps64 = base_khz * 1000LL;
+       scaled64 = scaled_khz * 1000LL;
+       while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
                tps64 >>= 1;
                shift--;
        }
 
        tps32 = (uint32_t)tps64;
-       while (tps32 <= (uint32_t)nsecs) {
-               tps32 <<= 1;
+       while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
+               if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
+                       scaled64 >>= 1;
+               else
+                       tps32 <<= 1;
                shift++;
        }
 
-       hv_clock->tsc_shift = shift;
-       hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+       *pshift = shift;
+       *pmultiplier = div_frac(scaled64, tps32);
 
-       pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
-                __func__, tsc_khz, hv_clock->tsc_shift,
-                hv_clock->tsc_to_system_mul);
+       pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
+                __func__, base_khz, scaled_khz, shift, *pmultiplier);
 }
 
 static inline u64 get_kernel_ns(void)
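
The (shift, multiplier) pair produced by kvm_get_time_scale() is applied the same way pvclock applies tsc_shift/tsc_to_system_mul: shift the delta, then multiply by the 32.32 fixed-point fraction. A minimal sketch of that consumer (the in-tree helper is pvclock_scale_delta(), which keeps a 128-bit intermediate to avoid the overflow this simplified version has):

static u64 scale_delta(u64 delta, u32 mult, s8 shift)
{
	/* pre-shift so the value is in range for the 32-bit multiplier */
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	/* mult is a 32.32 fixed-point fraction of scaled/base frequency */
	return (delta * mult) >> 32;
}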
@@ -959,6 +962,7 @@ static inline u64 get_kernel_ns(void)
 }
 
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
+unsigned long max_tsc_khz;
 
 static inline int kvm_tsc_changes_freq(void)
 {
@@ -982,6 +986,24 @@ static inline u64 nsec_to_cycles(u64 nsec)
        return ret;
 }
 
+static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
+{
+       /* Compute a scale to convert nanoseconds to TSC cycles */
+       kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
+                          &kvm->arch.virtual_tsc_shift,
+                          &kvm->arch.virtual_tsc_mult);
+       kvm->arch.virtual_tsc_khz = this_tsc_khz;
+}
+
+static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
+{
+       u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
+                                     vcpu->kvm->arch.virtual_tsc_mult,
+                                     vcpu->kvm->arch.virtual_tsc_shift);
+       tsc += vcpu->arch.last_tsc_write;
+       return tsc;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
        struct kvm *kvm = vcpu->kvm;
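
A quick sanity check of compute_guest_tsc() with made-up numbers: if kvm_arch_set_tsc_khz() recorded a virtual TSC of 2,000,000 kHz (2 GHz, i.e. two cycles per nanosecond), last_tsc_write is 1,000,000 and kernel_ns - last_tsc_nsec is 1,500,000 ns, then pvclock_scale_delta() yields 3,000,000 cycles and the function returns 4,000,000.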
@@ -1026,10 +1048,12 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 
        /* Reset of TSC must disable overshoot protection below */
        vcpu->arch.hv_clock.tsc_timestamp = 0;
+       vcpu->arch.last_tsc_write = data;
+       vcpu->arch.last_tsc_nsec = ns;
 }
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
-static int kvm_write_guest_time(struct kvm_vcpu *v)
+static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
@@ -1038,21 +1062,41 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        s64 kernel_ns, max_kernel_ns;
        u64 tsc_timestamp;
 
-       if ((!vcpu->time_page))
-               return 0;
-
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
        this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-       local_irq_restore(flags);
 
        if (unlikely(this_tsc_khz == 0)) {
-               kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
+               local_irq_restore(flags);
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
                return 1;
        }
 
+       /*
+        * We may have to catch up the TSC to match elapsed wall clock
+        * time for two reasons, even if kvmclock is used.
+        *   1) CPU could have been running below the maximum TSC rate
+        *   2) Broken TSC compensation resets the base at each VCPU
+        *      entry to avoid unknown leaps of TSC even when running
+        *      again on the same CPU.  This may cause apparent elapsed
+        *      time to disappear, and the guest to stand still or run
+        *      very slowly.
+        */
+       if (vcpu->tsc_catchup) {
+               u64 tsc = compute_guest_tsc(v, kernel_ns);
+               if (tsc > tsc_timestamp) {
+                       kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
+                       tsc_timestamp = tsc;
+               }
+       }
+
+       local_irq_restore(flags);
+
+       if (!vcpu->time_page)
+               return 0;
+
        /*
         * Time as measured by the TSC may go backwards when resetting the base
         * tsc_timestamp.  The reason for this is that the TSC resolution is
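
Continuing that example: if the guest should have reached compute_guest_tsc(v, kernel_ns) = 4,000,000 but the hardware-derived tsc_timestamp is only 3,200,000 (the vcpu ran on a slower CPU, or a broken-TSC host reset the base on entry), the catch-up branch above adds the missing 800,000 cycles through adjust_tsc_offset() so guest time never appears to stop; the numbers are illustrative only.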
@@ -1085,7 +1129,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        }
 
        if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-               kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
+               kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+                                  &vcpu->hv_clock.tsc_shift,
+                                  &vcpu->hv_clock.tsc_to_system_mul);
                vcpu->hw_tsc_khz = this_tsc_khz;
        }
 
@@ -1096,6 +1142,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
        vcpu->last_kernel_ns = kernel_ns;
+       vcpu->last_guest_tsc = tsc_timestamp;
        vcpu->hv_clock.flags = 0;
 
        /*
@@ -1116,16 +1163,6 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        return 0;
 }
 
-static int kvm_request_guest_time_update(struct kvm_vcpu *v)
-{
-       struct kvm_vcpu_arch *vcpu = &v->arch;
-
-       if (!vcpu->time_page)
-               return 0;
-       kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
-       return 1;
-}
-
 static bool msr_mtrr_valid(unsigned msr)
 {
        switch (msr) {
@@ -1449,6 +1486,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                }
 
                vcpu->arch.time = data;
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                /* we verify if the enable bit is set... */
                if (!(data & 1))
@@ -1464,8 +1502,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }
-
-               kvm_request_guest_time_update(vcpu);
                break;
        }
        case MSR_IA32_MCG_CTL:
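
For context, the guest-side counterpart of the MSR_KVM_SYSTEM_TIME case above registers its pvclock page by writing the page's physical address with bit 0 set. A minimal sketch, assuming a hypothetical per-guest hv_clock variable (the real registration lives in arch/x86/kernel/kvmclock.c):

static struct pvclock_vcpu_time_info hv_clock;

static void kvm_register_clock_sketch(void)
{
	/* bit 0 asks the host to keep hv_clock updated */
	wrmsrl(MSR_KVM_SYSTEM_TIME, __pa(&hv_clock) | 1);
}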
@@ -2022,9 +2058,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                                native_read_tsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
-               if (check_tsc_unstable())
+               if (check_tsc_unstable()) {
                        kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
-               kvm_migrate_timers(vcpu);
+                       vcpu->arch.tsc_catchup = 1;
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+               }
+               if (vcpu->cpu != cpu)
+                       kvm_migrate_timers(vcpu);
                vcpu->cpu = cpu;
        }
 }
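
Worked example for the unstable-TSC path above (illustrative values): the vcpu was scheduled out on a pCPU whose TSC read 10,000,000 and is loaded on one reading 9,400,000, so tsc_delta is -600,000; the offset is adjusted by +600,000 to hide the backwards jump from the guest, and tsc_catchup plus KVM_REQ_CLOCK_UPDATE let kvm_guest_time_update() keep guest time moving forward.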
@@ -2204,13 +2244,14 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
                0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-               0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
+               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+               F(F16C);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
-               F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
-               0 /* SKINIT */ | 0 /* WDT */;
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
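
For reference, F() in this file expands to the feature's bit position, and each kvm_supported_word* mask is later ANDed with what the host CPU actually reports, so newly whitelisted bits such as F(AES), F(XOP) or F(F16C) only reach the guest when the host has them. Roughly:

#define F(x) bit(X86_FEATURE_##x)

	/* later in do_cpuid_ent(), for CPUID leaf 0x80000001 */
	entry->ecx &= kvm_supported_word6_x86_features;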
@@ -2416,6 +2457,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                return -ENXIO;
 
        kvm_queue_interrupt(vcpu, irq->irq, false);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
 
        return 0;
 }
@@ -2569,6 +2611,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
                vcpu->arch.sipi_vector = events->sipi_vector;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return 0;
 }
 
@@ -3008,18 +3052,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
-               raw_spin_lock(&pic_irqchip(kvm)->lock);
+               spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[0],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               raw_spin_unlock(&pic_irqchip(kvm)->lock);
+               spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
-               raw_spin_lock(&pic_irqchip(kvm)->lock);
+               spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[1],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               raw_spin_unlock(&pic_irqchip(kvm)->lock);
+               spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_IOAPIC:
                r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@ -3426,8 +3470,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        goto out;
 
                r = 0;
+               local_irq_disable();
                now_ns = get_kernel_ns();
                delta = user_ns.clock - now_ns;
+               local_irq_enable();
                kvm->arch.kvmclock_offset = delta;
                break;
        }
@@ -3435,8 +3481,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
                struct kvm_clock_data user_ns;
                u64 now_ns;
 
+               local_irq_disable();
                now_ns = get_kernel_ns();
                user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
+               local_irq_enable();
                user_ns.flags = 0;
 
                r = -EFAULT;
@@ -3516,7 +3564,7 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
        access |= PFERR_USER_MASK;
        t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
        if (t_gpa == UNMAPPED_GVA)
-               vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
+               vcpu->arch.fault.nested = true;
 
        return t_gpa;
 }
@@ -4185,6 +4233,35 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 }
 
+int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
+{
+       struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+       int ret;
+
+       init_emulate_ctxt(vcpu);
+
+       vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
+       vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
+       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
+       ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+
+       if (ret != X86EMUL_CONTINUE)
+               return EMULATE_FAIL;
+
+       vcpu->arch.emulate_ctxt.eip = c->eip;
+       memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+       kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+
+       if (irq == NMI_VECTOR)
+               vcpu->arch.nmi_pending = false;
+       else
+               vcpu->arch.interrupt.pending = false;
+
+       return EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+
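
kvm_inject_realmode_interrupt() is exported so that vendor code can deliver events through the emulator while the guest runs in real mode; in the same series, VMX without unrestricted-guest support uses it roughly like this (sketch, not part of this hunk):

	if (vmx->rmode.vm86_active) {
		if (kvm_inject_realmode_interrupt(vcpu, irq) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}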
 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.insn_emulation_fail;
@@ -4327,6 +4404,7 @@ done:
 
        toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
        kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
 
@@ -4422,8 +4500,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
                                continue;
-                       if (!kvm_request_guest_time_update(vcpu))
-                               continue;
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                        if (vcpu->cpu != smp_processor_id())
                                send_ipi = 1;
                }
@@ -4478,11 +4555,20 @@ static void kvm_timer_init(void)
 {
        int cpu;
 
+       max_tsc_khz = tsc_khz;
        register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+#ifdef CONFIG_CPU_FREQ
+               struct cpufreq_policy policy;
+               memset(&policy, 0, sizeof(policy));
+               cpufreq_get_policy(&policy, get_cpu());
+               if (policy.cpuinfo.max_freq)
+                       max_tsc_khz = policy.cpuinfo.max_freq;
+#endif
                cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
        }
+       pr_debug("kvm: max_tsc_khz = %lu\n", max_tsc_khz);
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
 }
@@ -5002,8 +5088,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
                        __kvm_migrate_timers(vcpu);
-               if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) {
-                       r = kvm_write_guest_time(vcpu);
+               if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
+                       r = kvm_guest_time_update(vcpu);
                        if (unlikely(r))
                                goto out;
                }
@@ -5031,6 +5117,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (unlikely(r))
                goto out;
 
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+               inject_pending_event(vcpu);
+
+               /* enable NMI/IRQ window open exits if needed */
+               if (vcpu->arch.nmi_pending)
+                       kvm_x86_ops->enable_nmi_window(vcpu);
+               else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+                       kvm_x86_ops->enable_irq_window(vcpu);
+
+               if (kvm_lapic_enabled(vcpu)) {
+                       update_cr8_intercept(vcpu);
+                       kvm_lapic_sync_to_vapic(vcpu);
+               }
+       }
+
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5049,23 +5150,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                smp_wmb();
                local_irq_enable();
                preempt_enable();
+               kvm_x86_ops->cancel_injection(vcpu);
                r = 1;
                goto out;
        }
 
-       inject_pending_event(vcpu);
-
-       /* enable NMI/IRQ window open exits if needed */
-       if (vcpu->arch.nmi_pending)
-               kvm_x86_ops->enable_nmi_window(vcpu);
-       else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-               kvm_x86_ops->enable_irq_window(vcpu);
-
-       if (kvm_lapic_enabled(vcpu)) {
-               update_cr8_intercept(vcpu);
-               kvm_lapic_sync_to_vapic(vcpu);
-       }
-
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        kvm_guest_enter();
@@ -5303,6 +5392,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        vcpu->arch.exception.pending = false;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return 0;
 }
 
@@ -5366,6 +5457,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
        vcpu->arch.mp_state = mp_state->mp_state;
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
 }
 
@@ -5387,6 +5479,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
        memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
        kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5422,7 +5515,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-               load_pdptrs(vcpu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
                mmu_reset_needed = 1;
        }
 
@@ -5457,6 +5550,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
            !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return 0;
 }
 
@@ -5689,6 +5784,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;
 
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -5702,7 +5799,7 @@ int kvm_arch_hardware_enable(void *garbage)
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        if (vcpu->cpu == smp_processor_id())
-                               kvm_request_guest_time_update(vcpu);
+                               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
        return kvm_x86_ops->hardware_enable(garbage);
 }
 
@@ -5753,6 +5850,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        }
        vcpu->arch.pio_data = page_address(page);
 
+       if (!kvm->arch.virtual_tsc_khz)
+               kvm_arch_set_tsc_khz(kvm, max_tsc_khz);
+
        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;
@@ -5999,6 +6099,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
            kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
                rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);