KVM: Make unloading of FPU state when putting vcpu arch-independent
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 894fd45..0c082fa 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -16,6 +16,7 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
 #include "x86_emulate.h"
 #include "irq.h"
 #include "vmx.h"
@@ -47,6 +48,7 @@ struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        int                   launched;
        u8                    fail;
+       u32                   idt_vectoring_info;
        struct kvm_msr_entry *guest_msrs;
        struct kvm_msr_entry *host_msrs;
        int                   nmsrs;
@@ -62,8 +64,14 @@ struct vcpu_vmx {
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
                int           guest_efer_loaded;
-       }host_state;
-
+       } host_state;
+       struct {
+               struct {
+                       bool pending;
+                       u8 vector;
+                       unsigned rip;
+               } irq;
+       } rmode;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -85,6 +93,7 @@ static struct vmcs_config {
        u32 revision_id;
        u32 pin_based_exec_ctrl;
        u32 cpu_based_exec_ctrl;
+       u32 cpu_based_2nd_exec_ctrl;
        u32 vmexit_ctrl;
        u32 vmentry_ctrl;
 } vmcs_config;
@@ -178,6 +187,29 @@ static inline int vm_need_tpr_shadow(struct kvm *kvm)
        return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
 }
 
+static inline int cpu_has_secondary_exec_ctrls(void)
+{
+       return (vmcs_config.cpu_based_exec_ctrl &
+               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
+}
+
+static inline int vm_need_secondary_exec_ctrls(struct kvm *kvm)
+{
+       return ((cpu_has_secondary_exec_ctrls()) && (irqchip_in_kernel(kvm)));
+}
+
+static inline int cpu_has_vmx_virtualize_apic_accesses(void)
+{
+       return (vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+}
+
+static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+{
+       return ((cpu_has_vmx_virtualize_apic_accesses()) &&
+               (irqchip_in_kernel(kvm)));
+}
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
@@ -271,7 +303,7 @@ static void vmcs_writel(unsigned long field, unsigned long value)
        u8 error;
 
        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
-                      : "=q"(error) : "a"(value), "d"(field) : "cc" );
+                      : "=q"(error) : "a"(value), "d"(field) : "cc");
        if (unlikely(error))
                vmwrite_error(field, value);
 }
@@ -415,10 +447,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu)) {
+       if (is_long_mode(&vmx->vcpu))
                save_msrs(vmx->host_msrs +
                          vmx->msr_offset_kernel_gs_base, 1);
-       }
+
 #endif
        load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
        load_transition_efer(vmx);
@@ -431,6 +463,7 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
        if (!vmx->host_state.loaded)
                return;
 
+       ++vmx->vcpu.stat.host_state_reload;
        vmx->host_state.loaded = 0;
        if (vmx->host_state.fs_reload_needed)
                load_fs(vmx->host_state.fs_sel);
@@ -508,7 +541,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
        vmx_load_host_state(to_vmx(vcpu));
-       kvm_put_guest_fpu(vcpu);
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -838,14 +870,15 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 
 static int vmx_get_irq(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 idtv_info_field;
 
-       idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+       idtv_info_field = vmx->idt_vectoring_info;
        if (idtv_info_field & INTR_INFO_VALID_MASK) {
                if (is_external_interrupt(idtv_info_field))
                        return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
                else
-                       printk("pending exception: not handled yet\n");
+                       printk(KERN_DEBUG "pending exception: not handled yet\n");
        }
        return -1;
 }
@@ -893,7 +926,7 @@ static void hardware_disable(void *garbage)
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
-                                     u32 msr, u32* result)
+                                     u32 msr, u32 *result)
 {
        u32 vmx_msr_low, vmx_msr_high;
        u32 ctl = ctl_min | ctl_opt;
@@ -917,6 +950,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        u32 min, opt;
        u32 _pin_based_exec_control = 0;
        u32 _cpu_based_exec_control = 0;
+       u32 _cpu_based_2nd_exec_control = 0;
        u32 _vmexit_control = 0;
        u32 _vmentry_control = 0;
 
@@ -934,11 +968,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_IO_BITMAPS |
              CPU_BASED_MOV_DR_EXITING |
              CPU_BASED_USE_TSC_OFFSETING;
-#ifdef CONFIG_X86_64
-       opt = CPU_BASED_TPR_SHADOW;
-#else
-       opt = 0;
-#endif
+       opt = CPU_BASED_TPR_SHADOW |
+             CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
                                &_cpu_based_exec_control) < 0)
                return -EIO;
@@ -947,6 +978,19 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
                                           ~CPU_BASED_CR8_STORE_EXITING;
 #endif
+       if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
+               min = 0;
+               opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                       SECONDARY_EXEC_WBINVD_EXITING;
+               if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
+                                       &_cpu_based_2nd_exec_control) < 0)
+                       return -EIO;
+       }
+#ifndef CONFIG_X86_64
+       if (!(_cpu_based_2nd_exec_control &
+                               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+               _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
+#endif
 
        min = 0;
 #ifdef CONFIG_X86_64
@@ -984,6 +1028,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 
        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
        vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+       vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
        vmcs_conf->vmexit_ctrl         = _vmexit_control;
        vmcs_conf->vmentry_ctrl        = _vmentry_control;
 
@@ -1102,10 +1147,14 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
 }
 
-static gva_t rmode_tss_base(struct kvm* kvm)
+static gva_t rmode_tss_base(struct kvm *kvm)
 {
-       gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
-       return base_gfn << PAGE_SHIFT;
+       if (!kvm->tss_addr) {
+               gfn_t base_gfn = kvm->memslots[0].base_gfn +
+                                kvm->memslots[0].npages - 3;
+               return base_gfn << PAGE_SHIFT;
+       }
+       return kvm->tss_addr;
 }
 
 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
@@ -1385,7 +1434,7 @@ static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
 }
 
-static int init_rmode_tss(struct kvm* kvm)
+static int init_rmode_tss(struct kvm *kvm)
 {
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        u16 data = 0;
@@ -1422,6 +1471,27 @@ static void seg_setup(int seg)
        vmcs_write32(sf->ar_bytes, 0x93);
 }
 
+static int alloc_apic_access_page(struct kvm *kvm)
+{
+       struct kvm_userspace_memory_region kvm_userspace_mem;
+       int r = 0;
+
+       mutex_lock(&kvm->lock);
+       if (kvm->apic_access_page)
+               goto out;
+       kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
+       kvm_userspace_mem.flags = 0;
+       kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
+       kvm_userspace_mem.memory_size = PAGE_SIZE;
+       r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+       if (r)
+               goto out;
+       kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -1432,92 +1502,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        unsigned long a;
        struct descriptor_table dt;
        int i;
-       int ret = 0;
        unsigned long kvm_vmx_return;
-       u64 msr;
        u32 exec_control;
 
-       if (!init_rmode_tss(vmx->vcpu.kvm)) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       vmx->vcpu.rmode.active = 0;
-
-       vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
-       set_cr8(&vmx->vcpu, 0);
-       msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-       if (vmx->vcpu.vcpu_id == 0)
-               msr |= MSR_IA32_APICBASE_BSP;
-       kvm_set_apic_base(&vmx->vcpu, msr);
-
-       fx_init(&vmx->vcpu);
-
-       /*
-        * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
-        * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
-        */
-       if (vmx->vcpu.vcpu_id == 0) {
-               vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
-               vmcs_writel(GUEST_CS_BASE, 0x000f0000);
-       } else {
-               vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
-               vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
-       }
-       vmcs_write32(GUEST_CS_LIMIT, 0xffff);
-       vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
-
-       seg_setup(VCPU_SREG_DS);
-       seg_setup(VCPU_SREG_ES);
-       seg_setup(VCPU_SREG_FS);
-       seg_setup(VCPU_SREG_GS);
-       seg_setup(VCPU_SREG_SS);
-
-       vmcs_write16(GUEST_TR_SELECTOR, 0);
-       vmcs_writel(GUEST_TR_BASE, 0);
-       vmcs_write32(GUEST_TR_LIMIT, 0xffff);
-       vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
-
-       vmcs_write16(GUEST_LDTR_SELECTOR, 0);
-       vmcs_writel(GUEST_LDTR_BASE, 0);
-       vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
-       vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
-
-       vmcs_write32(GUEST_SYSENTER_CS, 0);
-       vmcs_writel(GUEST_SYSENTER_ESP, 0);
-       vmcs_writel(GUEST_SYSENTER_EIP, 0);
-
-       vmcs_writel(GUEST_RFLAGS, 0x02);
-       if (vmx->vcpu.vcpu_id == 0)
-               vmcs_writel(GUEST_RIP, 0xfff0);
-       else
-               vmcs_writel(GUEST_RIP, 0);
-       vmcs_writel(GUEST_RSP, 0);
-
-       //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
-       vmcs_writel(GUEST_DR7, 0x400);
-
-       vmcs_writel(GUEST_GDTR_BASE, 0);
-       vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
-
-       vmcs_writel(GUEST_IDTR_BASE, 0);
-       vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
-
-       vmcs_write32(GUEST_ACTIVITY_STATE, 0);
-       vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
-       vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
-
        /* I/O */
        vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
        vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
-       guest_write_tsc(0);
-
        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
-       /* Special registers */
-       vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
-
        /* Control */
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
                vmcs_config.pin_based_exec_ctrl);
@@ -1530,8 +1523,14 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                                CPU_BASED_CR8_LOAD_EXITING;
 #endif
        }
+       if (!vm_need_secondary_exec_ctrls(vmx->vcpu.kvm))
+               exec_control &= ~CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
 
+       if (vm_need_secondary_exec_ctrls(vmx->vcpu.kvm))
+               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                            vmcs_config.cpu_based_2nd_exec_ctrl);
+
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
@@ -1561,7 +1560,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        get_idt(&dt);
        vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
 
-       asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+       asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
@@ -1592,97 +1591,145 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                ++vmx->nmsrs;
        }
 
-       setup_msrs(vmx);
-
        vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
 
        /* 22.2.1, 20.8.1 */
        vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
 
-       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
-
-#ifdef CONFIG_X86_64
-       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
-       if (vm_need_tpr_shadow(vmx->vcpu.kvm))
-               vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
-                            page_to_phys(vmx->vcpu.apic->regs_page));
-       vmcs_write32(TPR_THRESHOLD, 0);
-#endif
-
        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-       vmx->vcpu.cr0 = 0x60000010;
-       vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
-       vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
-       vmx_set_efer(&vmx->vcpu, 0);
-#endif
-       vmx_fpu_activate(&vmx->vcpu);
-       update_exception_bitmap(&vmx->vcpu);
+       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+               if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
+                       return -ENOMEM;
 
        return 0;
-
-out:
-       return ret;
 }
 
-static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u64 msr;
+       int ret;
 
-       vmx_vcpu_setup(vmx);
-}
-
-static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
-{
-       u16 ent[2];
-       u16 cs;
-       u16 ip;
-       unsigned long flags;
-       unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
-       u16 sp =  vmcs_readl(GUEST_RSP);
-       u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
-
-       if (sp > ss_limit || sp < 6 ) {
-               vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
-                           __FUNCTION__,
-                           vmcs_readl(GUEST_RSP),
-                           vmcs_readl(GUEST_SS_BASE),
-                           vmcs_read32(GUEST_SS_LIMIT));
-               return;
+       if (!init_rmode_tss(vmx->vcpu.kvm)) {
+               ret = -ENOMEM;
+               goto out;
        }
 
-       if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
-                                                       X86EMUL_CONTINUE) {
-               vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
-               return;
+       vmx->vcpu.rmode.active = 0;
+
+       vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+       set_cr8(&vmx->vcpu, 0);
+       msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+       if (vmx->vcpu.vcpu_id == 0)
+               msr |= MSR_IA32_APICBASE_BSP;
+       kvm_set_apic_base(&vmx->vcpu, msr);
+
+       fx_init(&vmx->vcpu);
+
+       /*
+        * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
+        * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
+        */
+       if (vmx->vcpu.vcpu_id == 0) {
+               vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
+               vmcs_writel(GUEST_CS_BASE, 0x000f0000);
+       } else {
+               vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
+               vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
        }
+       vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+       vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
+
+       seg_setup(VCPU_SREG_DS);
+       seg_setup(VCPU_SREG_ES);
+       seg_setup(VCPU_SREG_FS);
+       seg_setup(VCPU_SREG_GS);
+       seg_setup(VCPU_SREG_SS);
 
-       flags =  vmcs_readl(GUEST_RFLAGS);
-       cs =  vmcs_readl(GUEST_CS_BASE) >> 4;
-       ip =  vmcs_readl(GUEST_RIP);
+       vmcs_write16(GUEST_TR_SELECTOR, 0);
+       vmcs_writel(GUEST_TR_BASE, 0);
+       vmcs_write32(GUEST_TR_LIMIT, 0xffff);
+       vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
+       vmcs_write16(GUEST_LDTR_SELECTOR, 0);
+       vmcs_writel(GUEST_LDTR_BASE, 0);
+       vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
+       vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
 
-       if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
-           emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
-           emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
-               vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
-               return;
+       vmcs_write32(GUEST_SYSENTER_CS, 0);
+       vmcs_writel(GUEST_SYSENTER_ESP, 0);
+       vmcs_writel(GUEST_SYSENTER_EIP, 0);
+
+       vmcs_writel(GUEST_RFLAGS, 0x02);
+       if (vmx->vcpu.vcpu_id == 0)
+               vmcs_writel(GUEST_RIP, 0xfff0);
+       else
+               vmcs_writel(GUEST_RIP, 0);
+       vmcs_writel(GUEST_RSP, 0);
+
+       /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
+       vmcs_writel(GUEST_DR7, 0x400);
+
+       vmcs_writel(GUEST_GDTR_BASE, 0);
+       vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
+
+       vmcs_writel(GUEST_IDTR_BASE, 0);
+       vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
+
+       vmcs_write32(GUEST_ACTIVITY_STATE, 0);
+       vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
+       vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
+
+       guest_write_tsc(0);
+
+       /* Special registers */
+       vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+       setup_msrs(vmx);
+
+       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
+
+       if (cpu_has_vmx_tpr_shadow()) {
+               vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+               if (vm_need_tpr_shadow(vmx->vcpu.kvm))
+                       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+                                    page_to_phys(vmx->vcpu.apic->regs_page));
+               vmcs_write32(TPR_THRESHOLD, 0);
        }
 
-       vmcs_writel(GUEST_RFLAGS, flags &
-                   ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
-       vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
-       vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
-       vmcs_writel(GUEST_RIP, ent[0]);
-       vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
+       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+               vmcs_write64(APIC_ACCESS_ADDR,
+                            page_to_phys(vmx->vcpu.kvm->apic_access_page));
+
+       vmx->vcpu.cr0 = 0x60000010;
+       vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
+       vmx_set_cr4(&vmx->vcpu, 0);
+#ifdef CONFIG_X86_64
+       vmx_set_efer(&vmx->vcpu, 0);
+#endif
+       vmx_fpu_activate(&vmx->vcpu);
+       update_exception_bitmap(&vmx->vcpu);
+
+       return 0;
+
+out:
+       return ret;
 }
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
        if (vcpu->rmode.active) {
-               inject_rmode_irq(vcpu, irq);
+               vmx->rmode.irq.pending = true;
+               vmx->rmode.irq.vector = irq;
+               vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+               vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+                            irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
+               vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
+               vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -1731,6 +1778,23 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
+static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+       int ret;
+       struct kvm_userspace_memory_region tss_mem = {
+               .slot = 8,
+               .guest_phys_addr = addr,
+               .memory_size = PAGE_SIZE * 3,
+               .flags = 0,
+       };
+
+       ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+       if (ret)
+               return ret;
+       kvm->tss_addr = addr;
+       return 0;
+}
+
 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
 {
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;
@@ -1767,20 +1831,19 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 
 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info, error_code;
        unsigned long cr2, rip;
        u32 vect_info;
        enum emulation_result er;
-       int r;
 
-       vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+       vect_info = vmx->idt_vectoring_info;
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
-                                               !is_page_fault(intr_info)) {
+                                               !is_page_fault(intr_info))
                printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
                       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
-       }
 
        if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
                int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1810,33 +1873,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
-
-               mutex_lock(&vcpu->kvm->lock);
-               r = kvm_mmu_page_fault(vcpu, cr2, error_code);
-               if (r < 0) {
-                       mutex_unlock(&vcpu->kvm->lock);
-                       return r;
-               }
-               if (!r) {
-                       mutex_unlock(&vcpu->kvm->lock);
-                       return 1;
-               }
-
-               er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
-               mutex_unlock(&vcpu->kvm->lock);
-
-               switch (er) {
-               case EMULATE_DONE:
-                       return 1;
-               case EMULATE_DO_MMIO:
-                       ++vcpu->stat.mmio_exits;
-                       return 0;
-                case EMULATE_FAIL:
-                       kvm_report_emulation_failure(vcpu, "pagetable");
-                       break;
-               default:
-                       BUG();
-               }
+               return kvm_mmu_page_fault(vcpu, cr2, error_code);
        }
 
        if (vcpu->rmode.active &&
@@ -1849,7 +1886,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 1;
        }
 
-       if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
+       if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
+           (INTR_TYPE_EXCEPTION | 1)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                return 0;
        }
@@ -2096,6 +2134,33 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       skip_emulated_instruction(vcpu);
+       /* TODO: Add support for VT-d/pass-through device */
+       return 1;
+}
+
+static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       u64 exit_qualification;
+       enum emulation_result er;
+       unsigned long offset;
+
+       exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+       offset = exit_qualification & 0xffful;
+
+       er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+
+       if (er !=  EMULATE_DONE) {
+               printk(KERN_ERR
+                      "Fail to handle apic access vmexit! Offset is 0x%lx\n",
+                      offset);
+               return -ENOTSUPP;
+       }
+       return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -2115,7 +2180,9 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
-       [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold
+       [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
+       [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
+       [EXIT_REASON_WBINVD]                  = handle_wbinvd,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -2127,9 +2194,9 @@ static const int kvm_vmx_max_exit_handlers =
  */
 static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-       u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 vectoring_info = vmx->idt_vectoring_info;
 
        if (unlikely(vmx->fail)) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2138,8 +2205,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
-                               exit_reason != EXIT_REASON_EXCEPTION_NMI )
+       if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                               exit_reason != EXIT_REASON_EXCEPTION_NMI)
                printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
                       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
        if (exit_reason < kvm_vmx_max_exit_handlers
@@ -2184,16 +2251,16 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 
 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 idtv_info_field, intr_info_field;
        int has_ext_irq, interrupt_window_open;
        int vector;
 
-       kvm_inject_pending_timer_irqs(vcpu);
        update_tpr_threshold(vcpu);
 
        has_ext_irq = kvm_cpu_has_interrupt(vcpu);
        intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
-       idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+       idtv_info_field = vmx->idt_vectoring_info;
        if (intr_info_field & INTR_INFO_VALID_MASK) {
                if (idtv_info_field & INTR_INFO_VALID_MASK) {
                        /* TODO: fault when IDT_Vectoring */
@@ -2204,6 +2271,17 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                return;
        }
        if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
+               if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
+                   == INTR_TYPE_EXT_INTR
+                   && vcpu->rmode.active) {
+                       u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
+
+                       vmx_inject_irq(vcpu, vect);
+                       if (unlikely(has_ext_irq))
+                               enable_irq_window(vcpu);
+                       return;
+               }
+
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
                                vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
@@ -2228,6 +2306,29 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                enable_irq_window(vcpu);
 }
 
+/*
+ * Failure to inject an interrupt should give us the information
+ * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
+ * when fetching the interrupt redirection bitmap in the real-mode
+ * tss, this doesn't happen.  So we do it ourselves.
+ */
+static void fixup_rmode_irq(struct vcpu_vmx *vmx)
+{
+       vmx->rmode.irq.pending = 0;
+       if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
+               return;
+       vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
+       if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
+               vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
+               vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
+               return;
+       }
+       vmx->idt_vectoring_info =
+               VECTORING_INFO_VALID_MASK
+               | INTR_TYPE_EXT_INTR
+               | vmx->rmode.irq.vector;
+}
+
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2238,50 +2339,47 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         */
        vmcs_writel(HOST_CR0, read_cr0());
 
-       asm (
+       asm(
                /* Store host registers */
 #ifdef CONFIG_X86_64
-               "push %%rax; push %%rbx; push %%rdx;"
-               "push %%rsi; push %%rdi; push %%rbp;"
-               "push %%r8;  push %%r9;  push %%r10; push %%r11;"
-               "push %%r12; push %%r13; push %%r14; push %%r15;"
+               "push %%rdx; push %%rbp;"
                "push %%rcx \n\t"
-               ASM_VMX_VMWRITE_RSP_RDX "\n\t"
 #else
-               "pusha; push %%ecx \n\t"
-               ASM_VMX_VMWRITE_RSP_RDX "\n\t"
+               "push %%edx; push %%ebp;"
+               "push %%ecx \n\t"
 #endif
+               ASM_VMX_VMWRITE_RSP_RDX "\n\t"
                /* Check if vmlaunch of vmresume is needed */
-               "cmp $0, %1 \n\t"
+               "cmpl $0, %c[launched](%0) \n\t"
                /* Load guest registers.  Don't clobber flags. */
 #ifdef CONFIG_X86_64
-               "mov %c[cr2](%3), %%rax \n\t"
+               "mov %c[cr2](%0), %%rax \n\t"
                "mov %%rax, %%cr2 \n\t"
-               "mov %c[rax](%3), %%rax \n\t"
-               "mov %c[rbx](%3), %%rbx \n\t"
-               "mov %c[rdx](%3), %%rdx \n\t"
-               "mov %c[rsi](%3), %%rsi \n\t"
-               "mov %c[rdi](%3), %%rdi \n\t"
-               "mov %c[rbp](%3), %%rbp \n\t"
-               "mov %c[r8](%3),  %%r8  \n\t"
-               "mov %c[r9](%3),  %%r9  \n\t"
-               "mov %c[r10](%3), %%r10 \n\t"
-               "mov %c[r11](%3), %%r11 \n\t"
-               "mov %c[r12](%3), %%r12 \n\t"
-               "mov %c[r13](%3), %%r13 \n\t"
-               "mov %c[r14](%3), %%r14 \n\t"
-               "mov %c[r15](%3), %%r15 \n\t"
-               "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
+               "mov %c[rax](%0), %%rax \n\t"
+               "mov %c[rbx](%0), %%rbx \n\t"
+               "mov %c[rdx](%0), %%rdx \n\t"
+               "mov %c[rsi](%0), %%rsi \n\t"
+               "mov %c[rdi](%0), %%rdi \n\t"
+               "mov %c[rbp](%0), %%rbp \n\t"
+               "mov %c[r8](%0),  %%r8  \n\t"
+               "mov %c[r9](%0),  %%r9  \n\t"
+               "mov %c[r10](%0), %%r10 \n\t"
+               "mov %c[r11](%0), %%r11 \n\t"
+               "mov %c[r12](%0), %%r12 \n\t"
+               "mov %c[r13](%0), %%r13 \n\t"
+               "mov %c[r14](%0), %%r14 \n\t"
+               "mov %c[r15](%0), %%r15 \n\t"
+               "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */
 #else
-               "mov %c[cr2](%3), %%eax \n\t"
+               "mov %c[cr2](%0), %%eax \n\t"
                "mov %%eax,   %%cr2 \n\t"
-               "mov %c[rax](%3), %%eax \n\t"
-               "mov %c[rbx](%3), %%ebx \n\t"
-               "mov %c[rdx](%3), %%edx \n\t"
-               "mov %c[rsi](%3), %%esi \n\t"
-               "mov %c[rdi](%3), %%edi \n\t"
-               "mov %c[rbp](%3), %%ebp \n\t"
-               "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
+               "mov %c[rax](%0), %%eax \n\t"
+               "mov %c[rbx](%0), %%ebx \n\t"
+               "mov %c[rdx](%0), %%edx \n\t"
+               "mov %c[rsi](%0), %%esi \n\t"
+               "mov %c[rdi](%0), %%edi \n\t"
+               "mov %c[rbp](%0), %%ebp \n\t"
+               "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */
 #endif
                /* Enter guest mode */
                "jne .Llaunched \n\t"
@@ -2291,72 +2389,79 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
 #ifdef CONFIG_X86_64
-               "xchg %3,     (%%rsp) \n\t"
-               "mov %%rax, %c[rax](%3) \n\t"
-               "mov %%rbx, %c[rbx](%3) \n\t"
-               "pushq (%%rsp); popq %c[rcx](%3) \n\t"
-               "mov %%rdx, %c[rdx](%3) \n\t"
-               "mov %%rsi, %c[rsi](%3) \n\t"
-               "mov %%rdi, %c[rdi](%3) \n\t"
-               "mov %%rbp, %c[rbp](%3) \n\t"
-               "mov %%r8,  %c[r8](%3) \n\t"
-               "mov %%r9,  %c[r9](%3) \n\t"
-               "mov %%r10, %c[r10](%3) \n\t"
-               "mov %%r11, %c[r11](%3) \n\t"
-               "mov %%r12, %c[r12](%3) \n\t"
-               "mov %%r13, %c[r13](%3) \n\t"
-               "mov %%r14, %c[r14](%3) \n\t"
-               "mov %%r15, %c[r15](%3) \n\t"
+               "xchg %0,     (%%rsp) \n\t"
+               "mov %%rax, %c[rax](%0) \n\t"
+               "mov %%rbx, %c[rbx](%0) \n\t"
+               "pushq (%%rsp); popq %c[rcx](%0) \n\t"
+               "mov %%rdx, %c[rdx](%0) \n\t"
+               "mov %%rsi, %c[rsi](%0) \n\t"
+               "mov %%rdi, %c[rdi](%0) \n\t"
+               "mov %%rbp, %c[rbp](%0) \n\t"
+               "mov %%r8,  %c[r8](%0) \n\t"
+               "mov %%r9,  %c[r9](%0) \n\t"
+               "mov %%r10, %c[r10](%0) \n\t"
+               "mov %%r11, %c[r11](%0) \n\t"
+               "mov %%r12, %c[r12](%0) \n\t"
+               "mov %%r13, %c[r13](%0) \n\t"
+               "mov %%r14, %c[r14](%0) \n\t"
+               "mov %%r15, %c[r15](%0) \n\t"
                "mov %%cr2, %%rax   \n\t"
-               "mov %%rax, %c[cr2](%3) \n\t"
-               "mov (%%rsp), %3 \n\t"
+               "mov %%rax, %c[cr2](%0) \n\t"
 
-               "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
-               "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
-               "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
-               "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
+               "pop  %%rbp; pop  %%rbp; pop  %%rdx \n\t"
 #else
-               "xchg %3, (%%esp) \n\t"
-               "mov %%eax, %c[rax](%3) \n\t"
-               "mov %%ebx, %c[rbx](%3) \n\t"
-               "pushl (%%esp); popl %c[rcx](%3) \n\t"
-               "mov %%edx, %c[rdx](%3) \n\t"
-               "mov %%esi, %c[rsi](%3) \n\t"
-               "mov %%edi, %c[rdi](%3) \n\t"
-               "mov %%ebp, %c[rbp](%3) \n\t"
+               "xchg %0, (%%esp) \n\t"
+               "mov %%eax, %c[rax](%0) \n\t"
+               "mov %%ebx, %c[rbx](%0) \n\t"
+               "pushl (%%esp); popl %c[rcx](%0) \n\t"
+               "mov %%edx, %c[rdx](%0) \n\t"
+               "mov %%esi, %c[rsi](%0) \n\t"
+               "mov %%edi, %c[rdi](%0) \n\t"
+               "mov %%ebp, %c[rbp](%0) \n\t"
                "mov %%cr2, %%eax  \n\t"
-               "mov %%eax, %c[cr2](%3) \n\t"
-               "mov (%%esp), %3 \n\t"
+               "mov %%eax, %c[cr2](%0) \n\t"
 
-               "pop %%ecx; popa \n\t"
+               "pop %%ebp; pop %%ebp; pop %%edx \n\t"
 #endif
-               "setbe %0 \n\t"
-             : "=q" (vmx->fail)
-             : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
-               "c"(vcpu),
-               [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
-               [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
-               [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
-               [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
-               [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
-               [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
-               [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
+               "setbe %c[fail](%0) \n\t"
+             : : "c"(vmx), "d"((unsigned long)HOST_RSP),
+               [launched]"i"(offsetof(struct vcpu_vmx, launched)),
+               [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+               [rax]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RAX])),
+               [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBX])),
+               [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RCX])),
+               [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDX])),
+               [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RSI])),
+               [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDI])),
+               [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBP])),
 #ifdef CONFIG_X86_64
-               [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
-               [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
-               [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
-               [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
-               [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
-               [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
-               [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
-               [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
+               [r8]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R8])),
+               [r9]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R9])),
+               [r10]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R10])),
+               [r11]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R11])),
+               [r12]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R12])),
+               [r13]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R13])),
+               [r14]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R14])),
+               [r15]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R15])),
 #endif
-               [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
-             : "cc", "memory" );
+               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.cr2))
+             : "cc", "memory"
+#ifdef CONFIG_X86_64
+               , "rbx", "rdi", "rsi"
+               , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+               , "ebx", "edi", "rsi"
+#endif
+             );
 
-       vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+       vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+       if (vmx->rmode.irq.pending)
+               fixup_rmode_irq(vmx);
+
+       vcpu->interrupt_window_open =
+               (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
-       asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+       asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->launched = 1;
 
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -2370,7 +2475,8 @@ static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
                                  unsigned long addr,
                                  u32 err_code)
 {
-       u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 vect_info = vmx->idt_vectoring_info;
 
        ++vcpu->stat.pf_guest;
 
@@ -2431,12 +2537,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_vcpu;
 
-       if (irqchip_in_kernel(kvm)) {
-               err = kvm_create_lapic(&vmx->vcpu);
-               if (err < 0)
-                       goto free_vcpu;
-       }
-
        vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!vmx->guest_msrs) {
                err = -ENOMEM;
@@ -2545,6 +2645,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_irq = vmx_inject_irq,
        .inject_pending_irq = vmx_intr_assist,
        .inject_pending_vectors = do_interrupt_requests,
+
+       .set_tss_addr = vmx_set_tss_addr,
 };
 
 static int __init vmx_init(void)
@@ -2575,7 +2677,7 @@ static int __init vmx_init(void)
        memset(iova, 0xff, PAGE_SIZE);
        kunmap(vmx_io_bitmap_b);
 
-       r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
        if (r)
                goto out1;
 
@@ -2596,7 +2698,7 @@ static void __exit vmx_exit(void)
        __free_page(vmx_io_bitmap_b);
        __free_page(vmx_io_bitmap_a);
 
-       kvm_exit_x86();
+       kvm_exit();
 }
 
 module_init(vmx_init)
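
For context on the new .set_tss_addr hook registered above: userspace reaches
vmx_set_tss_addr() through KVM's KVM_SET_TSS_ADDR vm ioctl, which passes the
guest-physical address directly as the ioctl argument. Below is a minimal
sketch of that call path; it assumes the standard /dev/kvm interface of this
era, and the 0xfffbd000 address is only a customary choice just below the
BIOS region, not something this patch mandates.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd, vm_fd;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/*
	 * Reserve three pages for the real-mode TSS that vmx uses while
	 * emulating real mode; this must happen before any vcpu runs.
	 * 0xfffbd000 is a conventional spot just below the 4G BIOS area
	 * (an assumption for this sketch, not part of the patch).
	 */
	if (ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000UL) < 0)
		perror("KVM_SET_TSS_ADDR");

	return 0;
}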