KVM: x86: Push potential exception error code on task switches
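For context: the change named in the subject is the one in handle_task_switch() below. When a task switch is triggered by a hard exception whose IDT-vectoring information has the deliver-error-code bit set, the error code is now read from IDT_VECTORING_ERROR_CODE and passed to kvm_task_switch(), so that it can be pushed on the incoming task's stack. The standalone C sketch below merely illustrates that decision; it is not code from this patch, and the constant values and helper names are assumptions made for the example.

	/*
	 * Illustrative sketch, not part of the patch.  Assumed VMX
	 * IDT-vectoring layout: event type in bits 10:8 (3 = hard
	 * exception), "error code valid" in bit 11.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VECTORING_TYPE_MASK        (7u << 8)
	#define VECTORING_TYPE_HARD_EXCEPT (3u << 8)
	#define VECTORING_DELIVER_CODE     (1u << 11)

	struct task_switch_request {
		uint16_t tss_selector;
		bool     has_error_code;  /* push an error code on the new stack? */
		uint32_t error_code;
	};

	/*
	 * Decide whether the faulting exception's error code must travel
	 * with the task switch, mirroring the new handle_task_switch() logic.
	 */
	static struct task_switch_request
	make_task_switch_request(uint32_t vectoring_info,
				 uint32_t vectoring_error_code,
				 uint16_t tss_selector)
	{
		struct task_switch_request req = { .tss_selector = tss_selector };

		if ((vectoring_info & VECTORING_TYPE_MASK) == VECTORING_TYPE_HARD_EXCEPT &&
		    (vectoring_info & VECTORING_DELIVER_CODE)) {
			req.has_error_code = true;
			req.error_code = vectoring_error_code;
		}
		return req;
	}

	int main(void)
	{
		/* e.g. a #GP (hard exception) with error code 0x18 caused the switch */
		struct task_switch_request req =
			make_task_switch_request((3u << 8) | (1u << 11), 0x18, 0x28);

		printf("push error code: %d (0x%x)\n",
		       req.has_error_code, (unsigned)req.error_code);
		return 0;
	}
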
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14873b9..1b38d8a 100644
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/ftrace_event.h>
+#include <linux/slab.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -76,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
 
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap:    upper bound on the amount of time between two successive
@@ -130,7 +133,7 @@ struct vcpu_vmx {
        } host_state;
        struct {
                int vm86_active;
-               u8 save_iopl;
+               ulong save_rflags;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
@@ -231,56 +234,56 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
 {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
 }
 
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
 {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }
 
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
 {
        return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
 }
 
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
 {
        return vmcs_config.cpu_based_exec_ctrl &
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -300,80 +303,80 @@ static inline bool cpu_has_vmx_flexpriority(void)
 
 static inline bool cpu_has_vmx_ept_execute_only(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+       return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_uncacheable(void)
 {
-       return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+       return vmx_capability.ept & VMX_EPTP_UC_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_writeback(void)
 {
-       return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+       return vmx_capability.ept & VMX_EPTP_WB_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_2m_page(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+       return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_1g_page(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+       return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
 }
 
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
 }
 
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
 }
 
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_ENABLE_EPT;
 }
 
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_UNRESTRICTED_GUEST;
 }
 
-static inline int cpu_has_vmx_ple(void)
+static inline bool cpu_has_vmx_ple(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 }
 
-static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
        return flexpriority_enabled && irqchip_in_kernel(kvm);
 }
 
-static inline int cpu_has_vmx_vpid(void)
+static inline bool cpu_has_vmx_vpid(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_ENABLE_VPID;
 }
 
-static inline int cpu_has_vmx_rdtscp(void)
+static inline bool cpu_has_vmx_rdtscp(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_RDTSCP;
 }
 
-static inline int cpu_has_virtual_nmis(void)
+static inline bool cpu_has_virtual_nmis(void)
 {
        return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
 }
@@ -597,11 +600,11 @@ static void reload_tss(void)
        /*
         * VT restores TR but not its size.  Useless.
         */
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *descs;
 
-       kvm_get_gdt(&gdt);
-       descs = (void *)gdt.base;
+       native_store_gdt(&gdt);
+       descs = (void *)gdt.address;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
 }
@@ -631,6 +634,43 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
        return true;
 }
 
+static unsigned long segment_base(u16 selector)
+{
+       struct desc_ptr gdt;
+       struct desc_struct *d;
+       unsigned long table_base;
+       unsigned long v;
+
+       if (!(selector & ~3))
+               return 0;
+
+       native_store_gdt(&gdt);
+       table_base = gdt.address;
+
+       if (selector & 4) {           /* from ldt */
+               u16 ldt_selector = kvm_read_ldt();
+
+               if (!(ldt_selector & ~3))
+                       return 0;
+
+               table_base = segment_base(ldt_selector);
+       }
+       d = (struct desc_struct *)(table_base + (selector & ~7));
+       v = get_desc_base(d);
+#ifdef CONFIG_X86_64
+       if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+               v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
+#endif
+       return v;
+}
+
+static inline unsigned long kvm_read_tr_base(void)
+{
+       u16 tr;
+       asm("str %0" : "=g"(tr));
+       return segment_base(tr);
+}
+
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -755,7 +795,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        }
 
        if (vcpu->cpu != cpu) {
-               struct descriptor_table dt;
+               struct desc_ptr dt;
                unsigned long sysenter_esp;
 
                vcpu->cpu = cpu;
@@ -764,8 +804,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
-               kvm_get_gdt(&dt);
-               vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+               native_store_gdt(&dt);
+               vmcs_writel(HOST_GDTR_BASE, dt.address);   /* 22.2.4 */
 
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -817,18 +857,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       unsigned long rflags;
+       unsigned long rflags, save_rflags;
 
        rflags = vmcs_readl(GUEST_RFLAGS);
-       if (to_vmx(vcpu)->rmode.vm86_active)
-               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+               save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+               rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+       }
        return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       if (to_vmx(vcpu)->rmode.vm86_active)
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               to_vmx(vcpu)->rmode.save_rflags = rflags;
                rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+       }
        vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
@@ -838,9 +883,9 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
        int ret = 0;
 
        if (interruptibility & GUEST_INTR_STATE_STI)
-               ret |= X86_SHADOW_INT_STI;
+               ret |= KVM_X86_SHADOW_INT_STI;
        if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-               ret |= X86_SHADOW_INT_MOV_SS;
+               ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
        return ret & mask;
 }
@@ -852,9 +897,9 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 
        interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
 
-       if (mask & X86_SHADOW_INT_MOV_SS)
+       if (mask & KVM_X86_SHADOW_INT_MOV_SS)
                interruptibility |= GUEST_INTR_STATE_MOV_SS;
-       if (mask & X86_SHADOW_INT_STI)
+       else if (mask & KVM_X86_SHADOW_INT_STI)
                interruptibility |= GUEST_INTR_STATE_STI;
 
        if ((interruptibility != interruptibility_old))
@@ -1482,8 +1527,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-       flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+       flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+       flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1556,8 +1601,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       vmx->rmode.save_iopl
-               = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+       vmx->rmode.save_rflags = flags;
 
        flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
@@ -1927,28 +1971,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
        *l = (ar >> 13) & 1;
 }
 
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_IDTR_BASE);
+       dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_IDTR_BASE);
 }
 
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_IDTR_BASE, dt->base);
+       vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_IDTR_BASE, dt->address);
 }
 
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_GDTR_BASE);
+       dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_GDTR_BASE);
 }
 
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_GDTR_BASE, dt->base);
+       vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_GDTR_BASE, dt->address);
 }
 
 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2327,7 +2371,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        u32 junk;
        u64 host_pat, tsc_this, tsc_base;
        unsigned long a;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
        int i;
        unsigned long kvm_vmx_return;
        u32 exec_control;
@@ -2408,8 +2452,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
-       kvm_get_idt(&dt);
-       vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+       native_store_idt(&dt);
+       vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
        asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
@@ -2941,22 +2985,20 @@ static int handle_io(struct kvm_vcpu *vcpu)
        int size, in, string;
        unsigned port;
 
-       ++vcpu->stat.io_exits;
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        string = (exit_qualification & 16) != 0;
+       in = (exit_qualification & 8) != 0;
 
-       if (string) {
-               if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
-                       return 0;
-               return 1;
-       }
+       ++vcpu->stat.io_exits;
 
-       size = (exit_qualification & 7) + 1;
-       in = (exit_qualification & 8) != 0;
-       port = exit_qualification >> 16;
+       if (string || in)
+               return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
 
+       port = exit_qualification >> 16;
+       size = (exit_qualification & 7) + 1;
        skip_emulated_instruction(vcpu);
-       return kvm_emulate_pio(vcpu, in, size, port);
+
+       return kvm_fast_pio_out(vcpu, size, port);
 }
 
 static void
@@ -3047,19 +3089,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int check_dr_alias(struct kvm_vcpu *vcpu)
-{
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
-               kvm_queue_exception(vcpu, UD_VECTOR);
-               return -1;
-       }
-       return 0;
-}
-
 static int handle_dr(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
-       unsigned long val;
        int dr, reg;
 
        /* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3094,67 +3126,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
        dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
        reg = DEBUG_REG_ACCESS_REG(exit_qualification);
        if (exit_qualification & TYPE_MOV_FROM_DR) {
-               switch (dr) {
-               case 0 ... 3:
-                       val = vcpu->arch.db[dr];
-                       break;
-               case 4:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               case 6:
-                       val = vcpu->arch.dr6;
-                       break;
-               case 5:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               default: /* 7 */
-                       val = vcpu->arch.dr7;
-                       break;
-               }
-               kvm_register_write(vcpu, reg, val);
-       } else {
-               val = vcpu->arch.regs[reg];
-               switch (dr) {
-               case 0 ... 3:
-                       vcpu->arch.db[dr] = val;
-                       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-                               vcpu->arch.eff_db[dr] = val;
-                       break;
-               case 4:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               case 6:
-                       if (val & 0xffffffff00000000ULL) {
-                               kvm_inject_gp(vcpu, 0);
-                               return 1;
-                       }
-                       vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
-                       break;
-               case 5:
-                       if (check_dr_alias(vcpu) < 0)
-                               return 1;
-                       /* fall through */
-               default: /* 7 */
-                       if (val & 0xffffffff00000000ULL) {
-                               kvm_inject_gp(vcpu, 0);
-                               return 1;
-                       }
-                       vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-                       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-                               vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-                               vcpu->arch.switch_db_regs =
-                                       (val & DR7_BP_EN_MASK);
-                       }
-                       break;
-               }
-       }
+               unsigned long val;
+               if (!kvm_get_dr(vcpu, dr, &val))
+                       kvm_register_write(vcpu, reg, val);
+       } else
+               kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
        skip_emulated_instruction(vcpu);
        return 1;
 }
 
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       vmcs_writel(GUEST_DR7, val);
+}
+
 static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
        kvm_emulate_cpuid(vcpu);
@@ -3286,6 +3271,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
+       bool has_error_code = false;
+       u32 error_code = 0;
        u16 tss_selector;
        int reason, type, idt_v;
 
@@ -3308,6 +3295,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                        kvm_clear_interrupt_queue(vcpu);
                        break;
                case INTR_TYPE_HARD_EXCEPTION:
+                       if (vmx->idt_vectoring_info &
+                           VECTORING_INFO_DELIVER_CODE_MASK) {
+                               has_error_code = true;
+                               error_code =
+                                       vmcs_read32(IDT_VECTORING_ERROR_CODE);
+                       }
+                       /* fall through */
                case INTR_TYPE_SOFT_EXCEPTION:
                        kvm_clear_exception_queue(vcpu);
                        break;
@@ -3322,7 +3316,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                       type != INTR_TYPE_NMI_INTR))
                skip_emulated_instruction(vcpu);
 
-       if (!kvm_task_switch(vcpu, tss_selector, reason))
+       if (!kvm_task_switch(vcpu, tss_selector, reason, has_error_code,
+                            error_code))
                return 0;
 
        /* clear all local breakpoint enable flags */
@@ -3568,7 +3563,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
 
-       trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
+       trace_kvm_exit(exit_reason, vcpu);
 
        /* If guest state is invalid, start emulating */
        if (vmx->emulation_required && emulate_invalid_guest_state)
@@ -4145,6 +4140,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
+       .set_dr7 = vmx_set_dr7,
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,