KVM: Make unloading of FPU state when putting vcpu arch-independent
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index f045f40..0c082fa 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -65,7 +65,13 @@ struct vcpu_vmx {
                int           fs_reload_needed;
                int           guest_efer_loaded;
        } host_state;
-
+       struct {
+               struct {
+                       bool pending;
+                       u8 vector;
+                       unsigned rip;
+               } irq;
+       } rmode;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -457,6 +463,7 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
        if (!vmx->host_state.loaded)
                return;
 
+       ++vmx->vcpu.stat.host_state_reload;
        vmx->host_state.loaded = 0;
        if (vmx->host_state.fs_reload_needed)
                load_fs(vmx->host_state.fs_sel);
@@ -534,7 +541,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
        vmx_load_host_state(to_vmx(vcpu));
-       kvm_put_guest_fpu(vcpu);
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
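Note: this hunk only drops the VMX-side call; per the commit subject, the FPU
unload is picked up by the arch-independent vcpu put path. A minimal sketch of
the common-code side, assuming it lands in kvm_main.c's vcpu_put() (that file
is not part of this diff):

	static void vcpu_put(struct kvm_vcpu *vcpu)
	{
		kvm_put_guest_fpu(vcpu);	/* was in vmx_vcpu_put() */
		kvm_x86_ops->vcpu_put(vcpu);
		mutex_unlock(&vcpu->mutex);
	}
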
@@ -974,7 +980,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 #endif
        if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
                min = 0;
-               opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                       SECONDARY_EXEC_WBINVD_EXITING;
                if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
                        return -EIO;
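WBINVD exiting is requested as an opt (best-effort) control: adjust_vmx_controls()
masks opt bits against the allowed-0/allowed-1 words of the capability MSR and
only fails when a min bit is unsupported, so CPUs without
SECONDARY_EXEC_WBINVD_EXITING still load. Roughly, as defined earlier in this
file (not part of this diff):

	static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
					      u32 msr, u32 *result)
	{
		u32 vmx_msr_low, vmx_msr_high;
		u32 ctl = ctl_min | ctl_opt;

		rdmsr(msr, vmx_msr_low, vmx_msr_high);

		ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
		ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

		if (ctl_min & ~ctl)  /* a required bit is unsupported */
			return -EIO;

		*result = ctl;
		return 0;
	}
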
@@ -1713,11 +1720,16 @@ out:
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
        if (vcpu->rmode.active) {
+               vmx->rmode.irq.pending = true;
+               vmx->rmode.irq.vector = irq;
+               vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                             irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
-               vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) - 1);
+               vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
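Worked example of the real-mode trick above, for vector 0x08: VM entry is
programmed to deliver a 1-byte soft interrupt at GUEST_RIP - 1, so the CPU
behaves as if the guest executed a one-byte int-style instruction there: it
pushes FLAGS, CS and IP (IP ends up back at the original rip, since the fake
instruction is one byte long) and vectors through the IVT entry at 4 * 0x08.
If that delivery faults, e.g. while fetching the vm86 interrupt redirection
bitmap from the real-mode tss, GUEST_RIP is left at rip - 1; the recorded
rmode.irq.rip and rmode.irq.vector let fixup_rmode_irq() (added below) detect
and repair exactly that case.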
@@ -2122,6 +2134,13 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       skip_emulated_instruction(vcpu);
+       /* TODO: Add support for VT-d/pass-through device */
+       return 1;
+}
+
 static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u64 exit_qualification;
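The new handle_wbinvd() above deliberately turns guest WBINVD into a no-op:
with no VT-d/pass-through devices, no host DMA depends on the guest flushing
its caches, so skipping the instruction is safe. A hypothetical future shape
once pass-through lands (guest_needs_real_wbinvd() is an invented placeholder,
not a real helper):

	static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	{
		skip_emulated_instruction(vcpu);
		if (guest_needs_real_wbinvd(vcpu))	/* hypothetical */
			wbinvd();	/* flush host caches for real */
		return 1;
	}
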
@@ -2163,6 +2182,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
        [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
+       [EXIT_REASON_WBINVD]                  = handle_wbinvd,
 };
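For context, this table is indexed by the VMCS exit reason; the dispatch in
kvm_handle_exit() elsewhere in this file looks roughly like (not part of this
diff):

	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = exit_reason;
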
 
 static const int kvm_vmx_max_exit_handlers =
@@ -2251,6 +2271,17 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                return;
        }
        if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
+               if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
+                   == INTR_TYPE_EXT_INTR
+                   && vcpu->rmode.active) {
+                       u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
+
+                       vmx_inject_irq(vcpu, vect);
+                       if (unlikely(has_ext_irq))
+                               enable_irq_window(vcpu);
+                       return;
+               }
+
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
                                vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
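Note on the branch above: the generic reinjection path (the two vmcs_write32()
calls) simply replays the interrupted event from IDT_VECTORING_INFO_FIELD. In
real mode that would bypass the rip-rewind trick, so external interrupts are
instead routed back through vmx_inject_irq(), which re-records the rmode.irq
state for the next entry.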
@@ -2275,6 +2306,29 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                enable_irq_window(vcpu);
 }
 
+/*
+ * Failure to inject an interrupt should give us the information
+ * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
+ * when fetching the interrupt redirection bitmap in the real-mode
+ * tss, this doesn't happen.  So we do it ourselves.
+ */
+static void fixup_rmode_irq(struct vcpu_vmx *vmx)
+{
+       vmx->rmode.irq.pending = 0;
+       if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
+               return;
+       vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
+       if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
+               vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
+               vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
+               return;
+       }
+       vmx->idt_vectoring_info =
+               VECTORING_INFO_VALID_MASK
+               | INTR_TYPE_EXT_INTR
+               | vmx->rmode.irq.vector;
+}
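Worked example for fixup_rmode_irq(): if injection of vector 0x08 failed,
GUEST_RIP still equals rip - 1, so the function restores RIP and synthesizes

	vmx->idt_vectoring_info = VECTORING_INFO_VALID_MASK	/* bit 31: 0x80000000 */
				| INTR_TYPE_EXT_INTR		/* type 0 (bits 10:8) */
				| 0x08;				/* the vector */
	/* i.e. 0x80000008 */

which vmx_intr_assist() picks up and re-injects on the next VM entry.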
+
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2296,36 +2350,36 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
                ASM_VMX_VMWRITE_RSP_RDX "\n\t"
                /* Check if vmlaunch or vmresume is needed */
-               "cmp $0, %1 \n\t"
+               "cmpl $0, %c[launched](%0) \n\t"
                /* Load guest registers.  Don't clobber flags. */
 #ifdef CONFIG_X86_64
-               "mov %c[cr2](%3), %%rax \n\t"
+               "mov %c[cr2](%0), %%rax \n\t"
                "mov %%rax, %%cr2 \n\t"
-               "mov %c[rax](%3), %%rax \n\t"
-               "mov %c[rbx](%3), %%rbx \n\t"
-               "mov %c[rdx](%3), %%rdx \n\t"
-               "mov %c[rsi](%3), %%rsi \n\t"
-               "mov %c[rdi](%3), %%rdi \n\t"
-               "mov %c[rbp](%3), %%rbp \n\t"
-               "mov %c[r8](%3),  %%r8  \n\t"
-               "mov %c[r9](%3),  %%r9  \n\t"
-               "mov %c[r10](%3), %%r10 \n\t"
-               "mov %c[r11](%3), %%r11 \n\t"
-               "mov %c[r12](%3), %%r12 \n\t"
-               "mov %c[r13](%3), %%r13 \n\t"
-               "mov %c[r14](%3), %%r14 \n\t"
-               "mov %c[r15](%3), %%r15 \n\t"
-               "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
+               "mov %c[rax](%0), %%rax \n\t"
+               "mov %c[rbx](%0), %%rbx \n\t"
+               "mov %c[rdx](%0), %%rdx \n\t"
+               "mov %c[rsi](%0), %%rsi \n\t"
+               "mov %c[rdi](%0), %%rdi \n\t"
+               "mov %c[rbp](%0), %%rbp \n\t"
+               "mov %c[r8](%0),  %%r8  \n\t"
+               "mov %c[r9](%0),  %%r9  \n\t"
+               "mov %c[r10](%0), %%r10 \n\t"
+               "mov %c[r11](%0), %%r11 \n\t"
+               "mov %c[r12](%0), %%r12 \n\t"
+               "mov %c[r13](%0), %%r13 \n\t"
+               "mov %c[r14](%0), %%r14 \n\t"
+               "mov %c[r15](%0), %%r15 \n\t"
+               "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */
 #else
-               "mov %c[cr2](%3), %%eax \n\t"
+               "mov %c[cr2](%0), %%eax \n\t"
                "mov %%eax,   %%cr2 \n\t"
-               "mov %c[rax](%3), %%eax \n\t"
-               "mov %c[rbx](%3), %%ebx \n\t"
-               "mov %c[rdx](%3), %%edx \n\t"
-               "mov %c[rsi](%3), %%esi \n\t"
-               "mov %c[rdi](%3), %%edi \n\t"
-               "mov %c[rbp](%3), %%ebp \n\t"
-               "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
+               "mov %c[rax](%0), %%eax \n\t"
+               "mov %c[rbx](%0), %%ebx \n\t"
+               "mov %c[rdx](%0), %%edx \n\t"
+               "mov %c[rsi](%0), %%esi \n\t"
+               "mov %c[rdi](%0), %%edi \n\t"
+               "mov %c[rbp](%0), %%ebp \n\t"
+               "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */
 #endif
                /* Enter guest mode */
                "jne .Llaunched \n\t"
@@ -2335,62 +2389,62 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
 #ifdef CONFIG_X86_64
-               "xchg %3,     (%%rsp) \n\t"
-               "mov %%rax, %c[rax](%3) \n\t"
-               "mov %%rbx, %c[rbx](%3) \n\t"
-               "pushq (%%rsp); popq %c[rcx](%3) \n\t"
-               "mov %%rdx, %c[rdx](%3) \n\t"
-               "mov %%rsi, %c[rsi](%3) \n\t"
-               "mov %%rdi, %c[rdi](%3) \n\t"
-               "mov %%rbp, %c[rbp](%3) \n\t"
-               "mov %%r8,  %c[r8](%3) \n\t"
-               "mov %%r9,  %c[r9](%3) \n\t"
-               "mov %%r10, %c[r10](%3) \n\t"
-               "mov %%r11, %c[r11](%3) \n\t"
-               "mov %%r12, %c[r12](%3) \n\t"
-               "mov %%r13, %c[r13](%3) \n\t"
-               "mov %%r14, %c[r14](%3) \n\t"
-               "mov %%r15, %c[r15](%3) \n\t"
+               "xchg %0,     (%%rsp) \n\t"
+               "mov %%rax, %c[rax](%0) \n\t"
+               "mov %%rbx, %c[rbx](%0) \n\t"
+               "pushq (%%rsp); popq %c[rcx](%0) \n\t"
+               "mov %%rdx, %c[rdx](%0) \n\t"
+               "mov %%rsi, %c[rsi](%0) \n\t"
+               "mov %%rdi, %c[rdi](%0) \n\t"
+               "mov %%rbp, %c[rbp](%0) \n\t"
+               "mov %%r8,  %c[r8](%0) \n\t"
+               "mov %%r9,  %c[r9](%0) \n\t"
+               "mov %%r10, %c[r10](%0) \n\t"
+               "mov %%r11, %c[r11](%0) \n\t"
+               "mov %%r12, %c[r12](%0) \n\t"
+               "mov %%r13, %c[r13](%0) \n\t"
+               "mov %%r14, %c[r14](%0) \n\t"
+               "mov %%r15, %c[r15](%0) \n\t"
                "mov %%cr2, %%rax   \n\t"
-               "mov %%rax, %c[cr2](%3) \n\t"
+               "mov %%rax, %c[cr2](%0) \n\t"
 
-               "pop  %%rcx; pop  %%rbp; pop  %%rdx \n\t"
+               "pop  %%rbp; pop  %%rbp; pop  %%rdx \n\t"
 #else
-               "xchg %3, (%%esp) \n\t"
-               "mov %%eax, %c[rax](%3) \n\t"
-               "mov %%ebx, %c[rbx](%3) \n\t"
-               "pushl (%%esp); popl %c[rcx](%3) \n\t"
-               "mov %%edx, %c[rdx](%3) \n\t"
-               "mov %%esi, %c[rsi](%3) \n\t"
-               "mov %%edi, %c[rdi](%3) \n\t"
-               "mov %%ebp, %c[rbp](%3) \n\t"
+               "xchg %0, (%%esp) \n\t"
+               "mov %%eax, %c[rax](%0) \n\t"
+               "mov %%ebx, %c[rbx](%0) \n\t"
+               "pushl (%%esp); popl %c[rcx](%0) \n\t"
+               "mov %%edx, %c[rdx](%0) \n\t"
+               "mov %%esi, %c[rsi](%0) \n\t"
+               "mov %%edi, %c[rdi](%0) \n\t"
+               "mov %%ebp, %c[rbp](%0) \n\t"
                "mov %%cr2, %%eax  \n\t"
-               "mov %%eax, %c[cr2](%3) \n\t"
+               "mov %%eax, %c[cr2](%0) \n\t"
 
-               "pop %%ecx; pop %%ebp; pop %%edx \n\t"
+               "pop %%ebp; pop %%ebp; pop %%edx \n\t"
 #endif
-               "setbe %0 \n\t"
-             : "=q" (vmx->fail)
-             : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
-               "c"(vcpu),
-               [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
-               [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
-               [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
-               [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
-               [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
-               [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
-               [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
+               "setbe %c[fail](%0) \n\t"
+             : : "c"(vmx), "d"((unsigned long)HOST_RSP),
+               [launched]"i"(offsetof(struct vcpu_vmx, launched)),
+               [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+               [rax]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RAX])),
+               [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBX])),
+               [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RCX])),
+               [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDX])),
+               [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RSI])),
+               [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RDI])),
+               [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBP])),
 #ifdef CONFIG_X86_64
-               [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
-               [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
-               [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
-               [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
-               [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
-               [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
-               [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
-               [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
+               [r8]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R8])),
+               [r9]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R9])),
+               [r10]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R10])),
+               [r11]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R11])),
+               [r12]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R12])),
+               [r13]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R13])),
+               [r14]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R14])),
+               [r15]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_R15])),
 #endif
-               [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
+               [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.cr2))
              : "cc", "memory"
 #ifdef CONFIG_X86_64
                , "rbx", "rdi", "rsi"
@@ -2401,6 +2455,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
              );
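The rewritten asm threads a single pointer (vmx in rcx/ecx, operand %0) and
reaches every field through constant offsets, instead of passing vcpu,
launched and fail as separate operands; that is also why launched and fail now
have to live in struct vcpu_vmx. A standalone illustration of the
%c[field](%0) idiom, using a hypothetical struct (not from the kernel):

	#include <stddef.h>

	struct demo { unsigned long a, b; };

	static unsigned long load_b(struct demo *d)
	{
		unsigned long val;

		/* "i" passes the constant offset; %c emits it without the
		 * immediate prefix, so %c[b](%1) assembles to e.g. 8(%rdi). */
		asm("mov %c[b](%1), %0"
		    : "=r"(val)
		    : "r"(d), [b]"i"(offsetof(struct demo, b)));
		return val;
	}
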
 
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+       if (vmx->rmode.irq.pending)
+               fixup_rmode_irq(vmx);
 
        vcpu->interrupt_window_open =
                (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
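Context note: the "& 3" above masks bits 0 (blocking by STI) and 1 (blocking
by MOV SS) of GUEST_INTERRUPTIBILITY_INFO; the interrupt window counts as open
only when neither form of blocking is active.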
@@ -2621,7 +2677,7 @@ static int __init vmx_init(void)
        memset(iova, 0xff, PAGE_SIZE);
        kunmap(vmx_io_bitmap_b);
 
-       r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
        if (r)
                goto out1;
 
@@ -2642,7 +2698,7 @@ static void __exit vmx_exit(void)
        __free_page(vmx_io_bitmap_b);
        __free_page(vmx_io_bitmap_a);
 
-       kvm_exit_x86();
+       kvm_exit();
 }
 
 module_init(vmx_init)