Merge branch 'stable-3.2' into pandora-3.2
[pandora-kernel.git] / arch / x86 / kvm / vmx.c
index 0fb33a0..24227a8 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
+#include <linux/nospec.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -40,6 +41,7 @@
 #include <asm/i387.h>
 #include <asm/xcr.h>
 #include <asm/perf_event.h>
+#include <asm/nospec-branch.h>
 
 #include "trace.h"
 
@@ -569,13 +571,21 @@ static unsigned short vmcs_field_to_offset_table[] = {
        FIELD(HOST_RSP, host_rsp),
        FIELD(HOST_RIP, host_rip),
 };
-static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
 
 static inline short vmcs_field_to_offset(unsigned long field)
 {
-       if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
+       const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
+       unsigned short offset;
+
+       BUILD_BUG_ON(size > SHRT_MAX);
+       if (field >= size)
+               return -1;
+
+       field = array_index_nospec(field, size);
+       offset = vmcs_field_to_offset_table[field];
+       if (offset == 0)
                return -1;
-       return vmcs_field_to_offset_table[field];
+       return offset;
 }
 
 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
@@ -2204,6 +2214,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                break;
        case MSR_IA32_CR_PAT:
                if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+                               return 1;
                        vmcs_write64(GUEST_IA32_PAT, data);
                        vcpu->arch.pat = data;
                        break;
@@ -6118,14 +6130,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host);
 }
 
-#ifdef CONFIG_X86_64
-#define R "r"
-#define Q "q"
-#else
-#define R "e"
-#define Q "l"
-#endif
-
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6179,30 +6183,30 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
                /* Store host registers */
-               "push %%"R"dx; push %%"R"bp;"
-               "push %%"R"cx \n\t" /* placeholder for guest rcx */
-               "push %%"R"cx \n\t"
-               "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
+               "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+               "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
+               "push %%" _ASM_CX " \n\t"
+               "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
                "je 1f \n\t"
-               "mov %%"R"sp, %c[host_rsp](%0) \n\t"
+               "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
                __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
                "1: \n\t"
                /* Reload cr2 if changed */
-               "mov %c[cr2](%0), %%"R"ax \n\t"
-               "mov %%cr2, %%"R"dx \n\t"
-               "cmp %%"R"ax, %%"R"dx \n\t"
+               "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
+               "mov %%cr2, %%" _ASM_DX " \n\t"
+               "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
                "je 2f \n\t"
-               "mov %%"R"ax, %%cr2 \n\t"
+               "mov %%" _ASM_AX ", %%cr2 \n\t"
                "2: \n\t"
                /* Check if vmlaunch of vmresume is needed */
                "cmpl $0, %c[launched](%0) \n\t"
                /* Load guest registers.  Don't clobber flags. */
-               "mov %c[rax](%0), %%"R"ax \n\t"
-               "mov %c[rbx](%0), %%"R"bx \n\t"
-               "mov %c[rdx](%0), %%"R"dx \n\t"
-               "mov %c[rsi](%0), %%"R"si \n\t"
-               "mov %c[rdi](%0), %%"R"di \n\t"
-               "mov %c[rbp](%0), %%"R"bp \n\t"
+               "mov %c[rax](%0), %%" _ASM_AX " \n\t"
+               "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
+               "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
+               "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
+               "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
+               "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
 #ifdef CONFIG_X86_64
                "mov %c[r8](%0),  %%r8  \n\t"
                "mov %c[r9](%0),  %%r9  \n\t"
@@ -6213,7 +6217,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %c[r14](%0), %%r14 \n\t"
                "mov %c[r15](%0), %%r15 \n\t"
 #endif
-               "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
+               "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
 
                /* Enter guest mode */
                "jne .Llaunched \n\t"
@@ -6222,15 +6226,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
-               "mov %0, %c[wordsize](%%"R"sp) \n\t"
+               "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
                "pop %0 \n\t"
-               "mov %%"R"ax, %c[rax](%0) \n\t"
-               "mov %%"R"bx, %c[rbx](%0) \n\t"
-               "pop"Q" %c[rcx](%0) \n\t"
-               "mov %%"R"dx, %c[rdx](%0) \n\t"
-               "mov %%"R"si, %c[rsi](%0) \n\t"
-               "mov %%"R"di, %c[rdi](%0) \n\t"
-               "mov %%"R"bp, %c[rbp](%0) \n\t"
+               "setbe %c[fail](%0)\n\t"
+               "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
+               "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
+               __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
+               "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
+               "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
+               "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
+               "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
 #ifdef CONFIG_X86_64
                "mov %%r8,  %c[r8](%0) \n\t"
                "mov %%r9,  %c[r9](%0) \n\t"
@@ -6240,12 +6245,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "mov %%r13, %c[r13](%0) \n\t"
                "mov %%r14, %c[r14](%0) \n\t"
                "mov %%r15, %c[r15](%0) \n\t"
+               "xor %%r8d,  %%r8d \n\t"
+               "xor %%r9d,  %%r9d \n\t"
+               "xor %%r10d, %%r10d \n\t"
+               "xor %%r11d, %%r11d \n\t"
+               "xor %%r12d, %%r12d \n\t"
+               "xor %%r13d, %%r13d \n\t"
+               "xor %%r14d, %%r14d \n\t"
+               "xor %%r15d, %%r15d \n\t"
 #endif
-               "mov %%cr2, %%"R"ax   \n\t"
-               "mov %%"R"ax, %c[cr2](%0) \n\t"
-
-               "pop  %%"R"bp; pop  %%"R"dx \n\t"
-               "setbe %c[fail](%0) \n\t"
+               "mov %%cr2, %%" _ASM_AX "   \n\t"
+               "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
+
+               "xor %%eax, %%eax \n\t"
+               "xor %%ebx, %%ebx \n\t"
+               "xor %%esi, %%esi \n\t"
+               "xor %%edi, %%edi \n\t"
+               "pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
              : : "c"(vmx), "d"((unsigned long)HOST_RSP),
                [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
                [fail]"i"(offsetof(struct vcpu_vmx, fail)),
@@ -6270,12 +6286,17 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
                [wordsize]"i"(sizeof(ulong))
              : "cc", "memory"
-               , R"ax", R"bx", R"di", R"si"
 #ifdef CONFIG_X86_64
+               , "rax", "rbx", "rdi", "rsi"
                , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+               , "eax", "ebx", "edi", "esi"
 #endif
              );
 
+       /* Eliminate branch target predictions from guest mode */
+       vmexit_fill_RSB();
+
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
                                  | (1 << VCPU_EXREG_RFLAGS)
                                  | (1 << VCPU_EXREG_CPL)
@@ -6308,9 +6329,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx_complete_interrupts(vmx);
 }
 
-#undef R
-#undef Q
-
 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6671,6 +6689,14 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
        exec_control &= ~CPU_BASED_TPR_SHADOW;
        exec_control |= vmcs12->cpu_based_vm_exec_control;
+
+       if (!(exec_control & CPU_BASED_TPR_SHADOW)) {
+#ifdef CONFIG_X86_64
+               exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+                               CPU_BASED_CR8_STORE_EXITING;
+#endif
+       }
+
        /*
         * Merging of IO and MSR bitmaps not currently supported.
         * Rather, exit every time.
@@ -7047,7 +7073,7 @@ void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
         * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
         */
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-       kvm_set_cr4(vcpu, vmcs12->host_cr4);
+       vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
        /* shadow page tables on either EPT or shadow page tables */
        kvm_set_cr3(vcpu, vmcs12->host_cr3);
@@ -7068,6 +7094,8 @@ void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
        vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
        vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+       vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+       vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
        vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
        vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
        vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
@@ -7277,12 +7305,7 @@ static int __init vmx_init(void)
                goto out2;
        }
 
-       /*
-        * Allow direct access to the PC debug port (it is often used for I/O
-        * delays, but the vmexits simply slow things down).
-        */
        memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-       clear_bit(0x80, vmx_io_bitmap_a);
 
        memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);