/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Avi Kivity   <avi@qumranet.com>
 * Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "x86_emulate.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int msr_offset_kernel_gs_base;

	u16 fs_sel, gs_sel, ldt_sel;
	int gs_ldt_reload_needed;

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
	return container_of(vcpu, struct vcpu_vmx, vcpu);

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
static struct vmcs_config {
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\

static struct kvm_vmx_segment_field {
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
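/*
 * Illustrative note (annotation, not in the original source): after
 * preprocessing, VMX_SEGMENT_FIELD(CS) expands to an initializer of the
 * form
 *	[VCPU_SREG_CS] = { .selector = GUEST_CS_SELECTOR,
 *			   .base = GUEST_CS_BASE,
 *			   .limit = GUEST_CS_LIMIT,
 *			   .ar_bytes = GUEST_CS_AR_BYTES, },
 * so each table slot maps a segment register to its four VMCS fields.
 */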
/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static void load_msrs(struct kvm_msr_entry *e, int n)
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);

static void save_msrs(struct kvm_msr_entry *e, int n)
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);

static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;

static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
	int efer_offset = vmx->msr_offset_efer;

	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
static inline int is_page_fault(u32 intr_info)
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);

static inline int is_no_device(u32 intr_info)
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);

static inline int is_external_interrupt(u32 intr_info)
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
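/*
 * Worked example (annotation, not in the original): the canonical
 * page-fault interruption-information value is 0x80000b0e, which decodes
 * as vector 14 (#PF) in bits 7:0, type 3 (hardware exception) in bits
 * 10:8, "error code valid" in bit 11, and "valid" in bit 31 -- exactly
 * the pattern is_page_fault() tests for.
 */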
static inline int cpu_has_vmx_tpr_shadow(void)
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);

static inline int vm_need_tpr_shadow(struct kvm *kvm)
	return (cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm));

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;

static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;

static void vmcs_clear(struct vmcs *vmcs)
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);

static void __vcpu_clear(void *arg)
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.host_tsc);

static void vcpu_clear(struct vcpu_vmx *vmx)
	if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
		smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
					 vmx, 0, 1);

static unsigned long vmcs_readl(unsigned long field)
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;

static u16 vmcs_read16(unsigned long field)
	return vmcs_readl(field);

static u32 vmcs_read32(unsigned long field)
	return vmcs_readl(field);
static u64 vmcs_read64(unsigned long field)
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
static noinline void vmwrite_error(unsigned long field, unsigned long value)
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));

static void vmcs_writel(unsigned long field, unsigned long value)
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (error)
		vmwrite_error(field, value);

static void vmcs_write16(unsigned long field, u16 value)
	vmcs_writel(field, value);

static void vmcs_write32(unsigned long field, u32 value)
	vmcs_writel(field, value);
static void vmcs_write64(unsigned long field, u64 value)
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	vmcs_writel(field, value);
	vmcs_writel(field+1, value >> 32);
#endif
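/*
 * Annotation (not in the original): every 64-bit VMCS field has a
 * companion "high" encoding at field+1, so a 32-bit host can still access
 * the full value with two VMREAD/VMWRITE operations -- e.g. writing
 * 0x123456789abcdef0 stores 0x9abcdef0 at 'field' and 0x12345678 at
 * 'field+1'.
 */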
static void vmcs_clear_bits(unsigned long field, u32 mask)
	vmcs_writel(field, vmcs_readl(field) & ~mask);

static void vmcs_set_bits(unsigned long field, u32 mask)
	vmcs_writel(field, vmcs_readl(field) | mask);

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
	eb = 1u << PF_VECTOR;
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
	if (vcpu->rmode.active)
	vmcs_write32(EXCEPTION_BITMAP, eb);
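/*
 * Worked example (annotation): with the guest FPU inactive and no debug
 * or real-mode adjustments, eb is (1u << 14) | (1u << 7) = 0x4080, i.e.
 * only #PF (vector 14) and #NM (vector 7) cause vmexits.
 */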
static void reload_tss(void)
#ifndef CONFIG_X86_64
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */

static void load_transition_efer(struct vcpu_vmx *vmx)
	int efer_offset = vmx->msr_offset_efer;
	u64 trans_efer;

	trans_efer = vmx->host_msrs[efer_offset].data;
	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
	wrmsrl(MSR_EFER, trans_efer);
	vmx->vcpu.stat.efer_reload++;
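/*
 * Worked example (annotation, values hypothetical): if the host EFER is
 * 0xd01 (SCE|LME|LMA|NX) and the guest's EFER image has SCE clear, the
 * transition value becomes 0xd00 -- host long-mode bits are kept, but
 * SYSCALL is disabled to match the guest while its MSRs are loaded.
 */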
static void vmx_save_host_state(struct vcpu_vmx *vmx)
	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

	if (is_long_mode(&vmx->vcpu)) {
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);

	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vmx))
		load_transition_efer(vmx);
static void vmx_load_host_state(struct vcpu_vmx *vmx)
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);

	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vmx))
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
/*
 * Switches to the specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta;

	if (vcpu->cpu != cpu) {
		kvm_migrate_apic_timer(vcpu);

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);      /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
	vmx_load_host_state(to_vmx(vcpu));
	kvm_put_guest_fpu(vcpu);

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
	vcpu_clear(to_vmx(vcpu));

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
	return vmcs_readl(GUEST_RFLAGS);

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	vmcs_writel(GUEST_RFLAGS, rflags);

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
	unsigned long rip;
	u32 interruptibility;

	rip = vmcs_readl(GUEST_RIP);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs_writel(GUEST_RIP, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->interrupt_window_open = 1;
static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
	       vmcs_readl(GUEST_RIP));
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     GP_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
/*
 * Swap an MSR entry in the host/guest MSR entry arrays.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
	int save_nmsrs = 0;
	int index;

	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	vmx->save_nmsrs = save_nmsrs;

	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
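/*
 * Annotation (not in the original): for a long-mode guest with EFER.SCE
 * set, the sequence above packs the entries that really need switching to
 * the front of guest_msrs[]/host_msrs[] -- SYSCALL_MASK, LSTAR, CSTAR,
 * KERNEL_GS_BASE, then K6_STAR -- so save_nmsrs counts just that prefix
 * and the vmexit/vmentry paths touch no other MSRs.
 */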
/*
 * Reads and returns the guest's timestamp counter "register":
 *   guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;

/*
 * Writes 'guest_tsc' into the guest's timestamp counter "register":
 *   guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
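/*
 * Worked example (annotation): guest_write_tsc(400) at host_tsc == 1000
 * stores TSC_OFFSET = 400 - 1000 = -600; a guest rdtsc executed later,
 * when host_tsc == 1500, then reads 1500 + (-600) = 900.
 */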
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
	struct kvm_msr_entry *msr;

		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");

		data = vmcs_readl(GUEST_FS_BASE);
		data = vmcs_readl(GUEST_GS_BASE);
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		return kvm_get_msr_common(vcpu, msr_index, pdata);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;

		ret = kvm_set_msr_common(vcpu, msr_index, data);
		if (vmx->host_state.loaded)
			load_transition_efer(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		vmcs_writel(GUEST_GS_BASE, data);
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		msr = find_msr_entry(vmx, msr_index);
		if (vmx->host_state.loaded)
			load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
		ret = kvm_set_msr_common(vcpu, msr_index, data);
/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
	vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
	vcpu->rip = vmcs_readl(GUEST_RIP);

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
	vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
	vmcs_writel(GUEST_RIP, vcpu->rip);
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
	unsigned long dr7 = 0x400;
	int old_singlestep;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		int i;

		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */

		vcpu->guest_debug.singlestep = dbg->singlestep;
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);
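/*
 * Worked example (annotation): enabling only breakpoint 0 yields
 * dr7 = 0x400 | 0x200 | (2 << 0) = 0x602 -- reserved bit 10 set, GE
 * (exact) set, G0 set, and 00 in bits 17:16 for an execution breakpoint.
 */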
static int vmx_get_irq(struct kvm_vcpu *vcpu)
	u32 idtv_info_field;

	idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	if (idtv_info_field & INTR_INFO_VALID_MASK) {
		if (is_external_interrupt(idtv_info_field))
			return idtv_info_field & VECTORING_INFO_VECTOR_MASK;

		printk(KERN_DEBUG "pending exception: not handled yet\n");
	return -1;

static __init int cpu_has_kvm_support(void)
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */

static __init int vmx_disabled_by_bios(void)
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    == MSR_IA32_FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
static void hardware_enable(void *garbage)
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
		    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
	    != (MSR_IA32_FEATURE_CONTROL_LOCKED |
		MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       MSR_IA32_FEATURE_CONTROL_LOCKED |
		       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory");

static void hardware_disable(void *garbage)
	asm volatile (ASM_VMX_VMXOFF : : : "cc");

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING;
#ifdef CONFIG_X86_64
	opt = CPU_BASED_TPR_SHADOW;
#else
	opt = 0;
#endif
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48] == 0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_conf->size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;
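/*
 * Worked example (annotation, MSR value hypothetical): if rdmsr of
 * MSR_IA32_VMX_BASIC returns vmx_msr_high == 0x001a0400, the checks above
 * pass -- size = 0x400 bytes, bit 16 clear, memory type
 * (0x001a0400 >> 18) & 15 == 6 (Write-Back) -- and get_order(0x400)
 * selects a one-page allocation for each VMCS.
 */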
static struct vmcs *alloc_vmcs_cpu(int cpu)
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;

static struct vmcs *alloc_vmcs(void)
	return alloc_vmcs_cpu(raw_smp_processor_id());

static void free_vmcs(struct vmcs *vmcs)
	free_pages((unsigned long)vmcs, vmcs_config.order);

static void free_kvm_area(void)
	int cpu;

	for_each_online_cpu(cpu)
		free_vmcs(per_cpu(vmxarea, cpu));

static __init int alloc_kvm_area(void)
	int cpu;

	for_each_online_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		per_cpu(vmxarea, cpu) = vmcs;

static __init int hardware_setup(void)
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;
	return alloc_kvm_area();
static __exit void hardware_unsetup(void)
	free_kvm_area();

static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);

static void enter_pmode(struct kvm_vcpu *vcpu)
	unsigned long flags;

	vcpu->rmode.active = 0;

	vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
		    (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
static gva_t rmode_tss_base(struct kvm *kvm)
	gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
	return base_gfn << PAGE_SHIFT;
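/*
 * Worked example (annotation, slot size hypothetical): with a single 64MB
 * slot starting at gfn 0 (npages == 16384), the three-page real-mode TSS
 * area is placed at gfn 16381, i.e. guest address 16381 << 12 ==
 * 0x3ffd000, just below the top of guest RAM.
 */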
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);

static void enter_rmode(struct kvm_vcpu *vcpu)
	unsigned long flags;

	vcpu->rmode.active = 1;

	vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

	flags |= IOPL_MASK | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);

	init_rmode_tss(vcpu->kvm);
#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
		       __FUNCTION__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);

	vcpu->shadow_efer |= EFER_LMA;

	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_IA32E_MODE);

static void exit_lmode(struct kvm_vcpu *vcpu)
	vcpu->shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);

#endif

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
	vmx_fpu_deactivate(vcpu);

	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
#endif

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0,
		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
	vmcs_writel(GUEST_CR3, cr3);
	if (vcpu->cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	vcpu->shadow_efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;

#endif
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	var->base = vmcs_readl(sf->base);
	var->limit = vmcs_read32(sf->limit);
	var->selector = vmcs_read16(sf->selector);
	ar = vmcs_read32(sf->ar_bytes);
	if (ar & AR_UNUSABLE_MASK)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;

static u32 vmx_segment_access_rights(struct kvm_segment *var)
	u32 ar;

	ar = var->type & 15;
	ar |= (var->s & 1) << 4;
	ar |= (var->dpl & 3) << 5;
	ar |= (var->present & 1) << 7;
	ar |= (var->avl & 1) << 12;
	ar |= (var->l & 1) << 13;
	ar |= (var->db & 1) << 14;
	ar |= (var->g & 1) << 15;
	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;
	return ar;
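/*
 * Worked example (annotation): a 16-bit code segment with type 11
 * (execute/read, accessed), s = 1, dpl = 0, present = 1 packs to
 * 11 | (1 << 4) | (0 << 5) | (1 << 7) = 0x9b -- the same AR value the
 * real-mode setup code writes for CS elsewhere in this file.
 */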
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
		vcpu->rmode.tr.selector = var->selector;
		vcpu->rmode.tr.base = var->base;
		vcpu->rmode.tr.limit = var->limit;
		vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vcpu->rmode.active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
	ar = vmx_segment_access_rights(var);
	vmcs_write32(sf->ar_bytes, ar);

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_IDTR_BASE);

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_IDTR_BASE, dt->base);

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_GDTR_BASE);

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_GDTR_BASE, dt->base);
static int init_rmode_tss(struct kvm *kvm)
	struct page *p1, *p2, *p3;
	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	char *page;

	p1 = gfn_to_page(kvm, fn++);
	p2 = gfn_to_page(kvm, fn++);
	p3 = gfn_to_page(kvm, fn);

	if (!p1 || !p2 || !p3) {
		kvm_printf(kvm, "%s: gfn_to_page failed\n", __FUNCTION__);
		return 0;

	page = kmap_atomic(p1, KM_USER0);
	*(u16 *)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p2, KM_USER0);
	kunmap_atomic(page, KM_USER0);

	page = kmap_atomic(p3, KM_USER0);
	*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
	kunmap_atomic(page, KM_USER0);

	return 1;

static void seg_setup(int seg)
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0x93);
/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
	u32 host_sysenter_cs;
	u32 junk;
	unsigned long a;
	struct descriptor_table dt;
	int i;
	int ret = 0;
	unsigned long kvm_vmx_return;
	u64 msr;
	u32 exec_control;

	if (!init_rmode_tss(vmx->vcpu.kvm)) {
		ret = -ENOMEM;
		goto out;

	vmx->vcpu.rmode.active = 0;

	vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
	set_cr8(&vmx->vcpu, 0);
	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (vmx->vcpu.vcpu_id == 0)
		msr |= MSR_IA32_APICBASE_BSP;
	kvm_set_apic_base(&vmx->vcpu, msr);

	fx_init(&vmx->vcpu);

	/*
	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
	 */
	if (vmx->vcpu.vcpu_id == 0) {
		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
	} else {
		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);
	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);

	vmcs_writel(GUEST_RFLAGS, 0x02);
	if (vmx->vcpu.vcpu_id == 0)
		vmcs_writel(GUEST_RIP, 0xfff0);
	else
		vmcs_writel(GUEST_RIP, 0);
	vmcs_writel(GUEST_RSP, 0);

	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
	vmcs_writel(GUEST_DR7, 0x400);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Special registers */
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		     vmcs_config.pin_based_exec_ctrl);

	exec_control = vmcs_config.cpu_based_exec_ctrl;
	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */

	vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
	vmcs_write16(HOST_FS_SELECTOR, read_fs());   /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, read_gs());   /* 22.2.4 */
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */

	get_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */

	asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
	vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
	for (i = 0; i < NR_VMX_MSR; ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		u64 data;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		data = data_low | ((u64)data_high << 32);
		vmx->host_msrs[j].index = index;
		vmx->host_msrs[j].reserved = 0;
		vmx->host_msrs[j].data = data;
		vmx->guest_msrs[j] = vmx->host_msrs[j];

	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */

#ifdef CONFIG_X86_64
	vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
	if (vm_need_tpr_shadow(vmx->vcpu.kvm))
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
			     page_to_phys(vmx->vcpu.apic->regs_page));
	vmcs_write32(TPR_THRESHOLD, 0);
#endif

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

	vmx->vcpu.cr0 = 0x60000010;
	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
	vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
	vmx_set_efer(&vmx->vcpu, 0);
#endif
	vmx_fpu_activate(&vmx->vcpu);
	update_exception_bitmap(&vmx->vcpu);

	return 0;

out:
	return ret;
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
	u16 ent[2];
	u16 cs;
	u16 ip;
	unsigned long flags;
	unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
	u16 sp = vmcs_readl(GUEST_RSP);
	u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

	if (sp > ss_limit || sp < 6) {
		vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_RSP),
			    vmcs_readl(GUEST_SS_BASE),
			    vmcs_read32(GUEST_SS_LIMIT));
		return;

	if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
							X86EMUL_CONTINUE) {
		vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
		return;

	flags = vmcs_readl(GUEST_RFLAGS);
	cs = vmcs_readl(GUEST_CS_BASE) >> 4;
	ip = vmcs_readl(GUEST_RIP);

	if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
	    emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
	    emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
		vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
		return;

	vmcs_writel(GUEST_RFLAGS, flags &
		    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
	vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
	vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
	vmcs_writel(GUEST_RIP, ent[0]);
	vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
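/*
 * Annotation (not in the original): this mimics what a real CPU does for
 * an interrupt in real mode -- push FLAGS, CS, IP (6 bytes) onto the
 * stack, then load CS:IP from the interrupt vector table entry at linear
 * address irq * 4, e.g. vector 8 is fetched from address 0x20.
 */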
static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
	if (vcpu->rmode.active) {
		inject_rmode_irq(vcpu, irq);
		return;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);

static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	vmx_inject_irq(vcpu, irq);

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
	u32 cpu_based_vm_exec_control;

	vcpu->interrupt_window_open =
		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

	if (vcpu->interrupt_window_open &&
	    vcpu->irq_summary &&
	    !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss.  Good.
		 */
		kvm_do_inject_irq(vcpu);

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window))
		/*
		 * Interrupts blocked.  Wait for unblock.
		 */
		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	else
		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;

	set_debugreg(dbg->bp[0], 0);
	set_debugreg(dbg->bp[1], 1);
	set_debugreg(dbg->bp[2], 2);
	set_debugreg(dbg->bp[3], 3);

	if (dbg->singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
		vmcs_writel(GUEST_RFLAGS, flags);
static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
	if (!vcpu->rmode.active)
		return 0;

	/*
	 * An instruction with the address-size override prefix (opcode 0x67)
	 * causes a #SS fault with error code 0 in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
		if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
			return 1;
	return 0;
static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	u32 intr_info, error_code;
	unsigned long cr2, rip;
	u32 vect_info;
	enum emulation_result er;
	int r;

	vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
	    !is_page_fault(intr_info)) {
		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);

	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
		set_bit(irq, vcpu->irq_pending);
		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */

	if (is_no_device(intr_info)) {
		vmx_fpu_activate(vcpu);
		return 1;

	error_code = 0;
	rip = vmcs_readl(GUEST_RIP);
	if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	if (is_page_fault(intr_info)) {
		cr2 = vmcs_readl(EXIT_QUALIFICATION);

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_mmu_page_fault(vcpu, cr2, error_code);
		if (r < 0) {
			mutex_unlock(&vcpu->kvm->lock);
		if (!r) {
			mutex_unlock(&vcpu->kvm->lock);

		er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
		mutex_unlock(&vcpu->kvm->lock);

		switch (er) {
		case EMULATE_DO_MMIO:
			++vcpu->stat.mmio_exits;
		case EMULATE_FAIL:
			vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);

	if (vcpu->rmode.active &&
	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
				   error_code)) {
		if (vcpu->halt_request) {
			vcpu->halt_request = 0;
			return kvm_emulate_halt(vcpu);

	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
	    (INTR_TYPE_EXCEPTION | 1)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
	kvm_run->ex.error_code = error_code;
	return 0;
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
				     struct kvm_run *kvm_run)
	++vcpu->stat.irq_exits;
	return 1;

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;

static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	u64 exit_qualification;
	int size, down, in, string, rep;
	unsigned port;

	++vcpu->stat.io_exits;
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	string = (exit_qualification & 16) != 0;

	if (string) {
		if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;

	size = (exit_qualification & 7) + 1;
	in = (exit_qualification & 8) != 0;
	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
	rep = (exit_qualification & 32) != 0;
	port = exit_qualification >> 16;

	return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
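/*
 * Worked example (annotation): for "inb %dx, %al" from port 0x3f8 the
 * exit qualification is 0x03f80008 -- bits 2:0 = 0 (1-byte access),
 * bit 3 set (IN), string and rep clear, and the port number 0x3f8 in
 * bits 31:16.
 */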
static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
	hypercall[3] = 0xc3;

static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	u64 exit_qualification;
	int cr, reg;

	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
			vcpu_load_rsp_rip(vcpu);
			set_cr0(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);

			vcpu_load_rsp_rip(vcpu);
			set_cr3(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);

			vcpu_load_rsp_rip(vcpu);
			set_cr4(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);

			vcpu_load_rsp_rip(vcpu);
			set_cr8(vcpu, vcpu->regs[reg]);
			skip_emulated_instruction(vcpu);
			kvm_run->exit_reason = KVM_EXIT_SET_TPR;

		vcpu_load_rsp_rip(vcpu);
		vmx_fpu_deactivate(vcpu);
		vcpu->cr0 &= ~X86_CR0_TS;
		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
		vmx_fpu_activate(vcpu);
		skip_emulated_instruction(vcpu);

	case 1: /* mov from cr */
			vcpu_load_rsp_rip(vcpu);
			vcpu->regs[reg] = vcpu->cr3;
			vcpu_put_rsp_rip(vcpu);
			skip_emulated_instruction(vcpu);

			vcpu_load_rsp_rip(vcpu);
			vcpu->regs[reg] = get_cr8(vcpu);
			vcpu_put_rsp_rip(vcpu);
			skip_emulated_instruction(vcpu);

		lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);

		skip_emulated_instruction(vcpu);

	kvm_run->exit_reason = 0;
	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
		  (int)(exit_qualification >> 4) & 3, cr);
	return 0;
static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	u64 exit_qualification;
	unsigned long val;
	int dr, reg;

	/*
	 * FIXME: this code assumes that the host is debugging the guest.
	 * We also need to handle the guest debugging itself.
	 */
	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	dr = exit_qualification & 7;
	reg = (exit_qualification >> 8) & 15;
	vcpu_load_rsp_rip(vcpu);
	if (exit_qualification & 16) {
		vcpu->regs[reg] = val;

	vcpu_put_rsp_rip(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	kvm_emulate_cpuid(vcpu);
	return 1;

static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (vmx_get_msr(vcpu, ecx, &data)) {
		vmx_inject_gp(vcpu, 0);
		return 1;

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->regs[VCPU_REGS_RAX] = data & -1u;
	vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
	skip_emulated_instruction(vcpu);
	return 1;
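/*
 * Worked example (annotation): rdmsr returning data == 0x123456789abcdef0
 * leaves RAX = 0x9abcdef0 (low 32 bits) and RDX = 0x12345678 (high 32
 * bits), matching the hardware rdmsr convention; handle_wrmsr below
 * reassembles the value from the same two halves.
 */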
static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);

	if (vmx_set_msr(vcpu, ecx, data) != 0) {
		vmx_inject_gp(vcpu, 0);
		return 1;

	skip_emulated_instruction(vcpu);
	return 1;

static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run)
	return 1;

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
	kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			(vcpu->interrupt_window_open &&
			 vcpu->irq_summary == 0);
static int handle_interrupt_window(struct kvm_vcpu *vcpu,
				   struct kvm_run *kvm_run)
	u32 cpu_based_vm_exec_control;

	/* clear pending irq */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		++vcpu->stat.irq_window_exits;
		return 0;
	return 1;

static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);

static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
/*
 * The exit handlers return 1 if the exit was handled fully and guest
 * execution may resume.  Otherwise they set the kvm_run parameter to
 * indicate what needs to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run) = {
	[EXIT_REASON_EXCEPTION_NMI]       = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT]  = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]        = handle_triple_fault,
	[EXIT_REASON_IO_INSTRUCTION]      = handle_io,
	[EXIT_REASON_CR_ACCESS]           = handle_cr,
	[EXIT_REASON_DR_ACCESS]           = handle_dr,
	[EXIT_REASON_CPUID]               = handle_cpuid,
	[EXIT_REASON_MSR_READ]            = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]           = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]   = handle_interrupt_window,
	[EXIT_REASON_HLT]                 = handle_halt,
	[EXIT_REASON_VMCALL]              = handle_vmcall,
	[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);

	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    exit_reason != EXIT_REASON_EXCEPTION_NMI)
		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	return 0;
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)

static void update_tpr_threshold(struct kvm_vcpu *vcpu)
	int max_irr;
	int tpr;

	if (!vm_need_tpr_shadow(vcpu->kvm))
		return;

	if (!kvm_lapic_enabled(vcpu) ||
	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
		vmcs_write32(TPR_THRESHOLD, 0);
		return;

	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
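/*
 * Worked example (annotation): with guest CR8 == 3 the shadow TPR is
 * tpr = (3 & 0x0f) << 4 = 0x30; if the highest pending IRR vector is
 * 0x41 (> 0x30), TPR_THRESHOLD becomes 0x30 >> 4 = 3, so lowering the
 * TPR below priority 3 triggers a TPR-below-threshold exit.
 */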
static void enable_irq_window(struct kvm_vcpu *vcpu)
	u32 cpu_based_vm_exec_control;

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

static void vmx_intr_assist(struct kvm_vcpu *vcpu)
	u32 idtv_info_field, intr_info_field;
	int has_ext_irq, interrupt_window_open;
	int vector;

	kvm_inject_pending_timer_irqs(vcpu);
	update_tpr_threshold(vcpu);

	has_ext_irq = kvm_cpu_has_interrupt(vcpu);
	intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
	idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	if (intr_info_field & INTR_INFO_VALID_MASK) {
		if (idtv_info_field & INTR_INFO_VALID_MASK) {
			/* TODO: fault when IDT_Vectoring */
			printk(KERN_ERR "Fault when IDT_Vectoring\n");
		if (has_ext_irq)
			enable_irq_window(vcpu);
		return;
	if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmcs_read32(VM_EXIT_INSTRUCTION_LEN));

		if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
			vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
				     vmcs_read32(IDT_VECTORING_ERROR_CODE));
		if (unlikely(has_ext_irq))
			enable_irq_window(vcpu);
		return;

	if (!has_ext_irq)
		return;
	interrupt_window_open =
		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
	if (interrupt_window_open) {
		vector = kvm_cpu_get_interrupt(vcpu);
		vmx_inject_irq(vcpu, vector);
		kvm_timer_intr_post(vcpu, vector);
	} else
		enable_irq_window(vcpu);
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u8 fail;
	int r;

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		printk(KERN_DEBUG "vcpu %d received sipi with vector # %x\n",
		       vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
		vmx_vcpu_setup(vmx);
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;

	if (vcpu->guest_debug.enabled)
		kvm_guest_debug_pre(vcpu);

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	vmx_save_host_state(vmx);
	kvm_load_guest_fpu(vcpu);

	/*
	 * Loading guest fpu may have cleared host cr0.ts
	 */
	vmcs_writel(HOST_CR0, read_cr0());

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;

	if (irqchip_in_kernel(vcpu->kvm))
		vmx_intr_assist(vcpu);
	else if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	vcpu->guest_mode = 1;

	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
		vmx_flush_tlb(vcpu);
	asm (
		/* Store host registers */
#ifdef CONFIG_X86_64
		"push %%rax; push %%rbx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#else
		"pusha; push %%ecx \n\t"
		ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#endif
		/* Check if vmlaunch or vmresume is needed */
		/* Load guest registers.  Don't clobber flags. */
#ifdef CONFIG_X86_64
		"mov %c[cr2](%3), %%rax \n\t"
		"mov %%rax, %%cr2 \n\t"
		"mov %c[rax](%3), %%rax \n\t"
		"mov %c[rbx](%3), %%rbx \n\t"
		"mov %c[rdx](%3), %%rdx \n\t"
		"mov %c[rsi](%3), %%rsi \n\t"
		"mov %c[rdi](%3), %%rdi \n\t"
		"mov %c[rbp](%3), %%rbp \n\t"
		"mov %c[r8](%3),  %%r8  \n\t"
		"mov %c[r9](%3),  %%r9  \n\t"
		"mov %c[r10](%3), %%r10 \n\t"
		"mov %c[r11](%3), %%r11 \n\t"
		"mov %c[r12](%3), %%r12 \n\t"
		"mov %c[r13](%3), %%r13 \n\t"
		"mov %c[r14](%3), %%r14 \n\t"
		"mov %c[r15](%3), %%r15 \n\t"
		"mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
#else
		"mov %c[cr2](%3), %%eax \n\t"
		"mov %%eax, %%cr2 \n\t"
		"mov %c[rax](%3), %%eax \n\t"
		"mov %c[rbx](%3), %%ebx \n\t"
		"mov %c[rdx](%3), %%edx \n\t"
		"mov %c[rsi](%3), %%esi \n\t"
		"mov %c[rdi](%3), %%edi \n\t"
		"mov %c[rbp](%3), %%ebp \n\t"
		"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
		/* Enter guest mode */
		"jne .Llaunched \n\t"
		ASM_VMX_VMLAUNCH "\n\t"
		"jmp .Lkvm_vmx_return \n\t"
		".Llaunched: " ASM_VMX_VMRESUME "\n\t"
		".Lkvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
		"xchg %3, (%%rsp) \n\t"
		"mov %%rax, %c[rax](%3) \n\t"
		"mov %%rbx, %c[rbx](%3) \n\t"
		"pushq (%%rsp); popq %c[rcx](%3) \n\t"
		"mov %%rdx, %c[rdx](%3) \n\t"
		"mov %%rsi, %c[rsi](%3) \n\t"
		"mov %%rdi, %c[rdi](%3) \n\t"
		"mov %%rbp, %c[rbp](%3) \n\t"
		"mov %%r8,  %c[r8](%3)  \n\t"
		"mov %%r9,  %c[r9](%3)  \n\t"
		"mov %%r10, %c[r10](%3) \n\t"
		"mov %%r11, %c[r11](%3) \n\t"
		"mov %%r12, %c[r12](%3) \n\t"
		"mov %%r13, %c[r13](%3) \n\t"
		"mov %%r14, %c[r14](%3) \n\t"
		"mov %%r15, %c[r15](%3) \n\t"
		"mov %%cr2, %%rax \n\t"
		"mov %%rax, %c[cr2](%3) \n\t"
		"mov (%%rsp), %3 \n\t"

		"pop %%rcx; pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rbx; pop %%rax \n\t"
#else
		"xchg %3, (%%esp) \n\t"
		"mov %%eax, %c[rax](%3) \n\t"
		"mov %%ebx, %c[rbx](%3) \n\t"
		"pushl (%%esp); popl %c[rcx](%3) \n\t"
		"mov %%edx, %c[rdx](%3) \n\t"
		"mov %%esi, %c[rsi](%3) \n\t"
		"mov %%edi, %c[rdi](%3) \n\t"
		"mov %%ebp, %c[rbp](%3) \n\t"
		"mov %%cr2, %%eax \n\t"
		"mov %%eax, %c[cr2](%3) \n\t"
		"mov (%%esp), %3 \n\t"

		"pop %%ecx; popa \n\t"
#endif
	      : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
		[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
		[r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
	vcpu->guest_mode = 0;

	vcpu->interrupt_window_open =
		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;

	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));

	if (unlikely(fail)) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		r = 0;
		goto out;

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));

	r = kvm_handle_exit(kvm_run, vcpu);
	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;

		if (!need_resched()) {
			++vcpu->stat.light_exits;

out:
	post_kvm_run_save(vcpu, kvm_run);
	return r;

static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  u32 err_code)
	u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	++vcpu->stat.pf_guest;

	if (is_page_fault(vect_info)) {
		printk(KERN_DEBUG "inject_page_fault: "
		       "double fault 0x%lx @ 0x%lx\n",
		       addr, vmcs_readl(GUEST_RIP));
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     DF_VECTOR |
			     INTR_TYPE_EXCEPTION |
			     INTR_INFO_DELIEVER_CODE_MASK |
			     INTR_INFO_VALID_MASK);
		return;

	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     PF_VECTOR |
		     INTR_TYPE_EXCEPTION |
		     INTR_INFO_DELIEVER_CODE_MASK |
		     INTR_INFO_VALID_MASK);
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->vmcs) {
		on_each_cpu(__vcpu_clear, vmx, 0, 1);
		free_vmcs(vmx->vmcs);
		vmx->vmcs = NULL;

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx_free_vmcs(vcpu);
	kfree(vmx->host_msrs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vmx);

static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
	int err;
	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	int cpu;

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	if (irqchip_in_kernel(kvm)) {
		err = kvm_create_lapic(&vmx->vcpu);
		if (err < 0)
			goto free_vcpu;

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->guest_msrs) {
		err = -ENOMEM;
		goto uninit_vcpu;

	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->host_msrs)
		goto free_guest_msrs;

	vmx->vmcs = alloc_vmcs();
	if (!vmx->vmcs)
		goto free_msrs;

	vmcs_clear(vmx->vmcs);

	cpu = get_cpu();
	vmx_vcpu_load(&vmx->vcpu, cpu);
	err = vmx_vcpu_setup(vmx);
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();
	if (err)
		goto free_vmcs;

	return &vmx->vcpu;

free_vmcs:
	free_vmcs(vmx->vmcs);
free_msrs:
	kfree(vmx->host_msrs);
free_guest_msrs:
	kfree(vmx->guest_msrs);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);

static void __init vmx_check_processor_compat(void *rtn)
	struct vmcs_config vmcs_conf;

	*(int *)rtn = 0;
	if (setup_vmcs_config(&vmcs_conf) < 0)
		*(int *)rtn = -EIO;
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
		       smp_processor_id());
		*(int *)rtn = -EIO;
static struct kvm_x86_ops vmx_x86_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,

	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,
	.vcpu_decache = vmx_vcpu_decache,

	.set_guest_debug = set_guest_debug,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
#ifdef CONFIG_X86_64
	.set_efer = vmx_set_efer,
#endif
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_regs = vcpu_load_rsp_rip,
	.decache_regs = vcpu_put_rsp_rip,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,
	.inject_page_fault = vmx_inject_page_fault,

	.inject_gp = vmx_inject_gp,

	.run = vmx_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = vmx_patch_hypercall,
	.get_irq = vmx_get_irq,
	.set_irq = vmx_inject_irq,
};
static int __init vmx_init(void)
	void *iova;
	int r;

	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	iova = kmap(vmx_io_bitmap_a);
	memset(iova, 0xff, PAGE_SIZE);
	clear_bit(0x80, iova);
	kunmap(vmx_io_bitmap_a);

	iova = kmap(vmx_io_bitmap_b);
	memset(iova, 0xff, PAGE_SIZE);
	kunmap(vmx_io_bitmap_b);

	r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out1;

	return 0;

out1:
	__free_page(vmx_io_bitmap_b);
out:
	__free_page(vmx_io_bitmap_a);
	return r;

static void __exit vmx_exit(void)
	__free_page(vmx_io_bitmap_b);
	__free_page(vmx_io_bitmap_a);

	kvm_exit_x86();

module_init(vmx_init)
module_exit(vmx_exit)