KVM: Move vmx_vcpu_reset() out of vmx_vcpu_setup()
pandora-kernel.git: drivers/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "kvm.h"
19 #include "x86.h"
20 #include "x86_emulate.h"
21 #include "irq.h"
22 #include "vmx.h"
23 #include "segment_descriptor.h"
24
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31
32 #include <asm/io.h>
33 #include <asm/desc.h>
34
35 MODULE_AUTHOR("Qumranet");
36 MODULE_LICENSE("GPL");
37
38 static int bypass_guest_pf = 1;
39 module_param(bypass_guest_pf, bool, 0);
40
41 struct vmcs {
42         u32 revision_id;
43         u32 abort;
44         char data[0];
45 };
46
47 struct vcpu_vmx {
48         struct kvm_vcpu       vcpu;
49         int                   launched;
50         u8                    fail;
51         struct kvm_msr_entry *guest_msrs;
52         struct kvm_msr_entry *host_msrs;
53         int                   nmsrs;
54         int                   save_nmsrs;
55         int                   msr_offset_efer;
56 #ifdef CONFIG_X86_64
57         int                   msr_offset_kernel_gs_base;
58 #endif
59         struct vmcs          *vmcs;
60         struct {
61                 int           loaded;
62                 u16           fs_sel, gs_sel, ldt_sel;
63                 int           gs_ldt_reload_needed;
64                 int           fs_reload_needed;
65                 int           guest_efer_loaded;
66         } host_state;
67
68 };
69
70 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
71 {
72         return container_of(vcpu, struct vcpu_vmx, vcpu);
73 }
74
75 static int init_rmode_tss(struct kvm *kvm);
76
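/*
 * Per-cpu state: vmxarea is the VMXON region handed to the VMXON
 * instruction when VMX operation is enabled on a cpu; current_vmcs
 * tracks which VMCS was last loaded on that cpu with VMPTRLD.  Both
 * regions begin with the revision id read from MSR_IA32_VMX_BASIC
 * (see alloc_vmcs_cpu()).
 */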
77 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
78 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
79
80 static struct page *vmx_io_bitmap_a;
81 static struct page *vmx_io_bitmap_b;
82
83 static struct vmcs_config {
84         int size;
85         int order;
86         u32 revision_id;
87         u32 pin_based_exec_ctrl;
88         u32 cpu_based_exec_ctrl;
89         u32 vmexit_ctrl;
90         u32 vmentry_ctrl;
91 } vmcs_config;
92
93 #define VMX_SEGMENT_FIELD(seg)                                  \
94         [VCPU_SREG_##seg] = {                                   \
95                 .selector = GUEST_##seg##_SELECTOR,             \
96                 .base = GUEST_##seg##_BASE,                     \
97                 .limit = GUEST_##seg##_LIMIT,                   \
98                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
99         }
100
101 static struct kvm_vmx_segment_field {
102         unsigned selector;
103         unsigned base;
104         unsigned limit;
105         unsigned ar_bytes;
106 } kvm_vmx_segment_fields[] = {
107         VMX_SEGMENT_FIELD(CS),
108         VMX_SEGMENT_FIELD(DS),
109         VMX_SEGMENT_FIELD(ES),
110         VMX_SEGMENT_FIELD(FS),
111         VMX_SEGMENT_FIELD(GS),
112         VMX_SEGMENT_FIELD(SS),
113         VMX_SEGMENT_FIELD(TR),
114         VMX_SEGMENT_FIELD(LDTR),
115 };
116
117 /*
118  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
119  * away by decrementing the array size.
120  */
121 static const u32 vmx_msr_index[] = {
122 #ifdef CONFIG_X86_64
123         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
124 #endif
125         MSR_EFER, MSR_K6_STAR,
126 };
127 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
128
129 static void load_msrs(struct kvm_msr_entry *e, int n)
130 {
131         int i;
132
133         for (i = 0; i < n; ++i)
134                 wrmsrl(e[i].index, e[i].data);
135 }
136
137 static void save_msrs(struct kvm_msr_entry *e, int n)
138 {
139         int i;
140
141         for (i = 0; i < n; ++i)
142                 rdmsrl(e[i].index, e[i].data);
143 }
144
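/*
 * Decoders for the VM-exit interruption-information field.  Per the
 * Intel SDM, bits 7:0 hold the vector, bits 10:8 the interruption type,
 * bit 11 the error-code-valid flag and bit 31 the valid flag; a page
 * fault, for example, reports as
 * INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK.
 */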
145 static inline int is_page_fault(u32 intr_info)
146 {
147         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
148                              INTR_INFO_VALID_MASK)) ==
149                 (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
150 }
151
152 static inline int is_no_device(u32 intr_info)
153 {
154         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
155                              INTR_INFO_VALID_MASK)) ==
156                 (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
157 }
158
159 static inline int is_invalid_opcode(u32 intr_info)
160 {
161         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
162                              INTR_INFO_VALID_MASK)) ==
163                 (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
164 }
165
166 static inline int is_external_interrupt(u32 intr_info)
167 {
168         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
169                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
170 }
171
172 static inline int cpu_has_vmx_tpr_shadow(void)
173 {
174         return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
175 }
176
177 static inline int vm_need_tpr_shadow(struct kvm *kvm)
178 {
179         return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
180 }
181
182 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
183 {
184         int i;
185
186         for (i = 0; i < vmx->nmsrs; ++i)
187                 if (vmx->guest_msrs[i].index == msr)
188                         return i;
189         return -1;
190 }
191
192 static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
193 {
194         int i;
195
196         i = __find_msr_index(vmx, msr);
197         if (i >= 0)
198                 return &vmx->guest_msrs[i];
199         return NULL;
200 }
201
202 static void vmcs_clear(struct vmcs *vmcs)
203 {
204         u64 phys_addr = __pa(vmcs);
205         u8 error;
206
207         asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
208                       : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
209                       : "cc", "memory");
210         if (error)
211                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
212                        vmcs, phys_addr);
213 }
214
215 static void __vcpu_clear(void *arg)
216 {
217         struct vcpu_vmx *vmx = arg;
218         int cpu = raw_smp_processor_id();
219
220         if (vmx->vcpu.cpu == cpu)
221                 vmcs_clear(vmx->vmcs);
222         if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
223                 per_cpu(current_vmcs, cpu) = NULL;
224         rdtscll(vmx->vcpu.host_tsc);
225 }
226
227 static void vcpu_clear(struct vcpu_vmx *vmx)
228 {
229         if (vmx->vcpu.cpu == -1)
230                 return;
231         smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
232         vmx->launched = 0;
233 }
234
235 static unsigned long vmcs_readl(unsigned long field)
236 {
237         unsigned long value;
238
239         asm volatile (ASM_VMX_VMREAD_RDX_RAX
240                       : "=a"(value) : "d"(field) : "cc");
241         return value;
242 }
243
244 static u16 vmcs_read16(unsigned long field)
245 {
246         return vmcs_readl(field);
247 }
248
249 static u32 vmcs_read32(unsigned long field)
250 {
251         return vmcs_readl(field);
252 }
253
254 static u64 vmcs_read64(unsigned long field)
255 {
256 #ifdef CONFIG_X86_64
257         return vmcs_readl(field);
258 #else
259         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
260 #endif
261 }
262
263 static noinline void vmwrite_error(unsigned long field, unsigned long value)
264 {
265         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
266                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
267         dump_stack();
268 }
269
270 static void vmcs_writel(unsigned long field, unsigned long value)
271 {
272         u8 error;
273
274         asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
275                        : "=q"(error) : "a"(value), "d"(field) : "cc");
276         if (unlikely(error))
277                 vmwrite_error(field, value);
278 }
279
280 static void vmcs_write16(unsigned long field, u16 value)
281 {
282         vmcs_writel(field, value);
283 }
284
285 static void vmcs_write32(unsigned long field, u32 value)
286 {
287         vmcs_writel(field, value);
288 }
289
290 static void vmcs_write64(unsigned long field, u64 value)
291 {
292 #ifdef CONFIG_X86_64
293         vmcs_writel(field, value);
294 #else
295         vmcs_writel(field, value);
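        /* compiler barrier: keep the two 32-bit halves from being
         * reordered or merged */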
296         asm volatile ("");
297         vmcs_writel(field+1, value >> 32);
298 #endif
299 }
300
301 static void vmcs_clear_bits(unsigned long field, u32 mask)
302 {
303         vmcs_writel(field, vmcs_readl(field) & ~mask);
304 }
305
306 static void vmcs_set_bits(unsigned long field, u32 mask)
307 {
308         vmcs_writel(field, vmcs_readl(field) | mask);
309 }
310
311 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
312 {
313         u32 eb;
314
315         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
316         if (!vcpu->fpu_active)
317                 eb |= 1u << NM_VECTOR;
318         if (vcpu->guest_debug.enabled)
319                 eb |= 1u << 1;
320         if (vcpu->rmode.active)
321                 eb = ~0;
322         vmcs_write32(EXCEPTION_BITMAP, eb);
323 }
324
325 static void reload_tss(void)
326 {
327 #ifndef CONFIG_X86_64
328
329         /*
330          * VT restores TR but not its size.  Useless.
331          */
332         struct descriptor_table gdt;
333         struct segment_descriptor *descs;
334
335         get_gdt(&gdt);
336         descs = (void *)gdt.base;
337         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
338         load_TR_desc();
339 #endif
340 }
341
342 static void load_transition_efer(struct vcpu_vmx *vmx)
343 {
344         int efer_offset = vmx->msr_offset_efer;
345         u64 host_efer, guest_efer, ignore_bits;
346
347         if (efer_offset < 0)
348                 return;
349         host_efer = vmx->host_msrs[efer_offset].data;
350         guest_efer = vmx->guest_msrs[efer_offset].data;
351         /*
352          * NX is emulated; LMA and LME are handled by hardware; SCE is
353          * meaningless outside long mode
354          */
355         ignore_bits = EFER_NX | EFER_SCE;
356 #ifdef CONFIG_X86_64
357         ignore_bits |= EFER_LMA | EFER_LME;
358         /* SCE is meaningful only in long mode on Intel */
359         if (guest_efer & EFER_LMA)
360                 ignore_bits &= ~(u64)EFER_SCE;
361 #endif
362         if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
363                 return;
364
365         vmx->host_state.guest_efer_loaded = 1;
366         guest_efer &= ~ignore_bits;
367         guest_efer |= host_efer & ignore_bits;
368         wrmsrl(MSR_EFER, guest_efer);
369         vmx->vcpu.stat.efer_reload++;
370 }
371
372 static void reload_host_efer(struct vcpu_vmx *vmx)
373 {
374         if (vmx->host_state.guest_efer_loaded) {
375                 vmx->host_state.guest_efer_loaded = 0;
376                 load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
377         }
378 }
379
380 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
381 {
382         struct vcpu_vmx *vmx = to_vmx(vcpu);
383
384         if (vmx->host_state.loaded)
385                 return;
386
387         vmx->host_state.loaded = 1;
388         /*
389          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
390          * allow segment selectors with cpl > 0 or ti == 1.
391          */
392         vmx->host_state.ldt_sel = read_ldt();
393         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
394         vmx->host_state.fs_sel = read_fs();
395         if (!(vmx->host_state.fs_sel & 7)) {
396                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
397                 vmx->host_state.fs_reload_needed = 0;
398         } else {
399                 vmcs_write16(HOST_FS_SELECTOR, 0);
400                 vmx->host_state.fs_reload_needed = 1;
401         }
402         vmx->host_state.gs_sel = read_gs();
403         if (!(vmx->host_state.gs_sel & 7))
404                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
405         else {
406                 vmcs_write16(HOST_GS_SELECTOR, 0);
407                 vmx->host_state.gs_ldt_reload_needed = 1;
408         }
409
410 #ifdef CONFIG_X86_64
411         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
412         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
413 #else
414         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
415         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
416 #endif
417
418 #ifdef CONFIG_X86_64
419         if (is_long_mode(&vmx->vcpu))
420                 save_msrs(vmx->host_msrs +
421                           vmx->msr_offset_kernel_gs_base, 1);
422
423 #endif
424         load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
425         load_transition_efer(vmx);
426 }
427
428 static void vmx_load_host_state(struct vcpu_vmx *vmx)
429 {
430         unsigned long flags;
431
432         if (!vmx->host_state.loaded)
433                 return;
434
435         vmx->host_state.loaded = 0;
436         if (vmx->host_state.fs_reload_needed)
437                 load_fs(vmx->host_state.fs_sel);
438         if (vmx->host_state.gs_ldt_reload_needed) {
439                 load_ldt(vmx->host_state.ldt_sel);
440                 /*
441                  * If we have to reload gs, we must take care to
442                  * preserve our gs base.
443                  */
444                 local_irq_save(flags);
445                 load_gs(vmx->host_state.gs_sel);
446 #ifdef CONFIG_X86_64
447                 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
448 #endif
449                 local_irq_restore(flags);
450         }
451         reload_tss();
452         save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
453         load_msrs(vmx->host_msrs, vmx->save_nmsrs);
454         reload_host_efer(vmx);
455 }
456
457 /*
458  * Switches to the specified vcpu, until a matching vcpu_put();
459  * assumes the vcpu mutex is already taken.
460  */
461 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
462 {
463         struct vcpu_vmx *vmx = to_vmx(vcpu);
464         u64 phys_addr = __pa(vmx->vmcs);
465         u64 tsc_this, delta;
466
467         if (vcpu->cpu != cpu) {
468                 vcpu_clear(vmx);
469                 kvm_migrate_apic_timer(vcpu);
470         }
471
472         if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
473                 u8 error;
474
475                 per_cpu(current_vmcs, cpu) = vmx->vmcs;
476                 asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
477                               : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
478                               : "cc");
479                 if (error)
480                         printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
481                                vmx->vmcs, phys_addr);
482         }
483
484         if (vcpu->cpu != cpu) {
485                 struct descriptor_table dt;
486                 unsigned long sysenter_esp;
487
488                 vcpu->cpu = cpu;
489                 /*
490                  * Linux uses per-cpu TSS and GDT, so set these when switching
491                  * processors.
492                  */
493                 vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
494                 get_gdt(&dt);
495                 vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
496
497                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
498                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
499
500                 /*
501                  * Make sure the time stamp counter stays monotonic.
502                  */
503                 rdtscll(tsc_this);
504                 delta = vcpu->host_tsc - tsc_this;
505                 vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
506         }
507 }
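/*
 * Worked example of the offset fixup above: if the vcpu last ran on a
 * cpu whose TSC read 1000 (saved in vcpu->host_tsc by __vcpu_clear())
 * and the new cpu's TSC reads 400, delta is 600; adding it to
 * TSC_OFFSET keeps guest_tsc (= host_tsc + tsc_offset, see
 * guest_read_tsc()) from jumping backwards across the migration.
 */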
508
509 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
510 {
511         vmx_load_host_state(to_vmx(vcpu));
512         kvm_put_guest_fpu(vcpu);
513 }
514
515 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
516 {
517         if (vcpu->fpu_active)
518                 return;
519         vcpu->fpu_active = 1;
520         vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
521         if (vcpu->cr0 & X86_CR0_TS)
522                 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
523         update_exception_bitmap(vcpu);
524 }
525
526 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
527 {
528         if (!vcpu->fpu_active)
529                 return;
530         vcpu->fpu_active = 0;
531         vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
532         update_exception_bitmap(vcpu);
533 }
534
535 static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
536 {
537         vcpu_clear(to_vmx(vcpu));
538 }
539
540 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
541 {
542         return vmcs_readl(GUEST_RFLAGS);
543 }
544
545 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
546 {
547         if (vcpu->rmode.active)
548                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
549         vmcs_writel(GUEST_RFLAGS, rflags);
550 }
551
552 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
553 {
554         unsigned long rip;
555         u32 interruptibility;
556
557         rip = vmcs_readl(GUEST_RIP);
558         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
559         vmcs_writel(GUEST_RIP, rip);
560
561         /*
562          * We emulated an instruction, so temporary interrupt blocking
563          * should be removed, if set.
564          */
565         interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
566         if (interruptibility & 3)
567                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
568                              interruptibility & ~3);
569         vcpu->interrupt_window_open = 1;
570 }
571
572 static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
573 {
574         printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
575                vmcs_readl(GUEST_RIP));
576         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
577         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
578                      GP_VECTOR |
579                      INTR_TYPE_EXCEPTION |
580                      INTR_INFO_DELIEVER_CODE_MASK |
581                      INTR_INFO_VALID_MASK);
582 }
583
584 static void vmx_inject_ud(struct kvm_vcpu *vcpu)
585 {
586         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
587                      UD_VECTOR |
588                      INTR_TYPE_EXCEPTION |
589                      INTR_INFO_VALID_MASK);
590 }
591
592 /*
593  * Swap MSR entry in host/guest MSR entry array.
594  */
595 #ifdef CONFIG_X86_64
596 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
597 {
598         struct kvm_msr_entry tmp;
599
600         tmp = vmx->guest_msrs[to];
601         vmx->guest_msrs[to] = vmx->guest_msrs[from];
602         vmx->guest_msrs[from] = tmp;
603         tmp = vmx->host_msrs[to];
604         vmx->host_msrs[to] = vmx->host_msrs[from];
605         vmx->host_msrs[from] = tmp;
606 }
607 #endif
608
609 /*
610  * Set up the vmcs to automatically save and restore system
611  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
612  * mode, as fiddling with msrs is very expensive.
613  */
614 static void setup_msrs(struct vcpu_vmx *vmx)
615 {
616         int save_nmsrs;
617
618         save_nmsrs = 0;
619 #ifdef CONFIG_X86_64
620         if (is_long_mode(&vmx->vcpu)) {
621                 int index;
622
623                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
624                 if (index >= 0)
625                         move_msr_up(vmx, index, save_nmsrs++);
626                 index = __find_msr_index(vmx, MSR_LSTAR);
627                 if (index >= 0)
628                         move_msr_up(vmx, index, save_nmsrs++);
629                 index = __find_msr_index(vmx, MSR_CSTAR);
630                 if (index >= 0)
631                         move_msr_up(vmx, index, save_nmsrs++);
632                 index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
633                 if (index >= 0)
634                         move_msr_up(vmx, index, save_nmsrs++);
635                 /*
636                  * MSR_K6_STAR is only needed on long mode guests, and only
637                  * if efer.sce is enabled.
638                  */
639                 index = __find_msr_index(vmx, MSR_K6_STAR);
640                 if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
641                         move_msr_up(vmx, index, save_nmsrs++);
642         }
643 #endif
644         vmx->save_nmsrs = save_nmsrs;
645
646 #ifdef CONFIG_X86_64
647         vmx->msr_offset_kernel_gs_base =
648                 __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
649 #endif
650         vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
651 }
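/*
 * Example: for a long mode guest with EFER.SCE set, setup_msrs() leaves
 * the arrays ordered MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 * MSR_KERNEL_GS_BASE, MSR_K6_STAR and sets save_nmsrs = 5, so the
 * world-switch paths touch only the msrs the guest can actually use.
 */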
652
653 /*
654  * reads and returns guest's timestamp counter "register"
655  * guest_tsc = host_tsc + tsc_offset    -- 21.3
656  */
657 static u64 guest_read_tsc(void)
658 {
659         u64 host_tsc, tsc_offset;
660
661         rdtscll(host_tsc);
662         tsc_offset = vmcs_read64(TSC_OFFSET);
663         return host_tsc + tsc_offset;
664 }
665
666 /*
667  * writes 'guest_tsc' into guest's timestamp counter "register"
668  * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
669  */
670 static void guest_write_tsc(u64 guest_tsc)
671 {
672         u64 host_tsc;
673
674         rdtscll(host_tsc);
675         vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
676 }
677
678 /*
679  * Reads an msr value (of 'msr_index') into 'pdata'.
680  * Returns 0 on success, non-0 otherwise.
681  * Assumes vcpu_load() was already called.
682  */
683 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
684 {
685         u64 data;
686         struct kvm_msr_entry *msr;
687
688         if (!pdata) {
689                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
690                 return -EINVAL;
691         }
692
693         switch (msr_index) {
694 #ifdef CONFIG_X86_64
695         case MSR_FS_BASE:
696                 data = vmcs_readl(GUEST_FS_BASE);
697                 break;
698         case MSR_GS_BASE:
699                 data = vmcs_readl(GUEST_GS_BASE);
700                 break;
701         case MSR_EFER:
702                 return kvm_get_msr_common(vcpu, msr_index, pdata);
703 #endif
704         case MSR_IA32_TIME_STAMP_COUNTER:
705                 data = guest_read_tsc();
706                 break;
707         case MSR_IA32_SYSENTER_CS:
708                 data = vmcs_read32(GUEST_SYSENTER_CS);
709                 break;
710         case MSR_IA32_SYSENTER_EIP:
711                 data = vmcs_readl(GUEST_SYSENTER_EIP);
712                 break;
713         case MSR_IA32_SYSENTER_ESP:
714                 data = vmcs_readl(GUEST_SYSENTER_ESP);
715                 break;
716         default:
717                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
718                 if (msr) {
719                         data = msr->data;
720                         break;
721                 }
722                 return kvm_get_msr_common(vcpu, msr_index, pdata);
723         }
724
725         *pdata = data;
726         return 0;
727 }
728
729 /*
730  * Writes the msr value into the appropriate "register".
731  * Returns 0 on success, non-0 otherwise.
732  * Assumes vcpu_load() was already called.
733  */
734 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
735 {
736         struct vcpu_vmx *vmx = to_vmx(vcpu);
737         struct kvm_msr_entry *msr;
738         int ret = 0;
739
740         switch (msr_index) {
741 #ifdef CONFIG_X86_64
742         case MSR_EFER:
743                 ret = kvm_set_msr_common(vcpu, msr_index, data);
744                 if (vmx->host_state.loaded) {
745                         reload_host_efer(vmx);
746                         load_transition_efer(vmx);
747                 }
748                 break;
749         case MSR_FS_BASE:
750                 vmcs_writel(GUEST_FS_BASE, data);
751                 break;
752         case MSR_GS_BASE:
753                 vmcs_writel(GUEST_GS_BASE, data);
754                 break;
755 #endif
756         case MSR_IA32_SYSENTER_CS:
757                 vmcs_write32(GUEST_SYSENTER_CS, data);
758                 break;
759         case MSR_IA32_SYSENTER_EIP:
760                 vmcs_writel(GUEST_SYSENTER_EIP, data);
761                 break;
762         case MSR_IA32_SYSENTER_ESP:
763                 vmcs_writel(GUEST_SYSENTER_ESP, data);
764                 break;
765         case MSR_IA32_TIME_STAMP_COUNTER:
766                 guest_write_tsc(data);
767                 break;
768         default:
769                 msr = find_msr_entry(vmx, msr_index);
770                 if (msr) {
771                         msr->data = data;
772                         if (vmx->host_state.loaded)
773                                 load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
774                         break;
775                 }
776                 ret = kvm_set_msr_common(vcpu, msr_index, data);
777         }
778
779         return ret;
780 }
781
782 /*
783  * Sync the rsp and rip registers into the vcpu structure.  This allows
784  * registers to be accessed by indexing vcpu->regs.
785  */
786 static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
787 {
788         vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
789         vcpu->rip = vmcs_readl(GUEST_RIP);
790 }
791
792 /*
793  * Syncs rsp and rip back into the vmcs.  Should be called after possible
794  * modification.
795  */
796 static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
797 {
798         vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
799         vmcs_writel(GUEST_RIP, vcpu->rip);
800 }
801
802 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
803 {
804         unsigned long dr7 = 0x400;
805         int old_singlestep;
806
807         old_singlestep = vcpu->guest_debug.singlestep;
808
809         vcpu->guest_debug.enabled = dbg->enabled;
810         if (vcpu->guest_debug.enabled) {
811                 int i;
812
813                 dr7 |= 0x200;  /* exact */
814                 for (i = 0; i < 4; ++i) {
815                         if (!dbg->breakpoints[i].enabled)
816                                 continue;
817                         vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
818                         dr7 |= 2 << (i*2);    /* global enable */
819                         dr7 |= 0 << (i*4+16); /* execution breakpoint */
820                 }
821
822                 vcpu->guest_debug.singlestep = dbg->singlestep;
823         } else
824                 vcpu->guest_debug.singlestep = 0;
825
826         if (old_singlestep && !vcpu->guest_debug.singlestep) {
827                 unsigned long flags;
828
829                 flags = vmcs_readl(GUEST_RFLAGS);
830                 flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
831                 vmcs_writel(GUEST_RFLAGS, flags);
832         }
833
834         update_exception_bitmap(vcpu);
835         vmcs_writel(GUEST_DR7, dr7);
836
837         return 0;
838 }
839
840 static int vmx_get_irq(struct kvm_vcpu *vcpu)
841 {
842         u32 idtv_info_field;
843
844         idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
845         if (idtv_info_field & INTR_INFO_VALID_MASK) {
846                 if (is_external_interrupt(idtv_info_field))
847                         return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
848                 else
849                         printk(KERN_DEBUG "pending exception: not handled yet\n");
850         }
851         return -1;
852 }
853
854 static __init int cpu_has_kvm_support(void)
855 {
856         unsigned long ecx = cpuid_ecx(1);
857         return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
858 }
859
860 static __init int vmx_disabled_by_bios(void)
861 {
862         u64 msr;
863
864         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
865         return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
866                        MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
867             == MSR_IA32_FEATURE_CONTROL_LOCKED;
868         /* locked but not enabled */
869 }
870
871 static void hardware_enable(void *garbage)
872 {
873         int cpu = raw_smp_processor_id();
874         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
875         u64 old;
876
877         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
878         if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
879                     MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
880             != (MSR_IA32_FEATURE_CONTROL_LOCKED |
881                 MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
882                 /* enable and lock */
883                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
884                        MSR_IA32_FEATURE_CONTROL_LOCKED |
885                        MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
886         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
887         asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
888                       : "memory", "cc");
889 }
890
891 static void hardware_disable(void *garbage)
892 {
893         asm volatile (ASM_VMX_VMXOFF : : : "cc");
894 }
895
896 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
897                                       u32 msr, u32 *result)
898 {
899         u32 vmx_msr_low, vmx_msr_high;
900         u32 ctl = ctl_min | ctl_opt;
901
902         rdmsr(msr, vmx_msr_low, vmx_msr_high);
903
904         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
905         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
906
907         /* Ensure minimum (required) set of control bits are supported. */
908         if (ctl_min & ~ctl)
909                 return -EIO;
910
911         *result = ctl;
912         return 0;
913 }
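/*
 * Example: with ctl_min = 0x10 and ctl_opt = 0x04, a capability msr
 * reading low = 0x01 (bits that must be one) and high = 0xfffb (bits
 * allowed to be one) yields ctl = ((0x14 & 0xfffb) | 0x01) = 0x11:
 * the optional bit 2 is dropped as unsupported, the required bit 4
 * survives, and bit 0 is forced on by hardware.
 */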
914
915 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
916 {
917         u32 vmx_msr_low, vmx_msr_high;
918         u32 min, opt;
919         u32 _pin_based_exec_control = 0;
920         u32 _cpu_based_exec_control = 0;
921         u32 _vmexit_control = 0;
922         u32 _vmentry_control = 0;
923
924         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
925         opt = 0;
926         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
927                                 &_pin_based_exec_control) < 0)
928                 return -EIO;
929
930         min = CPU_BASED_HLT_EXITING |
931 #ifdef CONFIG_X86_64
932               CPU_BASED_CR8_LOAD_EXITING |
933               CPU_BASED_CR8_STORE_EXITING |
934 #endif
935               CPU_BASED_USE_IO_BITMAPS |
936               CPU_BASED_MOV_DR_EXITING |
937               CPU_BASED_USE_TSC_OFFSETING;
938 #ifdef CONFIG_X86_64
939         opt = CPU_BASED_TPR_SHADOW;
940 #else
941         opt = 0;
942 #endif
943         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
944                                 &_cpu_based_exec_control) < 0)
945                 return -EIO;
946 #ifdef CONFIG_X86_64
947         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
948                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
949                                            ~CPU_BASED_CR8_STORE_EXITING;
950 #endif
951
952         min = 0;
953 #ifdef CONFIG_X86_64
954         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
955 #endif
956         opt = 0;
957         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
958                                 &_vmexit_control) < 0)
959                 return -EIO;
960
961         min = opt = 0;
962         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
963                                 &_vmentry_control) < 0)
964                 return -EIO;
965
966         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
967
968         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
969         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
970                 return -EIO;
971
972 #ifdef CONFIG_X86_64
973         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
974         if (vmx_msr_high & (1u<<16))
975                 return -EIO;
976 #endif
977
978         /* Require Write-Back (WB) memory type for VMCS accesses. */
979         if (((vmx_msr_high >> 18) & 15) != 6)
980                 return -EIO;
981
982         vmcs_conf->size = vmx_msr_high & 0x1fff;
983         vmcs_conf->order = get_order(vmcs_conf->size);
984         vmcs_conf->revision_id = vmx_msr_low;
985
986         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
987         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
988         vmcs_conf->vmexit_ctrl         = _vmexit_control;
989         vmcs_conf->vmentry_ctrl        = _vmentry_control;
990
991         return 0;
992 }
993
994 static struct vmcs *alloc_vmcs_cpu(int cpu)
995 {
996         int node = cpu_to_node(cpu);
997         struct page *pages;
998         struct vmcs *vmcs;
999
1000         pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
1001         if (!pages)
1002                 return NULL;
1003         vmcs = page_address(pages);
1004         memset(vmcs, 0, vmcs_config.size);
1005         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
1006         return vmcs;
1007 }
1008
1009 static struct vmcs *alloc_vmcs(void)
1010 {
1011         return alloc_vmcs_cpu(raw_smp_processor_id());
1012 }
1013
1014 static void free_vmcs(struct vmcs *vmcs)
1015 {
1016         free_pages((unsigned long)vmcs, vmcs_config.order);
1017 }
1018
1019 static void free_kvm_area(void)
1020 {
1021         int cpu;
1022
1023         for_each_online_cpu(cpu)
1024                 free_vmcs(per_cpu(vmxarea, cpu));
1025 }
1026
1027 static __init int alloc_kvm_area(void)
1028 {
1029         int cpu;
1030
1031         for_each_online_cpu(cpu) {
1032                 struct vmcs *vmcs;
1033
1034                 vmcs = alloc_vmcs_cpu(cpu);
1035                 if (!vmcs) {
1036                         free_kvm_area();
1037                         return -ENOMEM;
1038                 }
1039
1040                 per_cpu(vmxarea, cpu) = vmcs;
1041         }
1042         return 0;
1043 }
1044
1045 static __init int hardware_setup(void)
1046 {
1047         if (setup_vmcs_config(&vmcs_config) < 0)
1048                 return -EIO;
1049         return alloc_kvm_area();
1050 }
1051
1052 static __exit void hardware_unsetup(void)
1053 {
1054         free_kvm_area();
1055 }
1056
1057 static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
1058 {
1059         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1060
1061         if (vmcs_readl(sf->base) == save->base && (save->ar & AR_S_MASK)) {
1062                 vmcs_write16(sf->selector, save->selector);
1063                 vmcs_writel(sf->base, save->base);
1064                 vmcs_write32(sf->limit, save->limit);
1065                 vmcs_write32(sf->ar_bytes, save->ar);
1066         } else {
1067                 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
1068                         << AR_DPL_SHIFT;
1069                 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
1070         }
1071 }
1072
1073 static void enter_pmode(struct kvm_vcpu *vcpu)
1074 {
1075         unsigned long flags;
1076
1077         vcpu->rmode.active = 0;
1078
1079         vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
1080         vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
1081         vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
1082
1083         flags = vmcs_readl(GUEST_RFLAGS);
1084         flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1085         flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
1086         vmcs_writel(GUEST_RFLAGS, flags);
1087
1088         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1089                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
1090
1091         update_exception_bitmap(vcpu);
1092
1093         fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
1094         fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
1095         fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
1096         fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
1097
1098         vmcs_write16(GUEST_SS_SELECTOR, 0);
1099         vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
1100
1101         vmcs_write16(GUEST_CS_SELECTOR,
1102                      vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
1103         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1104 }
1105
1106 static gva_t rmode_tss_base(struct kvm *kvm)
1107 {
1108         gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
1109         return base_gfn << PAGE_SHIFT;
1110 }
1111
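/*
 * In vm86 mode a linear address is selector * 16 + offset, so to keep
 * the guest's flat view of each segment intact we load selector =
 * base >> 4 and force vm86-compatible attributes: limit 0xffff and
 * ar 0xf3 (present, dpl 3, read/write data).
 */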
1112 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
1113 {
1114         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1115
1116         save->selector = vmcs_read16(sf->selector);
1117         save->base = vmcs_readl(sf->base);
1118         save->limit = vmcs_read32(sf->limit);
1119         save->ar = vmcs_read32(sf->ar_bytes);
1120         vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
1121         vmcs_write32(sf->limit, 0xffff);
1122         vmcs_write32(sf->ar_bytes, 0xf3);
1123 }
1124
1125 static void enter_rmode(struct kvm_vcpu *vcpu)
1126 {
1127         unsigned long flags;
1128
1129         vcpu->rmode.active = 1;
1130
1131         vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
1132         vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1133
1134         vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
1135         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1136
1137         vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
1138         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1139
1140         flags = vmcs_readl(GUEST_RFLAGS);
1141         vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1142
1143         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1144
1145         vmcs_writel(GUEST_RFLAGS, flags);
1146         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
1147         update_exception_bitmap(vcpu);
1148
1149         vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
1150         vmcs_write32(GUEST_SS_LIMIT, 0xffff);
1151         vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
1152
1153         vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
1154         vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1155         if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
1156                 vmcs_writel(GUEST_CS_BASE, 0xf0000);
1157         vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1158
1159         fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
1160         fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
1161         fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
1162         fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
1163
1164         kvm_mmu_reset_context(vcpu);
1165         init_rmode_tss(vcpu->kvm);
1166 }
1167
1168 #ifdef CONFIG_X86_64
1169
1170 static void enter_lmode(struct kvm_vcpu *vcpu)
1171 {
1172         u32 guest_tr_ar;
1173
1174         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
1175         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
1176                 printk(KERN_DEBUG "%s: tss fixup for long mode\n",
1177                        __FUNCTION__);
1178                 vmcs_write32(GUEST_TR_AR_BYTES,
1179                              (guest_tr_ar & ~AR_TYPE_MASK)
1180                              | AR_TYPE_BUSY_64_TSS);
1181         }
1182
1183         vcpu->shadow_efer |= EFER_LMA;
1184
1185         find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
1186         vmcs_write32(VM_ENTRY_CONTROLS,
1187                      vmcs_read32(VM_ENTRY_CONTROLS)
1188                      | VM_ENTRY_IA32E_MODE);
1189 }
1190
1191 static void exit_lmode(struct kvm_vcpu *vcpu)
1192 {
1193         vcpu->shadow_efer &= ~EFER_LMA;
1194
1195         vmcs_write32(VM_ENTRY_CONTROLS,
1196                      vmcs_read32(VM_ENTRY_CONTROLS)
1197                      & ~VM_ENTRY_IA32E_MODE);
1198 }
1199
1200 #endif
1201
1202 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1203 {
1204         vcpu->cr4 &= KVM_GUEST_CR4_MASK;
1205         vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
1206 }
1207
1208 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1209 {
1210         vmx_fpu_deactivate(vcpu);
1211
1212         if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
1213                 enter_pmode(vcpu);
1214
1215         if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
1216                 enter_rmode(vcpu);
1217
1218 #ifdef CONFIG_X86_64
1219         if (vcpu->shadow_efer & EFER_LME) {
1220                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
1221                         enter_lmode(vcpu);
1222                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
1223                         exit_lmode(vcpu);
1224         }
1225 #endif
1226
1227         vmcs_writel(CR0_READ_SHADOW, cr0);
1228         vmcs_writel(GUEST_CR0,
1229                     (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
1230         vcpu->cr0 = cr0;
1231
1232         if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
1233                 vmx_fpu_activate(vcpu);
1234 }
1235
1236 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1237 {
1238         vmcs_writel(GUEST_CR3, cr3);
1239         if (vcpu->cr0 & X86_CR0_PE)
1240                 vmx_fpu_deactivate(vcpu);
1241 }
1242
1243 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1244 {
1245         vmcs_writel(CR4_READ_SHADOW, cr4);
1246         vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
1247                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
1248         vcpu->cr4 = cr4;
1249 }
1250
1251 #ifdef CONFIG_X86_64
1252
1253 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1254 {
1255         struct vcpu_vmx *vmx = to_vmx(vcpu);
1256         struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1257
1258         vcpu->shadow_efer = efer;
1259         if (efer & EFER_LMA) {
1260                 vmcs_write32(VM_ENTRY_CONTROLS,
1261                                      vmcs_read32(VM_ENTRY_CONTROLS) |
1262                                      VM_ENTRY_IA32E_MODE);
1263                 msr->data = efer;
1264
1265         } else {
1266                 vmcs_write32(VM_ENTRY_CONTROLS,
1267                                      vmcs_read32(VM_ENTRY_CONTROLS) &
1268                                      ~VM_ENTRY_IA32E_MODE);
1269
1270                 msr->data = efer & ~EFER_LME;
1271         }
1272         setup_msrs(vmx);
1273 }
1274
1275 #endif
1276
1277 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1278 {
1279         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1280
1281         return vmcs_readl(sf->base);
1282 }
1283
1284 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1285                             struct kvm_segment *var, int seg)
1286 {
1287         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1288         u32 ar;
1289
1290         var->base = vmcs_readl(sf->base);
1291         var->limit = vmcs_read32(sf->limit);
1292         var->selector = vmcs_read16(sf->selector);
1293         ar = vmcs_read32(sf->ar_bytes);
1294         if (ar & AR_UNUSABLE_MASK)
1295                 ar = 0;
1296         var->type = ar & 15;
1297         var->s = (ar >> 4) & 1;
1298         var->dpl = (ar >> 5) & 3;
1299         var->present = (ar >> 7) & 1;
1300         var->avl = (ar >> 12) & 1;
1301         var->l = (ar >> 13) & 1;
1302         var->db = (ar >> 14) & 1;
1303         var->g = (ar >> 15) & 1;
1304         var->unusable = (ar >> 16) & 1;
1305 }
1306
1307 static u32 vmx_segment_access_rights(struct kvm_segment *var)
1308 {
1309         u32 ar;
1310
1311         if (var->unusable)
1312                 ar = 1 << 16;
1313         else {
1314                 ar = var->type & 15;
1315                 ar |= (var->s & 1) << 4;
1316                 ar |= (var->dpl & 3) << 5;
1317                 ar |= (var->present & 1) << 7;
1318                 ar |= (var->avl & 1) << 12;
1319                 ar |= (var->l & 1) << 13;
1320                 ar |= (var->db & 1) << 14;
1321                 ar |= (var->g & 1) << 15;
1322         }
1323         if (ar == 0) /* a 0 value means unusable */
1324                 ar = AR_UNUSABLE_MASK;
1325
1326         return ar;
1327 }
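/*
 * Example: a flat 32-bit code segment (type 0xb, s = 1, dpl = 0,
 * present, db = 1, g = 1) packs to 0xc09b in the VMCS access-rights
 * format; the 0x9b used for CS at reset is the same descriptor minus
 * the db and g bits.
 */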
1328
1329 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1330                             struct kvm_segment *var, int seg)
1331 {
1332         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1333         u32 ar;
1334
1335         if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
1336                 vcpu->rmode.tr.selector = var->selector;
1337                 vcpu->rmode.tr.base = var->base;
1338                 vcpu->rmode.tr.limit = var->limit;
1339                 vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
1340                 return;
1341         }
1342         vmcs_writel(sf->base, var->base);
1343         vmcs_write32(sf->limit, var->limit);
1344         vmcs_write16(sf->selector, var->selector);
1345         if (vcpu->rmode.active && var->s) {
1346                 /*
1347                  * Hack real-mode segments into vm86 compatibility.
1348                  */
1349                 if (var->base == 0xffff0000 && var->selector == 0xf000)
1350                         vmcs_writel(sf->base, 0xf0000);
1351                 ar = 0xf3;
1352         } else
1353                 ar = vmx_segment_access_rights(var);
1354         vmcs_write32(sf->ar_bytes, ar);
1355 }
1356
1357 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1358 {
1359         u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1360
1361         *db = (ar >> 14) & 1;
1362         *l = (ar >> 13) & 1;
1363 }
1364
1365 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1366 {
1367         dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1368         dt->base = vmcs_readl(GUEST_IDTR_BASE);
1369 }
1370
1371 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1372 {
1373         vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1374         vmcs_writel(GUEST_IDTR_BASE, dt->base);
1375 }
1376
1377 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1378 {
1379         dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1380         dt->base = vmcs_readl(GUEST_GDTR_BASE);
1381 }
1382
1383 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1384 {
1385         vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1386         vmcs_writel(GUEST_GDTR_BASE, dt->base);
1387 }
1388
1389 static int init_rmode_tss(struct kvm *kvm)
1390 {
1391         gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
1392         u16 data = 0;
1393         int r;
1394
1395         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
1396         if (r < 0)
1397                 return 0;
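        /* 0x66 is the I/O map base field of the 32-bit TSS; point it
         * just past the interrupt redirection bitmap. */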
1398         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
1399         r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
1400         if (r < 0)
1401                 return 0;
1402         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
1403         if (r < 0)
1404                 return 0;
1405         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
1406         if (r < 0)
1407                 return 0;
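        /* the cpu requires the byte following the I/O bitmap to be all
         * ones; write that terminator as the last byte of the tss */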
1408         data = ~0;
1409         r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
1410                         sizeof(u8));
1411         if (r < 0)
1412                 return 0;
1413         return 1;
1414 }
1415
1416 static void seg_setup(int seg)
1417 {
1418         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1419
1420         vmcs_write16(sf->selector, 0);
1421         vmcs_writel(sf->base, 0);
1422         vmcs_write32(sf->limit, 0xffff);
1423         vmcs_write32(sf->ar_bytes, 0x93);
1424 }
1425
1426 /*
1427  * Sets up the vmcs for emulated real mode.
1428  */
1429 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
1430 {
1431         u32 host_sysenter_cs;
1432         u32 junk;
1433         unsigned long a;
1434         struct descriptor_table dt;
1435         int i;
1436         unsigned long kvm_vmx_return;
1437         u32 exec_control;
1438
1439         /* I/O */
1440         vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
1441         vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
1442
1443         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
1444
1445         /* Control */
1446         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
1447                 vmcs_config.pin_based_exec_ctrl);
1448
1449         exec_control = vmcs_config.cpu_based_exec_ctrl;
1450         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
1451                 exec_control &= ~CPU_BASED_TPR_SHADOW;
1452 #ifdef CONFIG_X86_64
1453                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
1454                                 CPU_BASED_CR8_LOAD_EXITING;
1455 #endif
1456         }
1457         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
1458
1459         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
1460         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
1461         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
1462
1463         vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
1464         vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
1465         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
1466
1467         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
1468         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
1469         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
1470         vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
1471         vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
1472         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
1473 #ifdef CONFIG_X86_64
1474         rdmsrl(MSR_FS_BASE, a);
1475         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
1476         rdmsrl(MSR_GS_BASE, a);
1477         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
1478 #else
1479         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
1480         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
1481 #endif
1482
1483         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
1484
1485         get_idt(&dt);
1486         vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
1487
1488         asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
1489         vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
1490         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1491         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
1492         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
1493
1494         rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
1495         vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
1496         rdmsrl(MSR_IA32_SYSENTER_ESP, a);
1497         vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
1498         rdmsrl(MSR_IA32_SYSENTER_EIP, a);
1499         vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
1500
1501         for (i = 0; i < NR_VMX_MSR; ++i) {
1502                 u32 index = vmx_msr_index[i];
1503                 u32 data_low, data_high;
1504                 u64 data;
1505                 int j = vmx->nmsrs;
1506
1507                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
1508                         continue;
1509                 if (wrmsr_safe(index, data_low, data_high) < 0)
1510                         continue;
1511                 data = data_low | ((u64)data_high << 32);
1512                 vmx->host_msrs[j].index = index;
1513                 vmx->host_msrs[j].reserved = 0;
1514                 vmx->host_msrs[j].data = data;
1515                 vmx->guest_msrs[j] = vmx->host_msrs[j];
1516                 ++vmx->nmsrs;
1517         }
1518
1519         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
1520
1521         /* 22.2.1, 20.8.1 */
1522         vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
1523
1524         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
1525         vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
1526
1527         return 0;
1528 }
1529
1530 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1531 {
1532         struct vcpu_vmx *vmx = to_vmx(vcpu);
1533         u64 msr;
1534         int ret;
1535
1536         if (!init_rmode_tss(vmx->vcpu.kvm)) {
1537                 ret = -ENOMEM;
1538                 goto out;
1539         }
1540
1541         vmx->vcpu.rmode.active = 0;
1542
1543         vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
1544         set_cr8(&vmx->vcpu, 0);
1545         msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1546         if (vmx->vcpu.vcpu_id == 0)
1547                 msr |= MSR_IA32_APICBASE_BSP;
1548         kvm_set_apic_base(&vmx->vcpu, msr);
1549
1550         fx_init(&vmx->vcpu);
1551
1552         /*
1553          * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
1554          * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
1555          */
1556         if (vmx->vcpu.vcpu_id == 0) {
1557                 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
1558                 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
1559         } else {
1560                 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
1561                 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
1562         }
1563         vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1564         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1565
1566         seg_setup(VCPU_SREG_DS);
1567         seg_setup(VCPU_SREG_ES);
1568         seg_setup(VCPU_SREG_FS);
1569         seg_setup(VCPU_SREG_GS);
1570         seg_setup(VCPU_SREG_SS);
1571
1572         vmcs_write16(GUEST_TR_SELECTOR, 0);
1573         vmcs_writel(GUEST_TR_BASE, 0);
1574         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
1575         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1576
1577         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
1578         vmcs_writel(GUEST_LDTR_BASE, 0);
1579         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
1580         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
1581
1582         vmcs_write32(GUEST_SYSENTER_CS, 0);
1583         vmcs_writel(GUEST_SYSENTER_ESP, 0);
1584         vmcs_writel(GUEST_SYSENTER_EIP, 0);
1585
1586         vmcs_writel(GUEST_RFLAGS, 0x02);
1587         if (vmx->vcpu.vcpu_id == 0)
1588                 vmcs_writel(GUEST_RIP, 0xfff0);
1589         else
1590                 vmcs_writel(GUEST_RIP, 0);
1591         vmcs_writel(GUEST_RSP, 0);
1592
1593         /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
1594         vmcs_writel(GUEST_DR7, 0x400);
1595
1596         vmcs_writel(GUEST_GDTR_BASE, 0);
1597         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
1598
1599         vmcs_writel(GUEST_IDTR_BASE, 0);
1600         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
1601
1602         vmcs_write32(GUEST_ACTIVITY_STATE, 0);
1603         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
1604         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1605
1606         guest_write_tsc(0);
1607
1608         /* Special registers */
1609         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
1610
1611         setup_msrs(vmx);
1612
1613         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
1614
1615 #ifdef CONFIG_X86_64
1616         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
1617         if (vm_need_tpr_shadow(vmx->vcpu.kvm))
1618                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
1619                              page_to_phys(vmx->vcpu.apic->regs_page));
1620         vmcs_write32(TPR_THRESHOLD, 0);
1621 #endif
1622
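        /* 0x60000010 = ET | NW | CD: the architectural cr0 value after
         * power-up or reset (SDM vol. 3, "Processor State After Reset") */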
1623         vmx->vcpu.cr0 = 0x60000010;
1624         vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
1625         vmx_set_cr4(&vmx->vcpu, 0);
1626 #ifdef CONFIG_X86_64
1627         vmx_set_efer(&vmx->vcpu, 0);
1628 #endif
1629         vmx_fpu_activate(&vmx->vcpu);
1630         update_exception_bitmap(&vmx->vcpu);
1631
1632         return 0;
1633
1634 out:
1635         return ret;
1636 }
1637
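     /*
      * Emulate real-mode interrupt delivery: push FLAGS, CS and IP on the
      * guest stack, clear IF/TF/AC, and vector through the IVT entry at
      * linear address irq * 4, exactly as the CPU does for INT n.
      */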
1638 static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1639 {
1640         u16 ent[2];
1641         u16 cs;
1642         u16 ip;
1643         unsigned long flags;
1644         unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
1645         u16 sp = vmcs_readl(GUEST_RSP);
1646         u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
1647
1648         if (sp > ss_limit || sp < 6) {
1649                 vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
1650                             __FUNCTION__,
1651                             vmcs_readl(GUEST_RSP),
1652                             vmcs_readl(GUEST_SS_BASE),
1653                             vmcs_read32(GUEST_SS_LIMIT));
1654                 return;
1655         }
1656
1657         if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
1658                                                         X86EMUL_CONTINUE) {
1659                 vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
1660                 return;
1661         }
1662
1663         flags = vmcs_readl(GUEST_RFLAGS);
1664         cs = vmcs_readl(GUEST_CS_BASE) >> 4;
1665         ip = vmcs_readl(GUEST_RIP);
1666
1667
1668         if (emulator_write_emulated(
1669                     ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
1670             emulator_write_emulated(
1671                     ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
1672             emulator_write_emulated(
1673                     ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
1674                 vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
1675                 return;
1676         }
1677
1678         vmcs_writel(GUEST_RFLAGS, flags &
1679                     ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
1680         vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
1681         vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
1682         vmcs_writel(GUEST_RIP, ent[0]);
1683         vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
1684 }
1685
1686 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
1687 {
1688         if (vcpu->rmode.active) {
1689                 inject_rmode_irq(vcpu, irq);
1690                 return;
1691         }
1692         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1693                         irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1694 }
1695
1696 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1697 {
1698         int word_index = __ffs(vcpu->irq_summary);
1699         int bit_index = __ffs(vcpu->irq_pending[word_index]);
1700         int irq = word_index * BITS_PER_LONG + bit_index;
1701
1702         clear_bit(bit_index, &vcpu->irq_pending[word_index]);
1703         if (!vcpu->irq_pending[word_index])
1704                 clear_bit(word_index, &vcpu->irq_summary);
1705         vmx_inject_irq(vcpu, irq);
1706 }
1707
1708
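     /*
      * The "interrupt window" is open when guest RFLAGS.IF is set and the
      * interruptibility field shows no STI or MOV SS shadow (its low two
      * bits).  While injection is blocked, CPU_BASED_VIRTUAL_INTR_PENDING
      * requests a VM exit as soon as the window opens.
      */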
1709 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1710                                        struct kvm_run *kvm_run)
1711 {
1712         u32 cpu_based_vm_exec_control;
1713
1714         vcpu->interrupt_window_open =
1715                 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1716                  (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1717
1718         if (vcpu->interrupt_window_open &&
1719             vcpu->irq_summary &&
1720             !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1721                 /*
1722                  * Interrupts enabled and not blocked by sti or mov ss: inject now.
1723                  */
1724                 kvm_do_inject_irq(vcpu);
1725
1726         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1727         if (!vcpu->interrupt_window_open &&
1728             (vcpu->irq_summary || kvm_run->request_interrupt_window))
1729                 /*
1730                  * Interrupts blocked.  Wait for unblock.
1731                  */
1732                 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
1733         else
1734                 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
1735         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
1736 }
1737
1738 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
1739 {
1740         struct kvm_guest_debug *dbg = &vcpu->guest_debug;
1741
1742         set_debugreg(dbg->bp[0], 0);
1743         set_debugreg(dbg->bp[1], 1);
1744         set_debugreg(dbg->bp[2], 2);
1745         set_debugreg(dbg->bp[3], 3);
1746
1747         if (dbg->singlestep) {
1748                 unsigned long flags;
1749
1750                 flags = vmcs_readl(GUEST_RFLAGS);
1751                 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1752                 vmcs_writel(GUEST_RFLAGS, flags);
1753         }
1754 }
1755
1756 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1757                                   int vec, u32 err_code)
1758 {
1759         if (!vcpu->rmode.active)
1760                 return 0;
1761
1762         /*
1763          * An instruction with the address-size override prefix (opcode 0x67)
1764          * causes a #SS fault with error code 0 in VM86 mode.
1765          */
1766         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
1767                 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
1768                         return 1;
1769         return 0;
1770 }
1771
1772 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1773 {
1774         u32 intr_info, error_code;
1775         unsigned long cr2, rip;
1776         u32 vect_info;
1777         enum emulation_result er;
1778         int r;
1779
1780         vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1781         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1782
1783         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
1784                                                 !is_page_fault(intr_info))
1785                 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
1786                        "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
1787
1788         if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
1789                 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
1790                 set_bit(irq, vcpu->irq_pending);
1791                 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
1792         }
1793
1794         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
1795                 return 1;  /* already handled by vmx_vcpu_run() */
1796
1797         if (is_no_device(intr_info)) {
1798                 vmx_fpu_activate(vcpu);
1799                 return 1;
1800         }
1801
1802         if (is_invalid_opcode(intr_info)) {
1803                 er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
1804                 if (er != EMULATE_DONE)
1805                         vmx_inject_ud(vcpu);
1806
1807                 return 1;
1808         }
1809
1810         error_code = 0;
1811         rip = vmcs_readl(GUEST_RIP);
1812         if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
1813                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
1814         if (is_page_fault(intr_info)) {
1815                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
1816
1817                 mutex_lock(&vcpu->kvm->lock);
1818                 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
1819                 if (r < 0) {
1820                         mutex_unlock(&vcpu->kvm->lock);
1821                         return r;
1822                 }
1823                 if (!r) {
1824                         mutex_unlock(&vcpu->kvm->lock);
1825                         return 1;
1826                 }
1827
1828                 er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
1829                 mutex_unlock(&vcpu->kvm->lock);
1830
1831                 switch (er) {
1832                 case EMULATE_DONE:
1833                         return 1;
1834                 case EMULATE_DO_MMIO:
1835                         ++vcpu->stat.mmio_exits;
1836                         return 0;
1837                 case EMULATE_FAIL:
1838                         kvm_report_emulation_failure(vcpu, "pagetable");
1839                         break;
1840                 default:
1841                         BUG();
1842                 }
1843         }
1844
1845         if (vcpu->rmode.active &&
1846             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1847                                                                 error_code)) {
1848                 if (vcpu->halt_request) {
1849                         vcpu->halt_request = 0;
1850                         return kvm_emulate_halt(vcpu);
1851                 }
1852                 return 1;
1853         }
1854
1855         if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
1856             (INTR_TYPE_EXCEPTION | 1)) {
1857                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1858                 return 0;
1859         }
1860         kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
1861         kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1862         kvm_run->ex.error_code = error_code;
1863         return 0;
1864 }
1865
1866 static int handle_external_interrupt(struct kvm_vcpu *vcpu,
1867                                      struct kvm_run *kvm_run)
1868 {
1869         ++vcpu->stat.irq_exits;
1870         return 1;
1871 }
1872
1873 static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1874 {
1875         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1876         return 0;
1877 }
1878
1879 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1880 {
1881         unsigned long exit_qualification;
1882         int size, down, in, string, rep;
1883         unsigned port;
1884
1885         ++vcpu->stat.io_exits;
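             /*
              * Decode the exit qualification (Intel SDM): bits 2:0 = access
              * size - 1, bit 3 = IN, bit 4 = string instruction (INS/OUTS),
              * bit 5 = REP prefix, bits 31:16 = port number.
              */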
1886         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1887         string = (exit_qualification & 16) != 0;
1888
1889         if (string) {
1890                 if (emulate_instruction(vcpu,
1891                                         kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
1892                         return 0;
1893                 return 1;
1894         }
1895
1896         size = (exit_qualification & 7) + 1;
1897         in = (exit_qualification & 8) != 0;
1898         down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1899         rep = (exit_qualification & 32) != 0;
1900         port = exit_qualification >> 16;
1901
1902         return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
1903 }
1904
1905 static void
1906 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1907 {
1908         /*
1909          * Patch in the VMCALL instruction:
1910          */
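             /* 0f 01 c1 is the three-byte encoding of VMCALL. */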
1911         hypercall[0] = 0x0f;
1912         hypercall[1] = 0x01;
1913         hypercall[2] = 0xc1;
1914 }
1915
1916 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1917 {
1918         unsigned long exit_qualification;
1919         int cr;
1920         int reg;
1921
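             /*
              * Exit qualification for a CR access: bits 3:0 = CR number,
              * bits 5:4 = access type (0 = mov to CR, 1 = mov from CR,
              * 2 = CLTS, 3 = LMSW), bits 11:8 = GP register operand.
              */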
1922         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1923         cr = exit_qualification & 15;
1924         reg = (exit_qualification >> 8) & 15;
1925         switch ((exit_qualification >> 4) & 3) {
1926         case 0: /* mov to cr */
1927                 switch (cr) {
1928                 case 0:
1929                         vcpu_load_rsp_rip(vcpu);
1930                         set_cr0(vcpu, vcpu->regs[reg]);
1931                         skip_emulated_instruction(vcpu);
1932                         return 1;
1933                 case 3:
1934                         vcpu_load_rsp_rip(vcpu);
1935                         set_cr3(vcpu, vcpu->regs[reg]);
1936                         skip_emulated_instruction(vcpu);
1937                         return 1;
1938                 case 4:
1939                         vcpu_load_rsp_rip(vcpu);
1940                         set_cr4(vcpu, vcpu->regs[reg]);
1941                         skip_emulated_instruction(vcpu);
1942                         return 1;
1943                 case 8:
1944                         vcpu_load_rsp_rip(vcpu);
1945                         set_cr8(vcpu, vcpu->regs[reg]);
1946                         skip_emulated_instruction(vcpu);
1947                         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
1948                         return 0;
1949                 }
1950                 break;
1951         case 2: /* clts */
1952                 vcpu_load_rsp_rip(vcpu);
1953                 vmx_fpu_deactivate(vcpu);
1954                 vcpu->cr0 &= ~X86_CR0_TS;
1955                 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
1956                 vmx_fpu_activate(vcpu);
1957                 skip_emulated_instruction(vcpu);
1958                 return 1;
1959         case 1: /* mov from cr */
1960                 switch (cr) {
1961                 case 3:
1962                         vcpu_load_rsp_rip(vcpu);
1963                         vcpu->regs[reg] = vcpu->cr3;
1964                         vcpu_put_rsp_rip(vcpu);
1965                         skip_emulated_instruction(vcpu);
1966                         return 1;
1967                 case 8:
1968                         vcpu_load_rsp_rip(vcpu);
1969                         vcpu->regs[reg] = get_cr8(vcpu);
1970                         vcpu_put_rsp_rip(vcpu);
1971                         skip_emulated_instruction(vcpu);
1972                         return 1;
1973                 }
1974                 break;
1975         case 3: /* lmsw */
1976                 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1977
1978                 skip_emulated_instruction(vcpu);
1979                 return 1;
1980         default:
1981                 break;
1982         }
1983         kvm_run->exit_reason = 0;
1984         pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
1985                (int)(exit_qualification >> 4) & 3, cr);
1986         return 0;
1987 }
1988
1989 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1990 {
1991         unsigned long exit_qualification;
1992         unsigned long val;
1993         int dr, reg;
1994
1995         /*
1996          * FIXME: this code assumes the host is debugging the guest.
1997          *        need to deal with guest debugging itself too.
1998          */
1999         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2000         dr = exit_qualification & 7;
2001         reg = (exit_qualification >> 8) & 15;
2002         vcpu_load_rsp_rip(vcpu);
2003         if (exit_qualification & 16) {
2004                 /* mov from dr */
2005                 switch (dr) {
2006                 case 6:
2007                         val = 0xffff0ff0;
2008                         break;
2009                 case 7:
2010                         val = 0x400;
2011                         break;
2012                 default:
2013                         val = 0;
2014                 }
2015                 vcpu->regs[reg] = val;
2016         } else {
2017                 /* mov to dr */
2018         }
2019         vcpu_put_rsp_rip(vcpu);
2020         skip_emulated_instruction(vcpu);
2021         return 1;
2022 }
2023
2024 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2025 {
2026         kvm_emulate_cpuid(vcpu);
2027         return 1;
2028 }
2029
2030 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2031 {
2032         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
2033         u64 data;
2034
2035         if (vmx_get_msr(vcpu, ecx, &data)) {
2036                 vmx_inject_gp(vcpu, 0);
2037                 return 1;
2038         }
2039
2040         /* FIXME: handling of bits 32:63 of rax, rdx */
2041         vcpu->regs[VCPU_REGS_RAX] = data & -1u;
2042         vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
2043         skip_emulated_instruction(vcpu);
2044         return 1;
2045 }
2046
2047 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2048 {
2049         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
2050         u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
2051                 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
2052
2053         if (vmx_set_msr(vcpu, ecx, data) != 0) {
2054                 vmx_inject_gp(vcpu, 0);
2055                 return 1;
2056         }
2057
2058         skip_emulated_instruction(vcpu);
2059         return 1;
2060 }
2061
2062 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
2063                                       struct kvm_run *kvm_run)
2064 {
2065         return 1;
2066 }
2067
2068 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2069                                    struct kvm_run *kvm_run)
2070 {
2071         u32 cpu_based_vm_exec_control;
2072
2073         /* clear pending irq */
2074         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2075         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2076         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2077         /*
2078          * If userspace is waiting to inject interrupts, exit as soon as
2079          * possible.
2080          */
2081         if (kvm_run->request_interrupt_window &&
2082             !vcpu->irq_summary) {
2083                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2084                 ++vcpu->stat.irq_window_exits;
2085                 return 0;
2086         }
2087         return 1;
2088 }
2089
2090 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2091 {
2092         skip_emulated_instruction(vcpu);
2093         return kvm_emulate_halt(vcpu);
2094 }
2095
2096 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2097 {
2098         skip_emulated_instruction(vcpu);
2099         kvm_emulate_hypercall(vcpu);
2100         return 1;
2101 }
2102
2103 /*
2104  * The exit handlers return 1 if the exit was handled fully and guest execution
2105  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
2106  * to be done to userspace and return 0.
2107  */
2108 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
2109                                       struct kvm_run *kvm_run) = {
2110         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
2111         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
2112         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
2113         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
2114         [EXIT_REASON_CR_ACCESS]               = handle_cr,
2115         [EXIT_REASON_DR_ACCESS]               = handle_dr,
2116         [EXIT_REASON_CPUID]                   = handle_cpuid,
2117         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
2118         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
2119         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
2120         [EXIT_REASON_HLT]                     = handle_halt,
2121         [EXIT_REASON_VMCALL]                  = handle_vmcall,
2122         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold
2123 };
2124
2125 static const int kvm_vmx_max_exit_handlers =
2126         ARRAY_SIZE(kvm_vmx_exit_handlers);
2127
2128 /*
2129  * The guest has exited.  See if we can fix it or if we need userspace
2130  * assistance.
2131  */
2132 static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2133 {
2134         u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2135         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
2136         struct vcpu_vmx *vmx = to_vmx(vcpu);
2137
2138         if (unlikely(vmx->fail)) {
2139                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2140                 kvm_run->fail_entry.hardware_entry_failure_reason
2141                         = vmcs_read32(VM_INSTRUCTION_ERROR);
2142                 return 0;
2143         }
2144
2145         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
2146                                 exit_reason != EXIT_REASON_EXCEPTION_NMI)
2147                 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
2148                        "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
2149         if (exit_reason < kvm_vmx_max_exit_handlers
2150             && kvm_vmx_exit_handlers[exit_reason])
2151                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
2152         else {
2153                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2154                 kvm_run->hw.hardware_exit_reason = exit_reason;
2155         }
2156         return 0;
2157 }
2158
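     /* No-op: without VPID, VMX flushes guest TLB entries on every VM entry/exit. */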
2159 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2160 {
2161 }
2162
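     /*
      * With a TPR shadow, program the threshold so that the guest exits
      * (TPR_BELOW_THRESHOLD) when it lowers its task priority below that
      * of the highest pending interrupt, giving KVM a chance to inject it.
      */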
2163 static void update_tpr_threshold(struct kvm_vcpu *vcpu)
2164 {
2165         int max_irr, tpr;
2166
2167         if (!vm_need_tpr_shadow(vcpu->kvm))
2168                 return;
2169
2170         if (!kvm_lapic_enabled(vcpu) ||
2171             ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
2172                 vmcs_write32(TPR_THRESHOLD, 0);
2173                 return;
2174         }
2175
2176         tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
2177         vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
2178 }
2179
2180 static void enable_irq_window(struct kvm_vcpu *vcpu)
2181 {
2182         u32 cpu_based_vm_exec_control;
2183
2184         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2185         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2186         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2187 }
2188
2189 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2190 {
2191         u32 idtv_info_field, intr_info_field;
2192         int has_ext_irq, interrupt_window_open;
2193         int vector;
2194
2195         update_tpr_threshold(vcpu);
2196
2197         has_ext_irq = kvm_cpu_has_interrupt(vcpu);
2198         intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
2199         idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2200         if (intr_info_field & INTR_INFO_VALID_MASK) {
2201                 if (idtv_info_field & INTR_INFO_VALID_MASK) {
2202                         /* TODO: fault when IDT_Vectoring */
2203                         printk(KERN_ERR "Fault when IDT_Vectoring\n");
2204                 }
2205                 if (has_ext_irq)
2206                         enable_irq_window(vcpu);
2207                 return;
2208         }
2209         if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
2210                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
2211                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2212                                 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
2213
2214                 if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
2215                         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2216                                 vmcs_read32(IDT_VECTORING_ERROR_CODE));
2217                 if (unlikely(has_ext_irq))
2218                         enable_irq_window(vcpu);
2219                 return;
2220         }
2221         if (!has_ext_irq)
2222                 return;
2223         interrupt_window_open =
2224                 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2225                  (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
2226         if (interrupt_window_open) {
2227                 vector = kvm_cpu_get_interrupt(vcpu);
2228                 vmx_inject_irq(vcpu, vector);
2229                 kvm_timer_intr_post(vcpu, vector);
2230         } else
2231                 enable_irq_window(vcpu);
2232 }
2233
2234 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2235 {
2236         struct vcpu_vmx *vmx = to_vmx(vcpu);
2237         u32 intr_info;
2238
2239         /*
2240          * Loading guest fpu may have cleared host cr0.ts
2241          */
2242         vmcs_writel(HOST_CR0, read_cr0());
2243
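             /*
              * The first entry on a VMCS must use VMLAUNCH; later entries
              * use VMRESUME.  vmx->launched selects between them below.
              */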
2244         asm(
2245                 /* Store host registers */
2246 #ifdef CONFIG_X86_64
2247                 "push %%rax; push %%rbx; push %%rdx;"
2248                 "push %%rsi; push %%rdi; push %%rbp;"
2249                 "push %%r8;  push %%r9;  push %%r10; push %%r11;"
2250                 "push %%r12; push %%r13; push %%r14; push %%r15;"
2251                 "push %%rcx \n\t"
2252                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2253 #else
2254                 "pusha; push %%ecx \n\t"
2255                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2256 #endif
2257                 /* Check whether vmlaunch or vmresume is needed */
2258                 "cmp $0, %1 \n\t"
2259                 /* Load guest registers.  Don't clobber flags. */
2260 #ifdef CONFIG_X86_64
2261                 "mov %c[cr2](%3), %%rax \n\t"
2262                 "mov %%rax, %%cr2 \n\t"
2263                 "mov %c[rax](%3), %%rax \n\t"
2264                 "mov %c[rbx](%3), %%rbx \n\t"
2265                 "mov %c[rdx](%3), %%rdx \n\t"
2266                 "mov %c[rsi](%3), %%rsi \n\t"
2267                 "mov %c[rdi](%3), %%rdi \n\t"
2268                 "mov %c[rbp](%3), %%rbp \n\t"
2269                 "mov %c[r8](%3),  %%r8  \n\t"
2270                 "mov %c[r9](%3),  %%r9  \n\t"
2271                 "mov %c[r10](%3), %%r10 \n\t"
2272                 "mov %c[r11](%3), %%r11 \n\t"
2273                 "mov %c[r12](%3), %%r12 \n\t"
2274                 "mov %c[r13](%3), %%r13 \n\t"
2275                 "mov %c[r14](%3), %%r14 \n\t"
2276                 "mov %c[r15](%3), %%r15 \n\t"
2277                 "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
2278 #else
2279                 "mov %c[cr2](%3), %%eax \n\t"
2280                 "mov %%eax,   %%cr2 \n\t"
2281                 "mov %c[rax](%3), %%eax \n\t"
2282                 "mov %c[rbx](%3), %%ebx \n\t"
2283                 "mov %c[rdx](%3), %%edx \n\t"
2284                 "mov %c[rsi](%3), %%esi \n\t"
2285                 "mov %c[rdi](%3), %%edi \n\t"
2286                 "mov %c[rbp](%3), %%ebp \n\t"
2287                 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
2288 #endif
2289                 /* Enter guest mode */
2290                 "jne .Llaunched \n\t"
2291                 ASM_VMX_VMLAUNCH "\n\t"
2292                 "jmp .Lkvm_vmx_return \n\t"
2293                 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
2294                 ".Lkvm_vmx_return: "
2295                 /* Save guest registers, load host registers, keep flags */
2296 #ifdef CONFIG_X86_64
2297                 "xchg %3,     (%%rsp) \n\t"
2298                 "mov %%rax, %c[rax](%3) \n\t"
2299                 "mov %%rbx, %c[rbx](%3) \n\t"
2300                 "pushq (%%rsp); popq %c[rcx](%3) \n\t"
2301                 "mov %%rdx, %c[rdx](%3) \n\t"
2302                 "mov %%rsi, %c[rsi](%3) \n\t"
2303                 "mov %%rdi, %c[rdi](%3) \n\t"
2304                 "mov %%rbp, %c[rbp](%3) \n\t"
2305                 "mov %%r8,  %c[r8](%3) \n\t"
2306                 "mov %%r9,  %c[r9](%3) \n\t"
2307                 "mov %%r10, %c[r10](%3) \n\t"
2308                 "mov %%r11, %c[r11](%3) \n\t"
2309                 "mov %%r12, %c[r12](%3) \n\t"
2310                 "mov %%r13, %c[r13](%3) \n\t"
2311                 "mov %%r14, %c[r14](%3) \n\t"
2312                 "mov %%r15, %c[r15](%3) \n\t"
2313                 "mov %%cr2, %%rax   \n\t"
2314                 "mov %%rax, %c[cr2](%3) \n\t"
2315                 "mov (%%rsp), %3 \n\t"
2316
2317                 "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
2318                 "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
2319                 "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
2320                 "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
2321 #else
2322                 "xchg %3, (%%esp) \n\t"
2323                 "mov %%eax, %c[rax](%3) \n\t"
2324                 "mov %%ebx, %c[rbx](%3) \n\t"
2325                 "pushl (%%esp); popl %c[rcx](%3) \n\t"
2326                 "mov %%edx, %c[rdx](%3) \n\t"
2327                 "mov %%esi, %c[rsi](%3) \n\t"
2328                 "mov %%edi, %c[rdi](%3) \n\t"
2329                 "mov %%ebp, %c[rbp](%3) \n\t"
2330                 "mov %%cr2, %%eax  \n\t"
2331                 "mov %%eax, %c[cr2](%3) \n\t"
2332                 "mov (%%esp), %3 \n\t"
2333
2334                 "pop %%ecx; popa \n\t"
2335 #endif
2336                 "setbe %0 \n\t"
2337               : "=q" (vmx->fail)
2338               : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
2339                 "c"(vcpu),
2340                 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
2341                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
2342                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
2343                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
2344                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
2345                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
2346                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
2347 #ifdef CONFIG_X86_64
2348                 [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
2349                 [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
2350                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
2351                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
2352                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
2353                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
2354                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
2355                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
2356 #endif
2357                 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
2358               : "cc", "memory");
2359
2360         vcpu->interrupt_window_open =
2361                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2362
2363         asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2364         vmx->launched = 1;
2365
2366         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2367
2368         /* We need to handle NMIs before interrupts are enabled */
2369         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
2370                 asm("int $2");
2371 }
2372
2373 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2374                                   unsigned long addr,
2375                                   u32 err_code)
2376 {
2377         u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2378
2379         ++vcpu->stat.pf_guest;
2380
2381         if (is_page_fault(vect_info)) {
2382                 printk(KERN_DEBUG "inject_page_fault: "
2383                        "double fault 0x%lx @ 0x%lx\n",
2384                        addr, vmcs_readl(GUEST_RIP));
2385                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
2386                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2387                              DF_VECTOR |
2388                              INTR_TYPE_EXCEPTION |
2389                              INTR_INFO_DELIEVER_CODE_MASK |
2390                              INTR_INFO_VALID_MASK);
2391                 return;
2392         }
2393         vcpu->cr2 = addr;
2394         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
2395         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2396                      PF_VECTOR |
2397                      INTR_TYPE_EXCEPTION |
2398                      INTR_INFO_DELIEVER_CODE_MASK |
2399                      INTR_INFO_VALID_MASK);
2400
2401 }
2402
2403 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2404 {
2405         struct vcpu_vmx *vmx = to_vmx(vcpu);
2406
2407         if (vmx->vmcs) {
2408                 on_each_cpu(__vcpu_clear, vmx, 0, 1);
2409                 free_vmcs(vmx->vmcs);
2410                 vmx->vmcs = NULL;
2411         }
2412 }
2413
2414 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2415 {
2416         struct vcpu_vmx *vmx = to_vmx(vcpu);
2417
2418         vmx_free_vmcs(vcpu);
2419         kfree(vmx->host_msrs);
2420         kfree(vmx->guest_msrs);
2421         kvm_vcpu_uninit(vcpu);
2422         kmem_cache_free(kvm_vcpu_cache, vmx);
2423 }
2424
2425 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
2426 {
2427         int err;
2428         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
2429         int cpu;
2430
2431         if (!vmx)
2432                 return ERR_PTR(-ENOMEM);
2433
2434         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
2435         if (err)
2436                 goto free_vcpu;
2437
2438         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2439         if (!vmx->guest_msrs) {
2440                 err = -ENOMEM;
2441                 goto uninit_vcpu;
2442         }
2443
2444         vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2445         if (!vmx->host_msrs)
2446                 goto free_guest_msrs;
2447
2448         vmx->vmcs = alloc_vmcs();
2449         if (!vmx->vmcs)
2450                 goto free_msrs;
2451
2452         vmcs_clear(vmx->vmcs);
2453
2454         cpu = get_cpu();
2455         vmx_vcpu_load(&vmx->vcpu, cpu);
2456         err = vmx_vcpu_setup(vmx);
2457         vmx_vcpu_put(&vmx->vcpu);
2458         put_cpu();
2459         if (err)
2460                 goto free_vmcs;
2461
2462         return &vmx->vcpu;
2463
2464 free_vmcs:
2465         free_vmcs(vmx->vmcs);
2466 free_msrs:
2467         kfree(vmx->host_msrs);
2468 free_guest_msrs:
2469         kfree(vmx->guest_msrs);
2470 uninit_vcpu:
2471         kvm_vcpu_uninit(&vmx->vcpu);
2472 free_vcpu:
2473         kmem_cache_free(kvm_vcpu_cache, vmx);
2474         return ERR_PTR(err);
2475 }
2476
2477 static void __init vmx_check_processor_compat(void *rtn)
2478 {
2479         struct vmcs_config vmcs_conf;
2480
2481         *(int *)rtn = 0;
2482         if (setup_vmcs_config(&vmcs_conf) < 0)
2483                 *(int *)rtn = -EIO;
2484         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
2485                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
2486                                 smp_processor_id());
2487                 *(int *)rtn = -EIO;
2488         }
2489 }
2490
2491 static struct kvm_x86_ops vmx_x86_ops = {
2492         .cpu_has_kvm_support = cpu_has_kvm_support,
2493         .disabled_by_bios = vmx_disabled_by_bios,
2494         .hardware_setup = hardware_setup,
2495         .hardware_unsetup = hardware_unsetup,
2496         .check_processor_compatibility = vmx_check_processor_compat,
2497         .hardware_enable = hardware_enable,
2498         .hardware_disable = hardware_disable,
2499
2500         .vcpu_create = vmx_create_vcpu,
2501         .vcpu_free = vmx_free_vcpu,
2502         .vcpu_reset = vmx_vcpu_reset,
2503
2504         .prepare_guest_switch = vmx_save_host_state,
2505         .vcpu_load = vmx_vcpu_load,
2506         .vcpu_put = vmx_vcpu_put,
2507         .vcpu_decache = vmx_vcpu_decache,
2508
2509         .set_guest_debug = set_guest_debug,
2510         .guest_debug_pre = kvm_guest_debug_pre,
2511         .get_msr = vmx_get_msr,
2512         .set_msr = vmx_set_msr,
2513         .get_segment_base = vmx_get_segment_base,
2514         .get_segment = vmx_get_segment,
2515         .set_segment = vmx_set_segment,
2516         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
2517         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
2518         .set_cr0 = vmx_set_cr0,
2519         .set_cr3 = vmx_set_cr3,
2520         .set_cr4 = vmx_set_cr4,
2521 #ifdef CONFIG_X86_64
2522         .set_efer = vmx_set_efer,
2523 #endif
2524         .get_idt = vmx_get_idt,
2525         .set_idt = vmx_set_idt,
2526         .get_gdt = vmx_get_gdt,
2527         .set_gdt = vmx_set_gdt,
2528         .cache_regs = vcpu_load_rsp_rip,
2529         .decache_regs = vcpu_put_rsp_rip,
2530         .get_rflags = vmx_get_rflags,
2531         .set_rflags = vmx_set_rflags,
2532
2533         .tlb_flush = vmx_flush_tlb,
2534         .inject_page_fault = vmx_inject_page_fault,
2535
2536         .inject_gp = vmx_inject_gp,
2537
2538         .run = vmx_vcpu_run,
2539         .handle_exit = kvm_handle_exit,
2540         .skip_emulated_instruction = skip_emulated_instruction,
2541         .patch_hypercall = vmx_patch_hypercall,
2542         .get_irq = vmx_get_irq,
2543         .set_irq = vmx_inject_irq,
2544         .inject_pending_irq = vmx_intr_assist,
2545         .inject_pending_vectors = do_interrupt_requests,
2546 };
2547
2548 static int __init vmx_init(void)
2549 {
2550         void *iova;
2551         int r;
2552
2553         vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2554         if (!vmx_io_bitmap_a)
2555                 return -ENOMEM;
2556
2557         vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2558         if (!vmx_io_bitmap_b) {
2559                 r = -ENOMEM;
2560                 goto out;
2561         }
2562
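             /*
              * A set bit in an I/O bitmap makes accesses to that port cause
              * a VM exit; start by intercepting every port.
              */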
2563         /*
2564          * Allow direct access to the PC debug port (it is often used for I/O
2565          * delays, but the vmexits simply slow things down).
2566          */
2567         iova = kmap(vmx_io_bitmap_a);
2568         memset(iova, 0xff, PAGE_SIZE);
2569         clear_bit(0x80, iova);
2570         kunmap(vmx_io_bitmap_a);
2571
2572         iova = kmap(vmx_io_bitmap_b);
2573         memset(iova, 0xff, PAGE_SIZE);
2574         kunmap(vmx_io_bitmap_b);
2575
2576         r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
2577         if (r)
2578                 goto out1;
2579
2580         if (bypass_guest_pf)
2581                 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
2582
2583         return 0;
2584
2585 out1:
2586         __free_page(vmx_io_bitmap_b);
2587 out:
2588         __free_page(vmx_io_bitmap_a);
2589         return r;
2590 }
2591
2592 static void __exit vmx_exit(void)
2593 {
2594         __free_page(vmx_io_bitmap_b);
2595         __free_page(vmx_io_bitmap_a);
2596
2597         kvm_exit_x86();
2598 }
2599
2600 module_init(vmx_init)
2601 module_exit(vmx_exit)