KVM: Hack real-mode segments on vmx from KVM_SET_SREGS
[pandora-kernel.git] / drivers / kvm / vmx.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "vmx.h"
#include "kvm_vmx.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <asm/io.h>
#include <asm/desc.h>

#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

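/*
 * Per-cpu state: vmxarea is this cpu's VMXON region; current_vmcs tracks
 * the VMCS most recently loaded on this cpu with VMPTRLD.
 */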
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

#ifdef CONFIG_X86_64
#define HOST_IS_64 1
#else
#define HOST_IS_64 0
#endif

static struct vmcs_descriptor {
        int size;
        int order;
        u32 revision_id;
} vmcs_descriptor;

#define VMX_SEGMENT_FIELD(seg)                                  \
        [VCPU_SREG_##seg] = {                                   \
                .selector = GUEST_##seg##_SELECTOR,             \
                .base = GUEST_##seg##_BASE,                     \
                .limit = GUEST_##seg##_LIMIT,                   \
                .ar_bytes = GUEST_##seg##_AR_BYTES,             \
        }

static struct kvm_vmx_segment_field {
        unsigned selector;
        unsigned base;
        unsigned limit;
        unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
        VMX_SEGMENT_FIELD(CS),
        VMX_SEGMENT_FIELD(DS),
        VMX_SEGMENT_FIELD(ES),
        VMX_SEGMENT_FIELD(FS),
        VMX_SEGMENT_FIELD(GS),
        VMX_SEGMENT_FIELD(SS),
        VMX_SEGMENT_FIELD(TR),
        VMX_SEGMENT_FIELD(LDTR),
};

static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
        MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline int is_page_fault(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
{
        int i;

        for (i = 0; i < vcpu->nmsrs; ++i)
                if (vcpu->guest_msrs[i].index == msr)
                        return &vcpu->guest_msrs[i];
        return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);
        u8 error;

        asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
                      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                      : "cc", "memory");
        if (error)
                printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
                       vmcs, phys_addr);
}

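/*
 * VMCLEAR must be executed on the cpu where the VMCS was last loaded, so
 * vcpu_clear() bounces over to that cpu via an IPI when necessary.
 */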
static void __vcpu_clear(void *arg)
{
        struct kvm_vcpu *vcpu = arg;
        int cpu = raw_smp_processor_id();

        if (vcpu->cpu == cpu)
                vmcs_clear(vcpu->vmcs);
        if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
                per_cpu(current_vmcs, cpu) = NULL;
}

static void vcpu_clear(struct kvm_vcpu *vcpu)
{
        if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
                smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
        else
                __vcpu_clear(vcpu);
        vcpu->launched = 0;
}

static unsigned long vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile (ASM_VMX_VMREAD_RDX_RAX
                      : "=a"(value) : "d"(field) : "cc");
        return value;
}

static u16 vmcs_read16(unsigned long field)
{
        return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
        return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
        return vmcs_readl(field);
#else
        return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
        printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
               field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
        dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
        u8 error;

        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
                       : "=q"(error) : "a"(value), "d"(field) : "cc" );
        if (unlikely(error))
                vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
        vmcs_writel(field, value);
#else
        vmcs_writel(field, value);
        asm volatile ("");
        vmcs_writel(field+1, value >> 32);
#endif
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
        u64 phys_addr = __pa(vcpu->vmcs);
        int cpu;

        cpu = get_cpu();

        if (vcpu->cpu != cpu)
                vcpu_clear(vcpu);

        if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
                u8 error;

                per_cpu(current_vmcs, cpu) = vcpu->vmcs;
                asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
                              : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                              : "cc");
                if (error)
                        printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
                               vcpu->vmcs, phys_addr);
        }

        if (vcpu->cpu != cpu) {
                struct descriptor_table dt;
                unsigned long sysenter_esp;

                vcpu->cpu = cpu;
                /*
                 * Linux uses per-cpu TSS and GDT, so set these when switching
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
                get_gdt(&dt);
                vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
        }
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
        put_cpu();
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
        vcpu_clear(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
        return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        unsigned long rip;
        u32 interruptibility;

        rip = vmcs_readl(GUEST_RIP);
        rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
        vmcs_writel(GUEST_RIP, rip);

        /*
         * We emulated an instruction, so temporary interrupt blocking
         * should be removed, if set.
         */
        interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
        if (interruptibility & 3)
                vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                             interruptibility & ~3);
        vcpu->interrupt_window_open = 1;
}

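/*
 * Queue a general-protection fault (with error code) for injection into
 * the guest on the next VM entry.
 */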
static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
        printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
               vmcs_readl(GUEST_RIP));
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     GP_VECTOR |
                     INTR_TYPE_EXCEPTION |
                     INTR_INFO_DELIEVER_CODE_MASK |
                     INTR_INFO_VALID_MASK);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
        u64 host_tsc, tsc_offset;

        rdtscll(host_tsc);
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
        u64 host_tsc;

        rdtscll(host_tsc);
        vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

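/*
 * A VM exit restores the host TR selector and base but leaves the cached
 * TSS limit at the architectural minimum, too small for a TSS with an
 * I/O bitmap.  Reload TR from the GDT; the descriptor must be marked
 * available first, since ltr faults on a busy TSS.
 */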
static void reload_tss(void)
{
#ifndef CONFIG_X86_64

        /*
         * VT restores TR but not its size.  Useless.
         */
        struct descriptor_table gdt;
        struct segment_descriptor *descs;

        get_gdt(&gdt);
        descs = (void *)gdt.base;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
#endif
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        u64 data;
        struct vmx_msr_entry *msr;

        if (!pdata) {
                printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
                return -EINVAL;
        }

        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_FS_BASE:
                data = vmcs_readl(GUEST_FS_BASE);
                break;
        case MSR_GS_BASE:
                data = vmcs_readl(GUEST_GS_BASE);
                break;
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
        case MSR_IA32_TIME_STAMP_COUNTER:
                data = guest_read_tsc();
                break;
        case MSR_IA32_SYSENTER_CS:
                data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
        case MSR_IA32_SYSENTER_EIP:
                data = vmcs_readl(GUEST_SYSENTER_EIP);
                break;
        case MSR_IA32_SYSENTER_ESP:
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        data = msr->data;
                        break;
                }
                return kvm_get_msr_common(vcpu, msr_index, pdata);
        }

        *pdata = data;
        return 0;
}

/*
 * Writes an msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        struct vmx_msr_entry *msr;
        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                return kvm_set_msr_common(vcpu, msr_index, data);
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
                break;
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
                break;
        case MSR_IA32_SYSENTER_EIP:
                vmcs_writel(GUEST_SYSENTER_EIP, data);
                break;
        case MSR_IA32_SYSENTER_ESP:
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        msr->data = data;
                        break;
                }
                return kvm_set_msr_common(vcpu, msr_index, data);
        }

        return 0;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
        vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
        vcpu->rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
        vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
        vmcs_writel(GUEST_RIP, vcpu->rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        unsigned long dr7 = 0x400;
        u32 exception_bitmap;
        int old_singlestep;

        exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
        old_singlestep = vcpu->guest_debug.singlestep;

        vcpu->guest_debug.enabled = dbg->enabled;
        if (vcpu->guest_debug.enabled) {
                int i;

                dr7 |= 0x200;  /* exact */
                for (i = 0; i < 4; ++i) {
                        if (!dbg->breakpoints[i].enabled)
                                continue;
                        vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
                        dr7 |= 2 << (i*2);    /* global enable */
                        dr7 |= 0 << (i*4+16); /* execution breakpoint */
                }

                exception_bitmap |= (1u << 1);  /* Trap debug exceptions */

                vcpu->guest_debug.singlestep = dbg->singlestep;
        } else {
                exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
                vcpu->guest_debug.singlestep = 0;
        }

        if (old_singlestep && !vcpu->guest_debug.singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                vmcs_writel(GUEST_RFLAGS, flags);
        }

        vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
        vmcs_writel(GUEST_DR7, dr7);

        return 0;
}

static __init int cpu_has_kvm_support(void)
{
        unsigned long ecx = cpuid_ecx(1);
        return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
        u64 msr;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        return (msr & 5) == 1; /* locked but not enabled */
}

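/*
 * Enable VMX on this cpu: in MSR_IA32_FEATURE_CONTROL, bit 0 locks the
 * register and bit 2 enables VMXON (hence the '5' masks below).  Set both
 * if the BIOS left them clear, then set CR4.VMXE and execute VMXON on
 * this cpu's vmxarea.
 */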
static void hardware_enable(void *garbage)
{
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
        if ((old & 5) != 5)
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
        write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
                      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
        asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

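/*
 * MSR_IA32_VMX_BASIC reports the VMCS revision identifier in its low
 * 32 bits and the VMXON/VMCS region size in bits 32-44; use it to size
 * and stamp the regions we allocate.
 */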
static __init void setup_vmcs_descriptor(void)
{
        u32 vmx_msr_low, vmx_msr_high;

        rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
        vmcs_descriptor.size = vmx_msr_high & 0x1fff;
        vmcs_descriptor.order = get_order(vmcs_descriptor.size);
        vmcs_descriptor.revision_id = vmx_msr_low;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;

        pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_descriptor.size);
        vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
        return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
        return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
        free_pages((unsigned long)vmcs, vmcs_descriptor.order);
}

static __exit void free_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct vmcs *vmcs;

                vmcs = alloc_vmcs_cpu(cpu);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
                }

                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
}

static __init int hardware_setup(void)
{
        setup_vmcs_descriptor();
        return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
        free_kvm_area();
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
        if (vcpu->rmode.active)
                vmcs_write32(EXCEPTION_BITMAP, ~0);
        else
                vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
}

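/*
 * On the transition back to protected mode, restore the segment state
 * that was saved when the guest entered real mode, but only if the guest
 * has not touched the segment since (base unchanged) and the saved
 * descriptor was a code/data segment; otherwise synthesize a writable
 * data segment whose DPL matches the selector's RPL.
 */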
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        if (vmcs_readl(sf->base) == save->base && (save->ar & AR_S_MASK)) {
                vmcs_write16(sf->selector, save->selector);
                vmcs_writel(sf->base, save->base);
                vmcs_write32(sf->limit, save->limit);
                vmcs_write32(sf->ar_bytes, save->ar);
        } else {
                u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
                        << AR_DPL_SHIFT;
                vmcs_write32(sf->ar_bytes, 0x93 | dpl);
        }
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 0;

        vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
        vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
        vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

        flags = vmcs_readl(GUEST_RFLAGS);
        flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
        flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);

        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
                        (vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));

        update_exception_bitmap(vcpu);

        fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

        vmcs_write16(GUEST_SS_SELECTOR, 0);
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

        vmcs_write16(GUEST_CS_SELECTOR,
                     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

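/*
 * Real-mode guests actually run in vm86 mode, which needs a TSS with an
 * interrupt redirection bitmap and I/O bitmap; it occupies the last three
 * pages of the first memory slot (see init_rmode_tss()).
 */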
static int rmode_tss_base(struct kvm* kvm)
{
        gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
        return base_gfn << PAGE_SHIFT;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        save->selector = vmcs_read16(sf->selector);
        save->base = vmcs_readl(sf->base);
        save->limit = vmcs_read32(sf->limit);
        save->ar = vmcs_read32(sf->ar_bytes);
        vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0xf3);
}

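/*
 * Emulate real mode as vm86: stash the protected-mode TR and data segment
 * state, point TR at the shadow TSS, set VM and IOPL=3 in RFLAGS along
 * with CR4.VME, and force each segment into the selector == base >> 4,
 * limit 0xffff shape that vm86 requires.
 */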
static void enter_rmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 1;

        vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

        vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

        vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        flags = vmcs_readl(GUEST_RFLAGS);
        vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

        flags |= IOPL_MASK | X86_EFLAGS_VM;

        vmcs_writel(GUEST_RFLAGS, flags);
        vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
        update_exception_bitmap(vcpu);

        vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
        vmcs_write32(GUEST_SS_LIMIT, 0xffff);
        vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

        vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
                vmcs_writel(GUEST_CS_BASE, 0xf0000);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

        fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
}

#ifdef CONFIG_X86_64

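/*
 * IA-32e guest entry requires TR to reference a busy 64-bit TSS; patch
 * the access-rights type if the guest's TSS is the 32-bit flavour, then
 * turn on EFER.LMA/LME and the IA-32e-mode entry control.
 */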
static void enter_lmode(struct kvm_vcpu *vcpu)
{
        u32 guest_tr_ar;

        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
                printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
                       __FUNCTION__);
                vmcs_write32(GUEST_TR_AR_BYTES,
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }

        vcpu->shadow_efer |= EFER_LMA;

        find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     | VM_ENTRY_CONTROLS_IA32E_MASK);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
        vcpu->shadow_efer &= ~EFER_LMA;

        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_CONTROLS_IA32E_MASK);
}

#endif

static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
        vcpu->cr0 &= KVM_GUEST_CR0_MASK;
        vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;

        vcpu->cr4 &= KVM_GUEST_CR4_MASK;
        vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
                enter_pmode(vcpu);

        if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
                enter_rmode(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
                        exit_lmode(vcpu);
        }
#endif

        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0,
                    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
        vcpu->cr0 = cr0;
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        vmcs_writel(GUEST_CR3, cr3);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);

        vcpu->shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                                     vmcs_read32(VM_ENTRY_CONTROLS) |
                                     VM_ENTRY_CONTROLS_IA32E_MASK);
                msr->data = efer;

        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
                                     vmcs_read32(VM_ENTRY_CONTROLS) &
                                     ~VM_ENTRY_CONTROLS_IA32E_MASK);

                msr->data = efer & ~EFER_LME;
        }
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        var->base = vmcs_readl(sf->base);
        var->limit = vmcs_read32(sf->limit);
        var->selector = vmcs_read16(sf->selector);
        ar = vmcs_read32(sf->ar_bytes);
        if (ar & AR_UNUSABLE_MASK)
                ar = 0;
        var->type = ar & 15;
        var->s = (ar >> 4) & 1;
        var->dpl = (ar >> 5) & 3;
        var->present = (ar >> 7) & 1;
        var->avl = (ar >> 12) & 1;
        var->l = (ar >> 13) & 1;
        var->db = (ar >> 14) & 1;
        var->g = (ar >> 15) & 1;
        var->unusable = (ar >> 16) & 1;
}

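/*
 * Called from KVM_SET_SREGS among others.  If the vcpu is in emulated
 * real mode, code/data segments handed to us are coerced into the vm86
 * shape instead (see enter_rmode()); the BIOS reset CS base 0xffff0000
 * is mapped to 0xf0000 because vm86 insists on base == selector << 4.
 */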
static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
        if (vcpu->rmode.active && var->s) {
                /*
                 * Hack real-mode segments into vm86 compatibility.
                 */
                if (var->base == 0xffff0000 && var->selector == 0xf000)
                        vmcs_writel(sf->base, 0xf0000);
                ar = 0xf3;
        } else if (var->unusable)
                ar = 1 << 16;
        else {
                ar = var->type & 15;
                ar |= (var->s & 1) << 4;
                ar |= (var->dpl & 3) << 5;
                ar |= (var->present & 1) << 7;
                ar |= (var->avl & 1) << 12;
                ar |= (var->l & 1) << 13;
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
        if (ar == 0) /* a 0 value means unusable */
                ar = AR_UNUSABLE_MASK;
        vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

        *db = (ar >> 14) & 1;
        *l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

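/*
 * Build the vm86 TSS in the three reserved pages: the word at offset 0x66
 * is the I/O map base (pointing past the interrupt redirection bitmap),
 * and the final 0xff byte terminates the I/O bitmap.
 */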
static int init_rmode_tss(struct kvm* kvm)
{
        struct page *p1, *p2, *p3;
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        char *page;

        p1 = _gfn_to_page(kvm, fn++);
        p2 = _gfn_to_page(kvm, fn++);
        p3 = _gfn_to_page(kvm, fn);

        if (!p1 || !p2 || !p3) {
                kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
                return 0;
        }

        page = kmap_atomic(p1, KM_USER0);
        memset(page, 0, PAGE_SIZE);
        *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p2, KM_USER0);
        memset(page, 0, PAGE_SIZE);
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p3, KM_USER0);
        memset(page, 0, PAGE_SIZE);
        *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
        kunmap_atomic(page, KM_USER0);

        return 1;
}

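/*
 * The VMX capability MSRs constrain each control field: bits clear in the
 * high word must be 0, bits set in the low word must be 1.  Mask the
 * desired value accordingly before writing it.
 */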
static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
{
        u32 msr_high, msr_low;

        rdmsr(msr, msr_low, msr_high);

        val &= msr_high;
        val |= msr_low;
        vmcs_write32(vmcs_field, val);
}

static void seg_setup(int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        vmcs_write16(sf->selector, 0);
        vmcs_writel(sf->base, 0);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0x93);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
{
        u32 host_sysenter_cs;
        u32 junk;
        unsigned long a;
        struct descriptor_table dt;
        int i;
        int ret = 0;
        int nr_good_msrs;
        extern asmlinkage void kvm_vmx_return(void);

        if (!init_rmode_tss(vcpu->kvm)) {
                ret = -ENOMEM;
                goto out;
        }

        memset(vcpu->regs, 0, sizeof(vcpu->regs));
        vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
        vcpu->cr8 = 0;
        vcpu->apic_base = 0xfee00000 |
                        /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
                        MSR_IA32_APICBASE_ENABLE;

        fx_init(vcpu);

        /*
         * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
         * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
         */
        vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
        vmcs_writel(GUEST_CS_BASE, 0x000f0000);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

        seg_setup(VCPU_SREG_DS);
        seg_setup(VCPU_SREG_ES);
        seg_setup(VCPU_SREG_FS);
        seg_setup(VCPU_SREG_GS);
        seg_setup(VCPU_SREG_SS);

        vmcs_write16(GUEST_TR_SELECTOR, 0);
        vmcs_writel(GUEST_TR_BASE, 0);
        vmcs_write32(GUEST_TR_LIMIT, 0xffff);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        vmcs_write16(GUEST_LDTR_SELECTOR, 0);
        vmcs_writel(GUEST_LDTR_BASE, 0);
        vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
        vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

        vmcs_write32(GUEST_SYSENTER_CS, 0);
        vmcs_writel(GUEST_SYSENTER_ESP, 0);
        vmcs_writel(GUEST_SYSENTER_EIP, 0);

        vmcs_writel(GUEST_RFLAGS, 0x02);
        vmcs_writel(GUEST_RIP, 0xfff0);
        vmcs_writel(GUEST_RSP, 0);

        //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
        vmcs_writel(GUEST_DR7, 0x400);

        vmcs_writel(GUEST_GDTR_BASE, 0);
        vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

        vmcs_writel(GUEST_IDTR_BASE, 0);
        vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

        vmcs_write32(GUEST_ACTIVITY_STATE, 0);
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

        /* I/O */
        vmcs_write64(IO_BITMAP_A, 0);
        vmcs_write64(IO_BITMAP_B, 0);

        guest_write_tsc(0);

        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

        /* Special registers */
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

        /* Control */
        vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS,
                               PIN_BASED_VM_EXEC_CONTROL,
                               PIN_BASED_EXT_INTR_MASK   /* 20.6.1 */
                               | PIN_BASED_NMI_EXITING   /* 20.6.1 */
                        );
        vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS,
                               CPU_BASED_VM_EXEC_CONTROL,
                               CPU_BASED_HLT_EXITING         /* 20.6.2 */
                               | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
                               | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
                               | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
                               | CPU_BASED_MOV_DR_EXITING
                               | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
                        );

        vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

        vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
        vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
        vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
        vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
        rdmsrl(MSR_GS_BASE, a);
        vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
        vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
        vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

        get_idt(&dt);
        vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */


        vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */

        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
        rdmsrl(MSR_IA32_SYSENTER_ESP, a);
        vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
                u64 data;
                int j = vcpu->nmsrs;

                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
                data = data_low | ((u64)data_high << 32);
                vcpu->host_msrs[j].index = index;
                vcpu->host_msrs[j].reserved = 0;
                vcpu->host_msrs[j].data = data;
                vcpu->guest_msrs[j] = vcpu->host_msrs[j];
                ++vcpu->nmsrs;
        }
        printk(KERN_DEBUG "kvm: msrs: %d\n", vcpu->nmsrs);

        nr_good_msrs = vcpu->nmsrs - NR_BAD_MSRS;
        vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
                    virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
        vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
                    virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
        vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
                    virt_to_phys(vcpu->host_msrs + NR_BAD_MSRS));
        vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
                               (HOST_IS_64 << 9));  /* 22.2.1, 20.7.1 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */


        /* 22.2.1, 20.8.1 */
        vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
                               VM_ENTRY_CONTROLS, 0);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

#ifdef CONFIG_X86_64
        vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
        vmcs_writel(TPR_THRESHOLD, 0);
#endif

        vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

        vcpu->cr0 = 0x60000010;
        vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
        vmx_set_cr4(vcpu, 0);
#ifdef CONFIG_X86_64
        vmx_set_efer(vcpu, 0);
#endif

        return 0;

out:
        return ret;
}

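/*
 * Deliver an interrupt to a vm86 guest the way the cpu would in real
 * mode: push FLAGS, CS and IP on the guest stack and vector through the
 * IVT entry at irq * 4.
 */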
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
        u16 ent[2];
        u16 cs;
        u16 ip;
        unsigned long flags;
        unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
        u16 sp =  vmcs_readl(GUEST_RSP);
        u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

        if (sp > ss_limit || sp - 6 > sp) {
                vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_RSP),
                            vmcs_readl(GUEST_SS_BASE),
                            vmcs_read32(GUEST_SS_LIMIT));
                return;
        }

        if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
                                                                sizeof(ent)) {
                vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
                return;
        }

        flags =  vmcs_readl(GUEST_RFLAGS);
        cs =  vmcs_readl(GUEST_CS_BASE) >> 4;
        ip =  vmcs_readl(GUEST_RIP);


        if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
            kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
            kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
                vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
                return;
        }

        vmcs_writel(GUEST_RFLAGS, flags &
                    ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
        vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
        vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
        vmcs_writel(GUEST_RIP, ent[0]);
        vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}

static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);

        if (vcpu->rmode.active) {
                inject_rmode_irq(vcpu, irq);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                        irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}


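/*
 * Inject a pending interrupt if the window is open (IF set and no
 * sti/mov-ss blocking); otherwise arm the interrupt-window exiting
 * control so we exit as soon as injection becomes possible.
 */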
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                       struct kvm_run *kvm_run)
{
        u32 cpu_based_vm_exec_control;

        vcpu->interrupt_window_open =
                ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

        if (vcpu->interrupt_window_open &&
            vcpu->irq_summary &&
            !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                /*
                 * If interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
                kvm_do_inject_irq(vcpu);

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        if (!vcpu->interrupt_window_open &&
            (vcpu->irq_summary || kvm_run->request_interrupt_window))
                /*
                 * Interrupts blocked.  Wait for unblock.
                 */
                cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        else
                cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;

        set_debugreg(dbg->bp[0], 0);
        set_debugreg(dbg->bp[1], 1);
        set_debugreg(dbg->bp[2], 2);
        set_debugreg(dbg->bp[3], 3);

        if (dbg->singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
                vmcs_writel(GUEST_RFLAGS, flags);
        }
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                                  int vec, u32 err_code)
{
        if (!vcpu->rmode.active)
                return 0;

        if (vec == GP_VECTOR && err_code == 0)
                if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
                        return 1;
        return 0;
}

static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        u32 intr_info, error_code;
        unsigned long cr2, rip;
        u32 vect_info;
        enum emulation_result er;
        int r;

        vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
                                                !is_page_fault(intr_info)) {
                printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
                       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
        }

        if (is_external_interrupt(vect_info)) {
                int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
                set_bit(irq, vcpu->irq_pending);
                set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
        }

        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
                asm ("int $2");
                return 1;
        }
        error_code = 0;
        rip = vmcs_readl(GUEST_RIP);
        if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
                cr2 = vmcs_readl(EXIT_QUALIFICATION);

                spin_lock(&vcpu->kvm->lock);
                r = kvm_mmu_page_fault(vcpu, cr2, error_code);
                if (r < 0) {
                        spin_unlock(&vcpu->kvm->lock);
                        return r;
                }
                if (!r) {
                        spin_unlock(&vcpu->kvm->lock);
                        return 1;
                }

                er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
                spin_unlock(&vcpu->kvm->lock);

                switch (er) {
                case EMULATE_DONE:
                        return 1;
                case EMULATE_DO_MMIO:
                        ++kvm_stat.mmio_exits;
                        kvm_run->exit_reason = KVM_EXIT_MMIO;
                        return 0;
                case EMULATE_FAIL:
                        vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
                        break;
                default:
                        BUG();
                }
        }

        if (vcpu->rmode.active &&
            handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
                                                                error_code))
                return 1;

        if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                return 0;
        }
        kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
        kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
        kvm_run->ex.error_code = error_code;
        return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                                     struct kvm_run *kvm_run)
{
        ++kvm_stat.irq_exits;
        return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

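/*
 * For a REP string I/O instruction, work out the width of the count
 * register from the guest's mode and any 0x67 address-size prefix, then
 * read the count from the low bits of RCX.
 */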
static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
{
        u64 inst;
        gva_t rip;
        int countr_size;
        int i, n;

        if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
                countr_size = 2;
        } else {
                u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);

                countr_size = (cs_ar & AR_L_MASK) ? 8:
                              (cs_ar & AR_DB_MASK) ? 4: 2;
        }

        rip =  vmcs_readl(GUEST_RIP);
        if (countr_size != 8)
                rip += vmcs_readl(GUEST_CS_BASE);

        n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);

        for (i = 0; i < n; i++) {
                switch (((u8*)&inst)[i]) {
                case 0xf0:
                case 0xf2:
                case 0xf3:
                case 0x2e:
                case 0x36:
                case 0x3e:
                case 0x26:
                case 0x64:
                case 0x65:
                case 0x66:
                        break;
                case 0x67:
                        countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
                        /* fall through */
                default:
                        goto done;
                }
        }
        return 0;
done:
        countr_size *= 8;
        *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
        //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
        return 1;
}
1438
1439 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1440 {
1441         u64 exit_qualification;
1442         int size, down, in, string, rep;
1443         unsigned port;
1444         unsigned long count;
1445         gva_t address;
1446
1447         ++kvm_stat.io_exits;
1448         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1449         in = (exit_qualification & 8) != 0;
1450         size = (exit_qualification & 7) + 1;
1451         string = (exit_qualification & 16) != 0;
1452         down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1453         count = 1;
1454         rep = (exit_qualification & 32) != 0;
1455         port = exit_qualification >> 16;
1456         address = 0;
1457         if (string) {
1458                 if (rep && !get_io_count(vcpu, &count))
1459                         return 1;
1460                 address = vmcs_readl(GUEST_LINEAR_ADDRESS);
1461         }
1462         return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
1463                              address, rep, port);
1464 }
1465
1466 static void
1467 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1468 {
1469         /*
1470          * Patch in the VMCALL instruction:
1471          */
1472         hypercall[0] = 0x0f;
1473         hypercall[1] = 0x01;
1474         hypercall[2] = 0xc1;
1475         hypercall[3] = 0xc3;
1476 }
1477
1478 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1479 {
1480         u64 exit_qualification;
1481         int cr;
1482         int reg;
1483
1484         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1485         cr = exit_qualification & 15;
1486         reg = (exit_qualification >> 8) & 15;
1487         switch ((exit_qualification >> 4) & 3) {
1488         case 0: /* mov to cr */
1489                 switch (cr) {
1490                 case 0:
1491                         vcpu_load_rsp_rip(vcpu);
1492                         set_cr0(vcpu, vcpu->regs[reg]);
1493                         skip_emulated_instruction(vcpu);
1494                         return 1;
1495                 case 3:
1496                         vcpu_load_rsp_rip(vcpu);
1497                         set_cr3(vcpu, vcpu->regs[reg]);
1498                         skip_emulated_instruction(vcpu);
1499                         return 1;
1500                 case 4:
1501                         vcpu_load_rsp_rip(vcpu);
1502                         set_cr4(vcpu, vcpu->regs[reg]);
1503                         skip_emulated_instruction(vcpu);
1504                         return 1;
1505                 case 8:
1506                         vcpu_load_rsp_rip(vcpu);
1507                         set_cr8(vcpu, vcpu->regs[reg]);
1508                         skip_emulated_instruction(vcpu);
1509                         return 1;
1510                 };
1511                 break;
1512         case 1: /*mov from cr*/
1513                 switch (cr) {
1514                 case 3:
1515                         vcpu_load_rsp_rip(vcpu);
1516                         vcpu->regs[reg] = vcpu->cr3;
1517                         vcpu_put_rsp_rip(vcpu);
1518                         skip_emulated_instruction(vcpu);
1519                         return 1;
1520                 case 8:
1521                         printk(KERN_DEBUG "handle_cr: read CR8 "
1522                                "cpu erratum AA15\n");
1523                         vcpu_load_rsp_rip(vcpu);
1524                         vcpu->regs[reg] = vcpu->cr8;
1525                         vcpu_put_rsp_rip(vcpu);
1526                         skip_emulated_instruction(vcpu);
1527                         return 1;
1528                 }
1529                 break;
1530         case 3: /* lmsw */
1531                 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1532
1533                 skip_emulated_instruction(vcpu);
1534                 return 1;
1535         default:
1536                 break;
1537         }
1538         kvm_run->exit_reason = 0;
1539         printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
1540                (int)(exit_qualification >> 4) & 3, cr);
1541         return 0;
1542 }
1543
1544 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1545 {
1546         u64 exit_qualification;
1547         unsigned long val;
1548         int dr, reg;
1549
1550         /*
1551          * FIXME: this code assumes the host is debugging the guest.
1552          *        need to deal with guest debugging itself too.
1553          */
1554         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1555         dr = exit_qualification & 7;
1556         reg = (exit_qualification >> 8) & 15;
1557         vcpu_load_rsp_rip(vcpu);
1558         if (exit_qualification & 16) {
1559                 /* mov from dr */
1560                 switch (dr) {
1561                 case 6:
1562                         val = 0xffff0ff0;
1563                         break;
1564                 case 7:
1565                         val = 0x400;
1566                         break;
1567                 default:
1568                         val = 0;
1569                 }
1570                 vcpu->regs[reg] = val;
1571         } else {
1572                 /* mov to dr */
1573         }
1574         vcpu_put_rsp_rip(vcpu);
1575         skip_emulated_instruction(vcpu);
1576         return 1;
1577 }
1578
1579 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1580 {
1581         kvm_emulate_cpuid(vcpu);
1582         return 1;
1583 }
1584
1585 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1586 {
1587         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1588         u64 data;
1589
1590         if (vmx_get_msr(vcpu, ecx, &data)) {
1591                 vmx_inject_gp(vcpu, 0);
1592                 return 1;
1593         }
1594
1595         /* FIXME: handling of bits 32:63 of rax, rdx */
1596         vcpu->regs[VCPU_REGS_RAX] = data & -1u;
1597         vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
1598         skip_emulated_instruction(vcpu);
1599         return 1;
1600 }
1601
1602 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1603 {
1604         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1605         u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
1606                 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1607
1608         if (vmx_set_msr(vcpu, ecx, data) != 0) {
1609                 vmx_inject_gp(vcpu, 0);
1610                 return 1;
1611         }
1612
1613         skip_emulated_instruction(vcpu);
1614         return 1;
1615 }
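
/*
 * Illustrative sketch (not part of the driver): rdmsr returns its
 * 64-bit result split across edx:eax, and wrmsr takes its operand the
 * same way; the masking and shifting in handle_rdmsr() and
 * handle_wrmsr() above implement exactly this split.  The helper
 * names are hypothetical.
 */
static inline void msr_split(u64 data, u32 *lo, u32 *hi)
{
        *lo = (u32)data;                /* bits 31:0  -> eax */
        *hi = (u32)(data >> 32);        /* bits 63:32 -> edx */
}

static inline u64 msr_join(u32 lo, u32 hi)
{
        return (u64)lo | ((u64)hi << 32);
}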
1616
1617 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1618                               struct kvm_run *kvm_run)
1619 {
1620         kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
1621         kvm_run->cr8 = vcpu->cr8;
1622         kvm_run->apic_base = vcpu->apic_base;
1623         kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1624                                                   vcpu->irq_summary == 0);
1625 }
1626
1627 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1628                                    struct kvm_run *kvm_run)
1629 {
1630         /*
1631          * If userspace is waiting to inject interrupts, exit to it as
1632          * soon as possible.
1633          */
1634         if (kvm_run->request_interrupt_window &&
1635             !vcpu->irq_summary) {
1636                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1637                 ++kvm_stat.irq_window_exits;
1638                 return 0;
1639         }
1640         return 1;
1641 }
1642
1643 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1644 {
1645         skip_emulated_instruction(vcpu);
1646         if (vcpu->irq_summary)
1647                 return 1;       /* an interrupt is pending; don't halt */
1648
1649         kvm_run->exit_reason = KVM_EXIT_HLT;
1650         ++kvm_stat.halt_exits;
1651         return 0;
1652 }
1653
1654 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1655 {
1656         skip_emulated_instruction(vcpu);
1657         return kvm_hypercall(vcpu, kvm_run);
1658 }
1659
1660 /*
1661  * The exit handlers return 1 if the exit was handled fully and guest execution
1662  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
1663  * to be done to userspace and return 0.
1664  */
1665 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1666                                       struct kvm_run *kvm_run) = {
1667         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
1668         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
1669         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
1670         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
1671         [EXIT_REASON_CR_ACCESS]               = handle_cr,
1672         [EXIT_REASON_DR_ACCESS]               = handle_dr,
1673         [EXIT_REASON_CPUID]                   = handle_cpuid,
1674         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
1675         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
1676         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
1677         [EXIT_REASON_HLT]                     = handle_halt,
1678         [EXIT_REASON_VMCALL]                  = handle_vmcall,
1679 };
1680
1681 static const int kvm_vmx_max_exit_handlers =
1682         ARRAY_SIZE(kvm_vmx_exit_handlers);
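
/*
 * A minimal sketch (not in the driver) of a handler obeying the
 * contract described above: resolve the exit in the kernel and return
 * 1 when possible, otherwise fill in kvm_run for userspace and return
 * 0.  The function name and choice of exit reason are illustrative.
 */
static int handle_example(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        if (vcpu->irq_summary)                   /* resolvable in-kernel */
                return 1;
        kvm_run->exit_reason = KVM_EXIT_UNKNOWN; /* punt to userspace */
        return 0;
}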
1683
1684 /*
1685  * The guest has exited.  See if we can fix it or if we need userspace
1686  * assistance.
1687  */
1688 static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1689 {
1690         u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1691         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
1692
1693         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1694                                 exit_reason != EXIT_REASON_EXCEPTION_NMI)
1695                 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
1696                        "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
1697         kvm_run->instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1698         if (exit_reason < kvm_vmx_max_exit_handlers
1699             && kvm_vmx_exit_handlers[exit_reason])
1700                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
1701         else {
1702                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1703                 kvm_run->hw.hardware_exit_reason = exit_reason;
1704         }
1705         return 0;
1706 }
1707
1708 /*
1709  * Check if userspace requested an interrupt window, and that the
1710  * interrupt window is open.
1711  *
1712  * No need to exit to userspace if we already have an interrupt queued.
1713  */
1714 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1715                                           struct kvm_run *kvm_run)
1716 {
1717         return (!vcpu->irq_summary &&
1718                 kvm_run->request_interrupt_window &&
1719                 vcpu->interrupt_window_open &&
1720                 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1721 }
1722
1723 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1724 {
1725         u8 fail;
1726         u16 fs_sel, gs_sel, ldt_sel;
1727         int fs_gs_ldt_reload_needed;
1728         int r;
1729
1730 again:
1731         /*
1732          * Set host fs and gs selectors.  Unfortunately, the VMX host-state checks
1733          * (Intel SDM sec. 22.2.3) do not allow selectors with rpl > 0 or ti == 1.
1734          */
1735         fs_sel = read_fs();
1736         gs_sel = read_gs();
1737         ldt_sel = read_ldt();
1738         fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
1739         if (!fs_gs_ldt_reload_needed) {
1740                 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1741                 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1742         } else {
1743                 vmcs_write16(HOST_FS_SELECTOR, 0);
1744                 vmcs_write16(HOST_GS_SELECTOR, 0);
1745         }
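        /*
         * Note on the '& 7' test above: the low three bits of a segment
         * selector are RPL (bits 1:0) and TI (bit 2), so a nonzero result
         * means the selector cannot be used as a VMX host selector and must
         * be reloaded by hand after the vmexit (see below).
         */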
1746
1747 #ifdef CONFIG_X86_64
1748         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1749         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1750 #else
1751         vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
1752         vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
1753 #endif
1754
1755         if (!vcpu->mmio_read_completed)
1756                 do_interrupt_requests(vcpu, kvm_run);
1757
1758         if (vcpu->guest_debug.enabled)
1759                 kvm_guest_debug_pre(vcpu);
1760
1761         fx_save(vcpu->host_fx_image);
1762         fx_restore(vcpu->guest_fx_image);
1763
1764         save_msrs(vcpu->host_msrs, vcpu->nmsrs);
1765         load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1766
1767         asm (
1768                 /* Store host registers */
1769                 "pushf \n\t"
1770 #ifdef CONFIG_X86_64
1771                 "push %%rax; push %%rbx; push %%rdx;"
1772                 "push %%rsi; push %%rdi; push %%rbp;"
1773                 "push %%r8;  push %%r9;  push %%r10; push %%r11;"
1774                 "push %%r12; push %%r13; push %%r14; push %%r15;"
1775                 "push %%rcx \n\t"
1776                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
1777 #else
1778                 "pusha; push %%ecx \n\t"
1779                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
1780 #endif
1781                 /* Check whether vmlaunch or vmresume is needed */
1782                 "cmp $0, %1 \n\t"
1783                 /* Load guest registers.  Don't clobber flags. */
1784 #ifdef CONFIG_X86_64
1785                 "mov %c[cr2](%3), %%rax \n\t"
1786                 "mov %%rax, %%cr2 \n\t"
1787                 "mov %c[rax](%3), %%rax \n\t"
1788                 "mov %c[rbx](%3), %%rbx \n\t"
1789                 "mov %c[rdx](%3), %%rdx \n\t"
1790                 "mov %c[rsi](%3), %%rsi \n\t"
1791                 "mov %c[rdi](%3), %%rdi \n\t"
1792                 "mov %c[rbp](%3), %%rbp \n\t"
1793                 "mov %c[r8](%3),  %%r8  \n\t"
1794                 "mov %c[r9](%3),  %%r9  \n\t"
1795                 "mov %c[r10](%3), %%r10 \n\t"
1796                 "mov %c[r11](%3), %%r11 \n\t"
1797                 "mov %c[r12](%3), %%r12 \n\t"
1798                 "mov %c[r13](%3), %%r13 \n\t"
1799                 "mov %c[r14](%3), %%r14 \n\t"
1800                 "mov %c[r15](%3), %%r15 \n\t"
1801                 "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
1802 #else
1803                 "mov %c[cr2](%3), %%eax \n\t"
1804                 "mov %%eax,   %%cr2 \n\t"
1805                 "mov %c[rax](%3), %%eax \n\t"
1806                 "mov %c[rbx](%3), %%ebx \n\t"
1807                 "mov %c[rdx](%3), %%edx \n\t"
1808                 "mov %c[rsi](%3), %%esi \n\t"
1809                 "mov %c[rdi](%3), %%edi \n\t"
1810                 "mov %c[rbp](%3), %%ebp \n\t"
1811                 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
1812 #endif
1813                 /* Enter guest mode */
1814                 "jne launched \n\t"
1815                 ASM_VMX_VMLAUNCH "\n\t"
1816                 "jmp kvm_vmx_return \n\t"
1817                 "launched: " ASM_VMX_VMRESUME "\n\t"
1818                 ".globl kvm_vmx_return \n\t"
1819                 "kvm_vmx_return: "
1820                 /* Save guest registers, load host registers, keep flags */
1821 #ifdef CONFIG_X86_64
1822                 "xchg %3,     (%%rsp) \n\t"
1823                 "mov %%rax, %c[rax](%3) \n\t"
1824                 "mov %%rbx, %c[rbx](%3) \n\t"
1825                 "pushq (%%rsp); popq %c[rcx](%3) \n\t"
1826                 "mov %%rdx, %c[rdx](%3) \n\t"
1827                 "mov %%rsi, %c[rsi](%3) \n\t"
1828                 "mov %%rdi, %c[rdi](%3) \n\t"
1829                 "mov %%rbp, %c[rbp](%3) \n\t"
1830                 "mov %%r8,  %c[r8](%3) \n\t"
1831                 "mov %%r9,  %c[r9](%3) \n\t"
1832                 "mov %%r10, %c[r10](%3) \n\t"
1833                 "mov %%r11, %c[r11](%3) \n\t"
1834                 "mov %%r12, %c[r12](%3) \n\t"
1835                 "mov %%r13, %c[r13](%3) \n\t"
1836                 "mov %%r14, %c[r14](%3) \n\t"
1837                 "mov %%r15, %c[r15](%3) \n\t"
1838                 "mov %%cr2, %%rax   \n\t"
1839                 "mov %%rax, %c[cr2](%3) \n\t"
1840                 "mov (%%rsp), %3 \n\t"
1841
1842                 "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
1843                 "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
1844                 "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
1845                 "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
1846 #else
1847                 "xchg %3, (%%esp) \n\t"
1848                 "mov %%eax, %c[rax](%3) \n\t"
1849                 "mov %%ebx, %c[rbx](%3) \n\t"
1850                 "pushl (%%esp); popl %c[rcx](%3) \n\t"
1851                 "mov %%edx, %c[rdx](%3) \n\t"
1852                 "mov %%esi, %c[rsi](%3) \n\t"
1853                 "mov %%edi, %c[rdi](%3) \n\t"
1854                 "mov %%ebp, %c[rbp](%3) \n\t"
1855                 "mov %%cr2, %%eax  \n\t"
1856                 "mov %%eax, %c[cr2](%3) \n\t"
1857                 "mov (%%esp), %3 \n\t"
1858
1859                 "pop %%ecx; popa \n\t"
1860 #endif
1861                 "setbe %0 \n\t"
1862                 "popf \n\t"
1863               : "=q" (fail)
1864               : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
1865                 "c"(vcpu),
1866                 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
1867                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
1868                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
1869                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
1870                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
1871                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
1872                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
1873 #ifdef CONFIG_X86_64
1874                 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
1875                 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
1876                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
1877                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
1878                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
1879                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
1880                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
1881                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
1882 #endif
1883                 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
1884               : "cc", "memory" );
1885
1886         /*
1887          * Reload the segment selectors ASAP; a functional kernel depends
1888          * on it (i386 relies on having __KERNEL_PDA in %fs and x86_64
1889          * relies on having 0 in %gs for the cpu PDA to work).
1890          */
1891         if (fs_gs_ldt_reload_needed) {
1892                 load_ldt(ldt_sel);
1893                 load_fs(fs_sel);
1894                 /*
1895                  * If we have to reload gs, we must take care to
1896                  * preserve our gs base.
1897                  */
1898                 local_irq_disable();
1899                 load_gs(gs_sel);
1900 #ifdef CONFIG_X86_64
1901                 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
1902 #endif
1903                 local_irq_enable();
1904
1905                 reload_tss();
1906         }
1907         ++kvm_stat.exits;
1908
1909         save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
1910         load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
1911
1912         fx_save(vcpu->guest_fx_image);
1913         fx_restore(vcpu->host_fx_image);
1914         vcpu->interrupt_window_open =
1915                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;

1916         asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
1917
1918         if (fail) {
1919                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1920                 kvm_run->fail_entry.hardware_entry_failure_reason
1921                         = vmcs_read32(VM_INSTRUCTION_ERROR);
1922                 r = 0;
1923         } else {
1924                 /*
1925                  * Profile KVM exit RIPs:
1926                  */
1927                 if (unlikely(prof_on == KVM_PROFILING))
1928                         profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
1929
1930                 vcpu->launched = 1;
1931                 r = kvm_handle_exit(kvm_run, vcpu);
1932                 if (r > 0) {
1933                         /* Give the scheduler a chance to reschedule. */
1934                         if (signal_pending(current)) {
1935                                 ++kvm_stat.signal_exits;
1936                                 post_kvm_run_save(vcpu, kvm_run);
1937                                 kvm_run->exit_reason = KVM_EXIT_INTR;
1938                                 return -EINTR;
1939                         }
1940
1941                         if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1942                                 ++kvm_stat.request_irq_exits;
1943                                 post_kvm_run_save(vcpu, kvm_run);
1944                                 kvm_run->exit_reason = KVM_EXIT_INTR;
1945                                 return -EINTR;
1946                         }
1947
1948                         kvm_resched(vcpu);
1949                         goto again;
1950                 }
1951         }
1952
1953         post_kvm_run_save(vcpu, kvm_run);
1954         return r;
1955 }
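
/*
 * A note on vcpu->launched, used above: the first entry on a vmcs
 * after vmclear must use vmlaunch; once that entry succeeds, later
 * entries must use vmresume until the vmcs is cleared again.  The asm
 * block implements that two-state machine; sketched in C:
 *
 *      if (!vcpu->launched)
 *              vmlaunch();     // first entry after vmclear
 *      else
 *              vmresume();     // every later entry
 */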
1956
1957 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1958 {
1959         vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
1960 }
1961
1962 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
1963                                   unsigned long addr,
1964                                   u32 err_code)
1965 {
1966         u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1967
1968         ++kvm_stat.pf_guest;
1969
1970         if (is_page_fault(vect_info)) {
1971                 printk(KERN_DEBUG "inject_page_fault: "
1972                        "double fault 0x%lx @ 0x%lx\n",
1973                        addr, vmcs_readl(GUEST_RIP));
1974                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
1975                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1976                              DF_VECTOR |
1977                              INTR_TYPE_EXCEPTION |
1978                              INTR_INFO_DELIEVER_CODE_MASK |
1979                              INTR_INFO_VALID_MASK);
1980                 return;
1981         }
1982         vcpu->cr2 = addr;
1983         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
1984         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1985                      PF_VECTOR |
1986                      INTR_TYPE_EXCEPTION |
1987                      INTR_INFO_DELIEVER_CODE_MASK |
1988                      INTR_INFO_VALID_MASK);
1990 }
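
/*
 * Background for the function above: x86 promotes a page fault that
 * occurs while a previous page fault is being delivered into a double
 * fault, which is why a pending page-fault vectoring event turns the
 * injection into DF_VECTOR instead of a second PF_VECTOR.
 */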
1991
1992 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
1993 {
1994         if (vcpu->vmcs) {
1995                 on_each_cpu(__vcpu_clear, vcpu, 0, 1);
1996                 free_vmcs(vcpu->vmcs);
1997                 vcpu->vmcs = NULL;
1998         }
1999 }
2000
2001 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2002 {
2003         vmx_free_vmcs(vcpu);
2004 }
2005
2006 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2007 {
2008         struct vmcs *vmcs;
2009
2010         vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2011         if (!vcpu->guest_msrs)
2012                 return -ENOMEM;
2013
2014         vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2015         if (!vcpu->host_msrs)
2016                 goto out_free_guest_msrs;
2017
2018         vmcs = alloc_vmcs();
2019         if (!vmcs)
2020                 goto out_free_msrs;
2021
2022         vmcs_clear(vmcs);
2023         vcpu->vmcs = vmcs;
2024         vcpu->launched = 0;
2025
2026         return 0;
2027
2028 out_free_msrs:
2029         kfree(vcpu->host_msrs);
2030         vcpu->host_msrs = NULL;
2031
2032 out_free_guest_msrs:
2033         kfree(vcpu->guest_msrs);
2034         vcpu->guest_msrs = NULL;
2035
2036         return -ENOMEM;
2037 }
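
/*
 * The function above uses the usual kernel error-unwind idiom: each
 * allocation gets a cleanup label, and failure paths jump to the
 * deepest label that applies, so each free appears exactly once.
 * Generic sketch with hypothetical names:
 *
 *      a = alloc_a();
 *      if (!a)
 *              return -ENOMEM;
 *      b = alloc_b();
 *      if (!b)
 *              goto out_free_a;
 *      return 0;
 * out_free_a:
 *      kfree(a);
 *      return -ENOMEM;
 */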
2038
2039 static struct kvm_arch_ops vmx_arch_ops = {
2040         .cpu_has_kvm_support = cpu_has_kvm_support,
2041         .disabled_by_bios = vmx_disabled_by_bios,
2042         .hardware_setup = hardware_setup,
2043         .hardware_unsetup = hardware_unsetup,
2044         .hardware_enable = hardware_enable,
2045         .hardware_disable = hardware_disable,
2046
2047         .vcpu_create = vmx_create_vcpu,
2048         .vcpu_free = vmx_free_vcpu,
2049
2050         .vcpu_load = vmx_vcpu_load,
2051         .vcpu_put = vmx_vcpu_put,
2052         .vcpu_decache = vmx_vcpu_decache,
2053
2054         .set_guest_debug = set_guest_debug,
2055         .get_msr = vmx_get_msr,
2056         .set_msr = vmx_set_msr,
2057         .get_segment_base = vmx_get_segment_base,
2058         .get_segment = vmx_get_segment,
2059         .set_segment = vmx_set_segment,
2060         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
2061         .decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
2062         .set_cr0 = vmx_set_cr0,
2063         .set_cr3 = vmx_set_cr3,
2064         .set_cr4 = vmx_set_cr4,
2065 #ifdef CONFIG_X86_64
2066         .set_efer = vmx_set_efer,
2067 #endif
2068         .get_idt = vmx_get_idt,
2069         .set_idt = vmx_set_idt,
2070         .get_gdt = vmx_get_gdt,
2071         .set_gdt = vmx_set_gdt,
2072         .cache_regs = vcpu_load_rsp_rip,
2073         .decache_regs = vcpu_put_rsp_rip,
2074         .get_rflags = vmx_get_rflags,
2075         .set_rflags = vmx_set_rflags,
2076
2077         .tlb_flush = vmx_flush_tlb,
2078         .inject_page_fault = vmx_inject_page_fault,
2079
2080         .inject_gp = vmx_inject_gp,
2081
2082         .run = vmx_vcpu_run,
2083         .skip_emulated_instruction = skip_emulated_instruction,
2084         .vcpu_setup = vmx_vcpu_setup,
2085         .patch_hypercall = vmx_patch_hypercall,
2086 };
2087
2088 static int __init vmx_init(void)
2089 {
2090         return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
2091 }
2092
2093 static void __exit vmx_exit(void)
2094 {
2095         kvm_exit_arch();
2096 }
2097
2098 module_init(vmx_init)
2099 module_exit(vmx_exit)