KVM: nVMX: Move control field setup to functions
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/mm.h>
26 #include <linux/highmem.h>
27 #include <linux/sched.h>
28 #include <linux/moduleparam.h>
29 #include <linux/ftrace_event.h>
30 #include <linux/slab.h>
31 #include <linux/tboot.h>
32 #include "kvm_cache_regs.h"
33 #include "x86.h"
34
35 #include <asm/io.h>
36 #include <asm/desc.h>
37 #include <asm/vmx.h>
38 #include <asm/virtext.h>
39 #include <asm/mce.h>
40 #include <asm/i387.h>
41 #include <asm/xcr.h>
42
43 #include "trace.h"
44
45 #define __ex(x) __kvm_handle_fault_on_reboot(x)
46 #define __ex_clear(x, reg) \
47         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
48
49 MODULE_AUTHOR("Qumranet");
50 MODULE_LICENSE("GPL");
51
52 static int __read_mostly bypass_guest_pf = 1;
53 module_param(bypass_guest_pf, bool, S_IRUGO);
54
55 static int __read_mostly enable_vpid = 1;
56 module_param_named(vpid, enable_vpid, bool, 0444);
57
58 static int __read_mostly flexpriority_enabled = 1;
59 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
60
61 static int __read_mostly enable_ept = 1;
62 module_param_named(ept, enable_ept, bool, S_IRUGO);
63
64 static int __read_mostly enable_unrestricted_guest = 1;
65 module_param_named(unrestricted_guest,
66                         enable_unrestricted_guest, bool, S_IRUGO);
67
68 static int __read_mostly emulate_invalid_guest_state = 0;
69 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
70
71 static int __read_mostly vmm_exclusive = 1;
72 module_param(vmm_exclusive, bool, S_IRUGO);
73
74 static int __read_mostly yield_on_hlt = 1;
75 module_param(yield_on_hlt, bool, S_IRUGO);
76
77 /*
78  * If nested=1, nested virtualization is supported, i.e., guests may use
79  * VMX and act as hypervisors for their own guests. If nested=0, guests
80  * may not use VMX instructions.
81  */
82 static int __read_mostly nested = 0;
83 module_param(nested, bool, S_IRUGO);
84
85 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                           \
86         (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
87 #define KVM_GUEST_CR0_MASK                                              \
88         (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
89 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST                         \
90         (X86_CR0_WP | X86_CR0_NE)
91 #define KVM_VM_CR0_ALWAYS_ON                                            \
92         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
93 #define KVM_CR4_GUEST_OWNED_BITS                                      \
94         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
95          | X86_CR4_OSXMMEXCPT)
96
97 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
98 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
99
100 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
101
102 /*
103  * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
104  * ple_gap:    upper bound on the amount of time between two successive
105  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
106  *             Testing shows this time is usually smaller than 128 cycles.
107  * ple_window: upper bound on the amount of time a guest is allowed to execute
108  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
109  *             less than 2^12 cycles.
110  * Time is measured on a counter that runs at the same rate as the TSC;
111  * see SDM volume 3B, sections 21.6.13 and 22.1.3.
112  */
113 #define KVM_VMX_DEFAULT_PLE_GAP    128
114 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
115 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
116 module_param(ple_gap, int, S_IRUGO);
117
118 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
119 module_param(ple_window, int, S_IRUGO);
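/*
 * When PLE is supported, these two values are programmed into the PLE_GAP
 * and PLE_WINDOW VMCS fields when the VMCS is set up, roughly:
 *
 *	if (ple_gap) {
 *		vmcs_write32(PLE_GAP, ple_gap);
 *		vmcs_write32(PLE_WINDOW, ple_window);
 *	}
 */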
120
121 #define NR_AUTOLOAD_MSRS 1
122 #define VMCS02_POOL_SIZE 1
123
124 struct vmcs {
125         u32 revision_id;
126         u32 abort;
127         char data[0];
128 };
129
130 /*
131  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
132  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
133  * loaded on this CPU (so we can clear them if the CPU goes down).
134  */
135 struct loaded_vmcs {
136         struct vmcs *vmcs;
137         int cpu;
138         int launched;
139         struct list_head loaded_vmcss_on_cpu_link;
140 };
141
142 struct shared_msr_entry {
143         unsigned index;
144         u64 data;
145         u64 mask;
146 };
147
148 /*
149  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
150  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
151  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
152  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
153  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
154  * More than one of these structures may exist, if L1 runs multiple L2 guests.
155  * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
156  * underlying hardware which will be used to run L2.
157  * This structure is packed to ensure that its layout is identical across
158  * machines (necessary for live migration).
159  * If there are changes in this struct, VMCS12_REVISION must be changed.
160  */
161 typedef u64 natural_width;
162 struct __packed vmcs12 {
163         /* According to the Intel spec, a VMCS region must start with the
164          * following two fields. Then follow implementation-specific data.
165          */
166         u32 revision_id;
167         u32 abort;
168
169         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
170         u32 padding[7]; /* room for future expansion */
171
172         u64 io_bitmap_a;
173         u64 io_bitmap_b;
174         u64 msr_bitmap;
175         u64 vm_exit_msr_store_addr;
176         u64 vm_exit_msr_load_addr;
177         u64 vm_entry_msr_load_addr;
178         u64 tsc_offset;
179         u64 virtual_apic_page_addr;
180         u64 apic_access_addr;
181         u64 ept_pointer;
182         u64 guest_physical_address;
183         u64 vmcs_link_pointer;
184         u64 guest_ia32_debugctl;
185         u64 guest_ia32_pat;
186         u64 guest_ia32_efer;
187         u64 guest_ia32_perf_global_ctrl;
188         u64 guest_pdptr0;
189         u64 guest_pdptr1;
190         u64 guest_pdptr2;
191         u64 guest_pdptr3;
192         u64 host_ia32_pat;
193         u64 host_ia32_efer;
194         u64 host_ia32_perf_global_ctrl;
195         u64 padding64[8]; /* room for future expansion */
196         /*
197          * To allow migration of L1 (complete with its L2 guests) between
198          * machines of different natural widths (32 or 64 bit), we cannot have
199          * unsigned long fields with no explicit size. We use u64 (aliased
200          * natural_width) instead. Luckily, x86 is little-endian.
201          */
202         natural_width cr0_guest_host_mask;
203         natural_width cr4_guest_host_mask;
204         natural_width cr0_read_shadow;
205         natural_width cr4_read_shadow;
206         natural_width cr3_target_value0;
207         natural_width cr3_target_value1;
208         natural_width cr3_target_value2;
209         natural_width cr3_target_value3;
210         natural_width exit_qualification;
211         natural_width guest_linear_address;
212         natural_width guest_cr0;
213         natural_width guest_cr3;
214         natural_width guest_cr4;
215         natural_width guest_es_base;
216         natural_width guest_cs_base;
217         natural_width guest_ss_base;
218         natural_width guest_ds_base;
219         natural_width guest_fs_base;
220         natural_width guest_gs_base;
221         natural_width guest_ldtr_base;
222         natural_width guest_tr_base;
223         natural_width guest_gdtr_base;
224         natural_width guest_idtr_base;
225         natural_width guest_dr7;
226         natural_width guest_rsp;
227         natural_width guest_rip;
228         natural_width guest_rflags;
229         natural_width guest_pending_dbg_exceptions;
230         natural_width guest_sysenter_esp;
231         natural_width guest_sysenter_eip;
232         natural_width host_cr0;
233         natural_width host_cr3;
234         natural_width host_cr4;
235         natural_width host_fs_base;
236         natural_width host_gs_base;
237         natural_width host_tr_base;
238         natural_width host_gdtr_base;
239         natural_width host_idtr_base;
240         natural_width host_ia32_sysenter_esp;
241         natural_width host_ia32_sysenter_eip;
242         natural_width host_rsp;
243         natural_width host_rip;
244         natural_width paddingl[8]; /* room for future expansion */
245         u32 pin_based_vm_exec_control;
246         u32 cpu_based_vm_exec_control;
247         u32 exception_bitmap;
248         u32 page_fault_error_code_mask;
249         u32 page_fault_error_code_match;
250         u32 cr3_target_count;
251         u32 vm_exit_controls;
252         u32 vm_exit_msr_store_count;
253         u32 vm_exit_msr_load_count;
254         u32 vm_entry_controls;
255         u32 vm_entry_msr_load_count;
256         u32 vm_entry_intr_info_field;
257         u32 vm_entry_exception_error_code;
258         u32 vm_entry_instruction_len;
259         u32 tpr_threshold;
260         u32 secondary_vm_exec_control;
261         u32 vm_instruction_error;
262         u32 vm_exit_reason;
263         u32 vm_exit_intr_info;
264         u32 vm_exit_intr_error_code;
265         u32 idt_vectoring_info_field;
266         u32 idt_vectoring_error_code;
267         u32 vm_exit_instruction_len;
268         u32 vmx_instruction_info;
269         u32 guest_es_limit;
270         u32 guest_cs_limit;
271         u32 guest_ss_limit;
272         u32 guest_ds_limit;
273         u32 guest_fs_limit;
274         u32 guest_gs_limit;
275         u32 guest_ldtr_limit;
276         u32 guest_tr_limit;
277         u32 guest_gdtr_limit;
278         u32 guest_idtr_limit;
279         u32 guest_es_ar_bytes;
280         u32 guest_cs_ar_bytes;
281         u32 guest_ss_ar_bytes;
282         u32 guest_ds_ar_bytes;
283         u32 guest_fs_ar_bytes;
284         u32 guest_gs_ar_bytes;
285         u32 guest_ldtr_ar_bytes;
286         u32 guest_tr_ar_bytes;
287         u32 guest_interruptibility_info;
288         u32 guest_activity_state;
289         u32 guest_sysenter_cs;
290         u32 host_ia32_sysenter_cs;
291         u32 padding32[8]; /* room for future expansion */
292         u16 virtual_processor_id;
293         u16 guest_es_selector;
294         u16 guest_cs_selector;
295         u16 guest_ss_selector;
296         u16 guest_ds_selector;
297         u16 guest_fs_selector;
298         u16 guest_gs_selector;
299         u16 guest_ldtr_selector;
300         u16 guest_tr_selector;
301         u16 host_es_selector;
302         u16 host_cs_selector;
303         u16 host_ss_selector;
304         u16 host_ds_selector;
305         u16 host_fs_selector;
306         u16 host_gs_selector;
307         u16 host_tr_selector;
308 };
309
310 /*
311  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
312  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
313  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
314  */
315 #define VMCS12_REVISION 0x11e57ed0
316
317 /*
318  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
319  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used
320  * by the current implementation, 4K is reserved to avoid future complications.
321  */
322 #define VMCS12_SIZE 0x1000
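/*
 * Illustrative sketch (not in the original source): a build-time check
 * along these lines would catch struct vmcs12 accidentally outgrowing
 * the 4K region that L1 allocates for it.
 */
static inline void vmcs12_size_check(void)
{
        BUILD_BUG_ON(sizeof(struct vmcs12) > VMCS12_SIZE);
}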
323
324 /* Used to remember the last vmcs02 used for some recently used vmcs12s */
325 struct vmcs02_list {
326         struct list_head list;
327         gpa_t vmptr;
328         struct loaded_vmcs vmcs02;
329 };
330
331 /*
332  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
333  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
334  */
335 struct nested_vmx {
336         /* Has the level-1 guest executed VMXON? */
337         bool vmxon;
338
339         /* The guest-physical address of the current VMCS L1 keeps for L2 */
340         gpa_t current_vmptr;
341         /* The host-usable pointer to the above */
342         struct page *current_vmcs12_page;
343         struct vmcs12 *current_vmcs12;
344
345         /* vmcs02_list cache of VMCSs recently used to run L2 guests */
346         struct list_head vmcs02_pool;
347         int vmcs02_num;
348 };
349
350 struct vcpu_vmx {
351         struct kvm_vcpu       vcpu;
352         unsigned long         host_rsp;
353         u8                    fail;
354         u8                    cpl;
355         bool                  nmi_known_unmasked;
356         u32                   exit_intr_info;
357         u32                   idt_vectoring_info;
358         ulong                 rflags;
359         struct shared_msr_entry *guest_msrs;
360         int                   nmsrs;
361         int                   save_nmsrs;
362 #ifdef CONFIG_X86_64
363         u64                   msr_host_kernel_gs_base;
364         u64                   msr_guest_kernel_gs_base;
365 #endif
366         /*
367          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
368          * non-nested (L1) guest, it always points to vmcs01. For a nested
369          * guest (L2), it points to a different VMCS.
370          */
371         struct loaded_vmcs    vmcs01;
372         struct loaded_vmcs   *loaded_vmcs;
373         bool                  __launched; /* temporary, used in vmx_vcpu_run */
374         struct msr_autoload {
375                 unsigned nr;
376                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
377                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
378         } msr_autoload;
379         struct {
380                 int           loaded;
381                 u16           fs_sel, gs_sel, ldt_sel;
382                 int           gs_ldt_reload_needed;
383                 int           fs_reload_needed;
384         } host_state;
385         struct {
386                 int vm86_active;
387                 ulong save_rflags;
388                 struct kvm_save_segment {
389                         u16 selector;
390                         unsigned long base;
391                         u32 limit;
392                         u32 ar;
393                 } tr, es, ds, fs, gs;
394         } rmode;
395         struct {
396                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
397                 struct kvm_save_segment seg[8];
398         } segment_cache;
399         int vpid;
400         bool emulation_required;
401
402         /* Support for vnmi-less CPUs */
403         int soft_vnmi_blocked;
404         ktime_t entry_time;
405         s64 vnmi_blocked_time;
406         u32 exit_reason;
407
408         bool rdtscp_enabled;
409
410         /* Support for a guest hypervisor (nested VMX) */
411         struct nested_vmx nested;
412 };
413
414 enum segment_cache_field {
415         SEG_FIELD_SEL = 0,
416         SEG_FIELD_BASE = 1,
417         SEG_FIELD_LIMIT = 2,
418         SEG_FIELD_AR = 3,
419
420         SEG_FIELD_NR = 4
421 };
422
423 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
424 {
425         return container_of(vcpu, struct vcpu_vmx, vcpu);
426 }
427
428 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
429 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
430 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
431                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
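/*
 * FIELD64 maps both halves of a 64-bit field. For example,
 * FIELD64(TSC_OFFSET, tsc_offset) expands to
 *      [TSC_OFFSET]      = offsetof(struct vmcs12, tsc_offset),
 *      [TSC_OFFSET_HIGH] = offsetof(struct vmcs12, tsc_offset) + 4,
 * so a 32-bit access through the _HIGH encoding reaches the upper half
 * of the little-endian u64.
 */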
432
433 static unsigned short vmcs_field_to_offset_table[] = {
434         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
435         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
436         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
437         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
438         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
439         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
440         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
441         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
442         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
443         FIELD(HOST_ES_SELECTOR, host_es_selector),
444         FIELD(HOST_CS_SELECTOR, host_cs_selector),
445         FIELD(HOST_SS_SELECTOR, host_ss_selector),
446         FIELD(HOST_DS_SELECTOR, host_ds_selector),
447         FIELD(HOST_FS_SELECTOR, host_fs_selector),
448         FIELD(HOST_GS_SELECTOR, host_gs_selector),
449         FIELD(HOST_TR_SELECTOR, host_tr_selector),
450         FIELD64(IO_BITMAP_A, io_bitmap_a),
451         FIELD64(IO_BITMAP_B, io_bitmap_b),
452         FIELD64(MSR_BITMAP, msr_bitmap),
453         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
454         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
455         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
456         FIELD64(TSC_OFFSET, tsc_offset),
457         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
458         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
459         FIELD64(EPT_POINTER, ept_pointer),
460         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
461         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
462         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
463         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
464         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
465         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
466         FIELD64(GUEST_PDPTR0, guest_pdptr0),
467         FIELD64(GUEST_PDPTR1, guest_pdptr1),
468         FIELD64(GUEST_PDPTR2, guest_pdptr2),
469         FIELD64(GUEST_PDPTR3, guest_pdptr3),
470         FIELD64(HOST_IA32_PAT, host_ia32_pat),
471         FIELD64(HOST_IA32_EFER, host_ia32_efer),
472         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
473         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
474         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
475         FIELD(EXCEPTION_BITMAP, exception_bitmap),
476         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
477         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
478         FIELD(CR3_TARGET_COUNT, cr3_target_count),
479         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
480         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
481         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
482         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
483         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
484         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
485         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
486         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
487         FIELD(TPR_THRESHOLD, tpr_threshold),
488         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
489         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
490         FIELD(VM_EXIT_REASON, vm_exit_reason),
491         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
492         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
493         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
494         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
495         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
496         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
497         FIELD(GUEST_ES_LIMIT, guest_es_limit),
498         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
499         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
500         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
501         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
502         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
503         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
504         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
505         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
506         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
507         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
508         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
509         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
510         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
511         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
512         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
513         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
514         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
515         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
516         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
517         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
518         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
519         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
520         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
521         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
522         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
523         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
524         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
525         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
526         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
527         FIELD(EXIT_QUALIFICATION, exit_qualification),
528         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
529         FIELD(GUEST_CR0, guest_cr0),
530         FIELD(GUEST_CR3, guest_cr3),
531         FIELD(GUEST_CR4, guest_cr4),
532         FIELD(GUEST_ES_BASE, guest_es_base),
533         FIELD(GUEST_CS_BASE, guest_cs_base),
534         FIELD(GUEST_SS_BASE, guest_ss_base),
535         FIELD(GUEST_DS_BASE, guest_ds_base),
536         FIELD(GUEST_FS_BASE, guest_fs_base),
537         FIELD(GUEST_GS_BASE, guest_gs_base),
538         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
539         FIELD(GUEST_TR_BASE, guest_tr_base),
540         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
541         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
542         FIELD(GUEST_DR7, guest_dr7),
543         FIELD(GUEST_RSP, guest_rsp),
544         FIELD(GUEST_RIP, guest_rip),
545         FIELD(GUEST_RFLAGS, guest_rflags),
546         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
547         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
548         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
549         FIELD(HOST_CR0, host_cr0),
550         FIELD(HOST_CR3, host_cr3),
551         FIELD(HOST_CR4, host_cr4),
552         FIELD(HOST_FS_BASE, host_fs_base),
553         FIELD(HOST_GS_BASE, host_gs_base),
554         FIELD(HOST_TR_BASE, host_tr_base),
555         FIELD(HOST_GDTR_BASE, host_gdtr_base),
556         FIELD(HOST_IDTR_BASE, host_idtr_base),
557         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
558         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
559         FIELD(HOST_RSP, host_rsp),
560         FIELD(HOST_RIP, host_rip),
561 };
562 static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
563
564 static inline short vmcs_field_to_offset(unsigned long field)
565 {
566         if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
567                 return -1;
568         return vmcs_field_to_offset_table[field];
569 }
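/*
 * Illustrative sketch (an assumption, not code from this file): an
 * emulated VMREAD could use the table above to locate a vmcs12 field,
 * with access-width handling and VM-instruction-error reporting elided:
 */
static inline u64 example_vmcs12_read_field(struct vmcs12 *vmcs12,
                                            unsigned long field)
{
        short offset = vmcs_field_to_offset(field);

        if (offset < 0)
                return 0; /* real code would flag a VM-instruction error */
        return *(u64 *)((char *)vmcs12 + offset);
}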
570
571 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
572 {
573         return to_vmx(vcpu)->nested.current_vmcs12;
574 }
575
576 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
577 {
578         struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
579         if (is_error_page(page)) {
580                 kvm_release_page_clean(page);
581                 return NULL;
582         }
583         return page;
584 }
585
586 static void nested_release_page(struct page *page)
587 {
588         kvm_release_page_dirty(page);
589 }
590
591 static void nested_release_page_clean(struct page *page)
592 {
593         kvm_release_page_clean(page);
594 }
595
596 static u64 construct_eptp(unsigned long root_hpa);
597 static void kvm_cpu_vmxon(u64 addr);
598 static void kvm_cpu_vmxoff(void);
599 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
600 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
601
602 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
603 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
604 /*
605  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
606  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
607  */
608 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
609 static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
610
611 static unsigned long *vmx_io_bitmap_a;
612 static unsigned long *vmx_io_bitmap_b;
613 static unsigned long *vmx_msr_bitmap_legacy;
614 static unsigned long *vmx_msr_bitmap_longmode;
615
616 static bool cpu_has_load_ia32_efer;
617
618 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
619 static DEFINE_SPINLOCK(vmx_vpid_lock);
620
621 static struct vmcs_config {
622         int size;
623         int order;
624         u32 revision_id;
625         u32 pin_based_exec_ctrl;
626         u32 cpu_based_exec_ctrl;
627         u32 cpu_based_2nd_exec_ctrl;
628         u32 vmexit_ctrl;
629         u32 vmentry_ctrl;
630 } vmcs_config;
631
632 static struct vmx_capability {
633         u32 ept;
634         u32 vpid;
635 } vmx_capability;
636
637 #define VMX_SEGMENT_FIELD(seg)                                  \
638         [VCPU_SREG_##seg] = {                                   \
639                 .selector = GUEST_##seg##_SELECTOR,             \
640                 .base = GUEST_##seg##_BASE,                     \
641                 .limit = GUEST_##seg##_LIMIT,                   \
642                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
643         }
644
645 static struct kvm_vmx_segment_field {
646         unsigned selector;
647         unsigned base;
648         unsigned limit;
649         unsigned ar_bytes;
650 } kvm_vmx_segment_fields[] = {
651         VMX_SEGMENT_FIELD(CS),
652         VMX_SEGMENT_FIELD(DS),
653         VMX_SEGMENT_FIELD(ES),
654         VMX_SEGMENT_FIELD(FS),
655         VMX_SEGMENT_FIELD(GS),
656         VMX_SEGMENT_FIELD(SS),
657         VMX_SEGMENT_FIELD(TR),
658         VMX_SEGMENT_FIELD(LDTR),
659 };
660
661 static u64 host_efer;
662
663 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
664
665 /*
666  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
667  * away by decrementing the array size.
668  */
669 static const u32 vmx_msr_index[] = {
670 #ifdef CONFIG_X86_64
671         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
672 #endif
673         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
674 };
675 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
676
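/*
 * VM-exit interruption information packs the vector into bits 7:0, the
 * event type into bits 10:8 and a valid bit into bit 31; each helper
 * below matches all three fields at once.
 */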
677 static inline bool is_page_fault(u32 intr_info)
678 {
679         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
680                              INTR_INFO_VALID_MASK)) ==
681                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
682 }
683
684 static inline bool is_no_device(u32 intr_info)
685 {
686         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
687                              INTR_INFO_VALID_MASK)) ==
688                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
689 }
690
691 static inline bool is_invalid_opcode(u32 intr_info)
692 {
693         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
694                              INTR_INFO_VALID_MASK)) ==
695                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
696 }
697
698 static inline bool is_external_interrupt(u32 intr_info)
699 {
700         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
701                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
702 }
703
704 static inline bool is_machine_check(u32 intr_info)
705 {
706         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
707                              INTR_INFO_VALID_MASK)) ==
708                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
709 }
710
711 static inline bool cpu_has_vmx_msr_bitmap(void)
712 {
713         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
714 }
715
716 static inline bool cpu_has_vmx_tpr_shadow(void)
717 {
718         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
719 }
720
721 static inline bool vm_need_tpr_shadow(struct kvm *kvm)
722 {
723         return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
724 }
725
726 static inline bool cpu_has_secondary_exec_ctrls(void)
727 {
728         return vmcs_config.cpu_based_exec_ctrl &
729                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
730 }
731
732 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
733 {
734         return vmcs_config.cpu_based_2nd_exec_ctrl &
735                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
736 }
737
738 static inline bool cpu_has_vmx_flexpriority(void)
739 {
740         return cpu_has_vmx_tpr_shadow() &&
741                 cpu_has_vmx_virtualize_apic_accesses();
742 }
743
744 static inline bool cpu_has_vmx_ept_execute_only(void)
745 {
746         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
747 }
748
749 static inline bool cpu_has_vmx_eptp_uncacheable(void)
750 {
751         return vmx_capability.ept & VMX_EPTP_UC_BIT;
752 }
753
754 static inline bool cpu_has_vmx_eptp_writeback(void)
755 {
756         return vmx_capability.ept & VMX_EPTP_WB_BIT;
757 }
758
759 static inline bool cpu_has_vmx_ept_2m_page(void)
760 {
761         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
762 }
763
764 static inline bool cpu_has_vmx_ept_1g_page(void)
765 {
766         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
767 }
768
769 static inline bool cpu_has_vmx_ept_4levels(void)
770 {
771         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
772 }
773
774 static inline bool cpu_has_vmx_invept_individual_addr(void)
775 {
776         return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
777 }
778
779 static inline bool cpu_has_vmx_invept_context(void)
780 {
781         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
782 }
783
784 static inline bool cpu_has_vmx_invept_global(void)
785 {
786         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
787 }
788
789 static inline bool cpu_has_vmx_invvpid_single(void)
790 {
791         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
792 }
793
794 static inline bool cpu_has_vmx_invvpid_global(void)
795 {
796         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
797 }
798
799 static inline bool cpu_has_vmx_ept(void)
800 {
801         return vmcs_config.cpu_based_2nd_exec_ctrl &
802                 SECONDARY_EXEC_ENABLE_EPT;
803 }
804
805 static inline bool cpu_has_vmx_unrestricted_guest(void)
806 {
807         return vmcs_config.cpu_based_2nd_exec_ctrl &
808                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
809 }
810
811 static inline bool cpu_has_vmx_ple(void)
812 {
813         return vmcs_config.cpu_based_2nd_exec_ctrl &
814                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
815 }
816
817 static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
818 {
819         return flexpriority_enabled && irqchip_in_kernel(kvm);
820 }
821
822 static inline bool cpu_has_vmx_vpid(void)
823 {
824         return vmcs_config.cpu_based_2nd_exec_ctrl &
825                 SECONDARY_EXEC_ENABLE_VPID;
826 }
827
828 static inline bool cpu_has_vmx_rdtscp(void)
829 {
830         return vmcs_config.cpu_based_2nd_exec_ctrl &
831                 SECONDARY_EXEC_RDTSCP;
832 }
833
834 static inline bool cpu_has_virtual_nmis(void)
835 {
836         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
837 }
838
839 static inline bool cpu_has_vmx_wbinvd_exit(void)
840 {
841         return vmcs_config.cpu_based_2nd_exec_ctrl &
842                 SECONDARY_EXEC_WBINVD_EXITING;
843 }
844
845 static inline bool report_flexpriority(void)
846 {
847         return flexpriority_enabled;
848 }
849
850 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
851 {
852         int i;
853
854         for (i = 0; i < vmx->nmsrs; ++i)
855                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
856                         return i;
857         return -1;
858 }
859
860 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
861 {
862         struct {
863                 u64 vpid : 16;
864                 u64 rsvd : 48;
865                 u64 gva;
866         } operand = { vpid, 0, gva };
867
868         asm volatile (__ex(ASM_VMX_INVVPID)
869                       /* CF==1 or ZF==1 --> rc = -1 */
870                       "; ja 1f ; ud2 ; 1:"
871                       : : "a"(&operand), "c"(ext) : "cc", "memory");
872 }
873
874 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
875 {
876         struct {
877                 u64 eptp, gpa;
878         } operand = {eptp, gpa};
879
880         asm volatile (__ex(ASM_VMX_INVEPT)
881                         /* CF==1 or ZF==1 --> rc = -1 */
882                         "; ja 1f ; ud2 ; 1:\n"
883                         : : "a" (&operand), "c" (ext) : "cc", "memory");
884 }
885
886 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
887 {
888         int i;
889
890         i = __find_msr_index(vmx, msr);
891         if (i >= 0)
892                 return &vmx->guest_msrs[i];
893         return NULL;
894 }
895
896 static void vmcs_clear(struct vmcs *vmcs)
897 {
898         u64 phys_addr = __pa(vmcs);
899         u8 error;
900
901         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
902                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
903                       : "cc", "memory");
904         if (error)
905                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
906                        vmcs, phys_addr);
907 }
908
909 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
910 {
911         vmcs_clear(loaded_vmcs->vmcs);
912         loaded_vmcs->cpu = -1;
913         loaded_vmcs->launched = 0;
914 }
915
916 static void vmcs_load(struct vmcs *vmcs)
917 {
918         u64 phys_addr = __pa(vmcs);
919         u8 error;
920
921         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
922                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
923                         : "cc", "memory");
924         if (error)
925                 printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
926                        vmcs, phys_addr);
927 }
928
929 static void __loaded_vmcs_clear(void *arg)
930 {
931         struct loaded_vmcs *loaded_vmcs = arg;
932         int cpu = raw_smp_processor_id();
933
934         if (loaded_vmcs->cpu != cpu)
935                 return; /* vcpu migration can race with cpu offline */
936         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
937                 per_cpu(current_vmcs, cpu) = NULL;
938         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
939         loaded_vmcs_init(loaded_vmcs);
940 }
941
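/*
 * A VMCS can only be VMCLEARed on the CPU where it is active, so if this
 * loaded_vmcs was last loaded on another CPU, run __loaded_vmcs_clear()
 * there via an IPI and wait for it to finish.
 */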
942 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
943 {
944         if (loaded_vmcs->cpu != -1)
945                 smp_call_function_single(
946                         loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
947 }
948
949 static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
950 {
951         if (vmx->vpid == 0)
952                 return;
953
954         if (cpu_has_vmx_invvpid_single())
955                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
956 }
957
958 static inline void vpid_sync_vcpu_global(void)
959 {
960         if (cpu_has_vmx_invvpid_global())
961                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
962 }
963
964 static inline void vpid_sync_context(struct vcpu_vmx *vmx)
965 {
966         if (cpu_has_vmx_invvpid_single())
967                 vpid_sync_vcpu_single(vmx);
968         else
969                 vpid_sync_vcpu_global();
970 }
971
972 static inline void ept_sync_global(void)
973 {
974         if (cpu_has_vmx_invept_global())
975                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
976 }
977
978 static inline void ept_sync_context(u64 eptp)
979 {
980         if (enable_ept) {
981                 if (cpu_has_vmx_invept_context())
982                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
983                 else
984                         ept_sync_global();
985         }
986 }
987
988 static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
989 {
990         if (enable_ept) {
991                 if (cpu_has_vmx_invept_individual_addr())
992                         __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
993                                         eptp, gpa);
994                 else
995                         ept_sync_context(eptp);
996         }
997 }
998
999 static __always_inline unsigned long vmcs_readl(unsigned long field)
1000 {
1001         unsigned long value;
1002
1003         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1004                       : "=a"(value) : "d"(field) : "cc");
1005         return value;
1006 }
1007
1008 static __always_inline u16 vmcs_read16(unsigned long field)
1009 {
1010         return vmcs_readl(field);
1011 }
1012
1013 static __always_inline u32 vmcs_read32(unsigned long field)
1014 {
1015         return vmcs_readl(field);
1016 }
1017
1018 static __always_inline u64 vmcs_read64(unsigned long field)
1019 {
1020 #ifdef CONFIG_X86_64
1021         return vmcs_readl(field);
1022 #else
1023         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
1024 #endif
1025 }
1026
1027 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1028 {
1029         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1030                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1031         dump_stack();
1032 }
1033
1034 static void vmcs_writel(unsigned long field, unsigned long value)
1035 {
1036         u8 error;
1037
1038         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1039                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1040         if (unlikely(error))
1041                 vmwrite_error(field, value);
1042 }
1043
1044 static void vmcs_write16(unsigned long field, u16 value)
1045 {
1046         vmcs_writel(field, value);
1047 }
1048
1049 static void vmcs_write32(unsigned long field, u32 value)
1050 {
1051         vmcs_writel(field, value);
1052 }
1053
1054 static void vmcs_write64(unsigned long field, u64 value)
1055 {
1056         vmcs_writel(field, value);
1057 #ifndef CONFIG_X86_64
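        /*
         * On 32-bit hosts the upper half goes to the adjacent _HIGH
         * field encoding (field + 1); the empty asm below appears to
         * act as a compiler barrier between the two halves.
         */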
1058         asm volatile ("");
1059         vmcs_writel(field+1, value >> 32);
1060 #endif
1061 }
1062
1063 static void vmcs_clear_bits(unsigned long field, u32 mask)
1064 {
1065         vmcs_writel(field, vmcs_readl(field) & ~mask);
1066 }
1067
1068 static void vmcs_set_bits(unsigned long field, u32 mask)
1069 {
1070         vmcs_writel(field, vmcs_readl(field) | mask);
1071 }
1072
1073 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1074 {
1075         vmx->segment_cache.bitmask = 0;
1076 }
1077
1078 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1079                                        unsigned field)
1080 {
1081         bool ret;
1082         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1083
1084         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1085                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1086                 vmx->segment_cache.bitmask = 0;
1087         }
1088         ret = vmx->segment_cache.bitmask & mask;
1089         vmx->segment_cache.bitmask |= mask;
1090         return ret;
1091 }
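/*
 * Example: caching the CS base sets bit
 * VCPU_SREG_CS * SEG_FIELD_NR + SEG_FIELD_BASE, giving each of the
 * eight segments its own 4-bit group in the bitmask.
 */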
1092
1093 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1094 {
1095         u16 *p = &vmx->segment_cache.seg[seg].selector;
1096
1097         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1098                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1099         return *p;
1100 }
1101
1102 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1103 {
1104         ulong *p = &vmx->segment_cache.seg[seg].base;
1105
1106         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1107                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1108         return *p;
1109 }
1110
1111 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1112 {
1113         u32 *p = &vmx->segment_cache.seg[seg].limit;
1114
1115         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1116                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1117         return *p;
1118 }
1119
1120 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1121 {
1122         u32 *p = &vmx->segment_cache.seg[seg].ar;
1123
1124         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1125                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1126         return *p;
1127 }
1128
1129 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1130 {
1131         u32 eb;
1132
1133         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1134              (1u << NM_VECTOR) | (1u << DB_VECTOR);
1135         if ((vcpu->guest_debug &
1136              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1137             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1138                 eb |= 1u << BP_VECTOR;
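        /* Real-mode emulation requires intercepting every exception. */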
1139         if (to_vmx(vcpu)->rmode.vm86_active)
1140                 eb = ~0;
1141         if (enable_ept)
1142                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1143         if (vcpu->fpu_active)
1144                 eb &= ~(1u << NM_VECTOR);
1145         vmcs_write32(EXCEPTION_BITMAP, eb);
1146 }
1147
1148 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1149 {
1150         unsigned i;
1151         struct msr_autoload *m = &vmx->msr_autoload;
1152
1153         if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
1154                 vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
1155                 vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
1156                 return;
1157         }
1158
1159         for (i = 0; i < m->nr; ++i)
1160                 if (m->guest[i].index == msr)
1161                         break;
1162
1163         if (i == m->nr)
1164                 return;
1165         --m->nr;
1166         m->guest[i] = m->guest[m->nr];
1167         m->host[i] = m->host[m->nr];
1168         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1169         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1170 }
1171
1172 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1173                                   u64 guest_val, u64 host_val)
1174 {
1175         unsigned i;
1176         struct msr_autoload *m = &vmx->msr_autoload;
1177
1178         if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
1179                 vmcs_write64(GUEST_IA32_EFER, guest_val);
1180                 vmcs_write64(HOST_IA32_EFER, host_val);
1181                 vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
1182                 vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
1183                 return;
1184         }
1185
1186         for (i = 0; i < m->nr; ++i)
1187                 if (m->guest[i].index == msr)
1188                         break;
1189
1190         if (i == m->nr) {
1191                 ++m->nr;
1192                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1193                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1194         }
1195
1196         m->guest[i].index = msr;
1197         m->guest[i].value = guest_val;
1198         m->host[i].index = msr;
1199         m->host[i].value = host_val;
1200 }
1201
1202 static void reload_tss(void)
1203 {
1204         /*
1205          * VT restores TR but not its size.  Useless.
1206          */
1207         struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1208         struct desc_struct *descs;
1209
1210         descs = (void *)gdt->address;
1211         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
1212         load_TR_desc();
1213 }
1214
1215 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
1216 {
1217         u64 guest_efer;
1218         u64 ignore_bits;
1219
1220         guest_efer = vmx->vcpu.arch.efer;
1221
1222         /*
1223          * NX is emulated; LMA and LME are handled by hardware; SCE is
1224          * meaningless outside long mode.
1225          */
1226         ignore_bits = EFER_NX | EFER_SCE;
1227 #ifdef CONFIG_X86_64
1228         ignore_bits |= EFER_LMA | EFER_LME;
1229         /* SCE is meaningful only in long mode on Intel */
1230         if (guest_efer & EFER_LMA)
1231                 ignore_bits &= ~(u64)EFER_SCE;
1232 #endif
1233         guest_efer &= ~ignore_bits;
1234         guest_efer |= host_efer & ignore_bits;
1235         vmx->guest_msrs[efer_offset].data = guest_efer;
1236         vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
1237
1238         clear_atomic_switch_msr(vmx, MSR_EFER);
1239         /* With EPT we can't emulate NX, so it must be switched atomically */
1240         if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
1241                 guest_efer = vmx->vcpu.arch.efer;
1242                 if (!(guest_efer & EFER_LMA))
1243                         guest_efer &= ~EFER_LME;
1244                 add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
1245                 return false;
1246         }
1247
1248         return true;
1249 }
1250
1251 static unsigned long segment_base(u16 selector)
1252 {
1253         struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1254         struct desc_struct *d;
1255         unsigned long table_base;
1256         unsigned long v;
1257
1258         if (!(selector & ~3))
1259                 return 0;
1260
1261         table_base = gdt->address;
1262
1263         if (selector & 4) {           /* from ldt */
1264                 u16 ldt_selector = kvm_read_ldt();
1265
1266                 if (!(ldt_selector & ~3))
1267                         return 0;
1268
1269                 table_base = segment_base(ldt_selector);
1270         }
1271         d = (struct desc_struct *)(table_base + (selector & ~7));
1272         v = get_desc_base(d);
1273 #ifdef CONFIG_X86_64
1274         if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
1275                 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
1276 #endif
1277         return v;
1278 }
1279
1280 static inline unsigned long kvm_read_tr_base(void)
1281 {
1282         u16 tr;
1283         asm("str %0" : "=g"(tr));
1284         return segment_base(tr);
1285 }
1286
1287 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
1288 {
1289         struct vcpu_vmx *vmx = to_vmx(vcpu);
1290         int i;
1291
1292         if (vmx->host_state.loaded)
1293                 return;
1294
1295         vmx->host_state.loaded = 1;
1296         /*
1297          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1298          * allow segment selectors with cpl > 0 or ti == 1.
1299          */
1300         vmx->host_state.ldt_sel = kvm_read_ldt();
1301         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
1302         savesegment(fs, vmx->host_state.fs_sel);
1303         if (!(vmx->host_state.fs_sel & 7)) {
1304                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
1305                 vmx->host_state.fs_reload_needed = 0;
1306         } else {
1307                 vmcs_write16(HOST_FS_SELECTOR, 0);
1308                 vmx->host_state.fs_reload_needed = 1;
1309         }
1310         savesegment(gs, vmx->host_state.gs_sel);
1311         if (!(vmx->host_state.gs_sel & 7)) {
1312                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
1313         } else {
1314                 vmcs_write16(HOST_GS_SELECTOR, 0);
1315                 vmx->host_state.gs_ldt_reload_needed = 1;
1316         }
1317
1318 #ifdef CONFIG_X86_64
1319         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1320         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1321 #else
1322         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
1323         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
1324 #endif
1325
1326 #ifdef CONFIG_X86_64
1327         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1328         if (is_long_mode(&vmx->vcpu))
1329                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1330 #endif
1331         for (i = 0; i < vmx->save_nmsrs; ++i)
1332                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
1333                                    vmx->guest_msrs[i].data,
1334                                    vmx->guest_msrs[i].mask);
1335 }
1336
1337 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1338 {
1339         if (!vmx->host_state.loaded)
1340                 return;
1341
1342         ++vmx->vcpu.stat.host_state_reload;
1343         vmx->host_state.loaded = 0;
1344 #ifdef CONFIG_X86_64
1345         if (is_long_mode(&vmx->vcpu))
1346                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1347 #endif
1348         if (vmx->host_state.gs_ldt_reload_needed) {
1349                 kvm_load_ldt(vmx->host_state.ldt_sel);
1350 #ifdef CONFIG_X86_64
1351                 load_gs_index(vmx->host_state.gs_sel);
1352 #else
1353                 loadsegment(gs, vmx->host_state.gs_sel);
1354 #endif
1355         }
1356         if (vmx->host_state.fs_reload_needed)
1357                 loadsegment(fs, vmx->host_state.fs_sel);
1358         reload_tss();
1359 #ifdef CONFIG_X86_64
1360         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1361 #endif
1362         if (current_thread_info()->status & TS_USEDFPU)
1363                 clts();
1364         load_gdt(&__get_cpu_var(host_gdt));
1365 }
1366
1367 static void vmx_load_host_state(struct vcpu_vmx *vmx)
1368 {
1369         preempt_disable();
1370         __vmx_load_host_state(vmx);
1371         preempt_enable();
1372 }
1373
1374 /*
1375  * Switches to the specified vcpu, until a matching vcpu_put(), but assumes
1376  * the vcpu mutex is already taken.
1377  */
1378 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1379 {
1380         struct vcpu_vmx *vmx = to_vmx(vcpu);
1381         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1382
1383         if (!vmm_exclusive)
1384                 kvm_cpu_vmxon(phys_addr);
1385         else if (vmx->loaded_vmcs->cpu != cpu)
1386                 loaded_vmcs_clear(vmx->loaded_vmcs);
1387
1388         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
1389                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1390                 vmcs_load(vmx->loaded_vmcs->vmcs);
1391         }
1392
1393         if (vmx->loaded_vmcs->cpu != cpu) {
1394                 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1395                 unsigned long sysenter_esp;
1396
1397                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1398                 local_irq_disable();
1399                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1400                          &per_cpu(loaded_vmcss_on_cpu, cpu));
1401                 local_irq_enable();
1402
1403                 /*
1404                  * Linux uses per-cpu TSS and GDT, so set these when switching
1405                  * processors.
1406                  */
1407                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
1408                 vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
1409
1410                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
1411                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
1412                 vmx->loaded_vmcs->cpu = cpu;
1413         }
1414 }
1415
1416 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1417 {
1418         __vmx_load_host_state(to_vmx(vcpu));
1419         if (!vmm_exclusive) {
1420                 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
1421                 vcpu->cpu = -1;
1422                 kvm_cpu_vmxoff();
1423         }
1424 }
1425
1426 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
1427 {
1428         ulong cr0;
1429
1430         if (vcpu->fpu_active)
1431                 return;
1432         vcpu->fpu_active = 1;
1433         cr0 = vmcs_readl(GUEST_CR0);
1434         cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
1435         cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
1436         vmcs_writel(GUEST_CR0, cr0);
1437         update_exception_bitmap(vcpu);
1438         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
1439         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1440 }
1441
1442 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
1443
1444 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1445 {
1446         vmx_decache_cr0_guest_bits(vcpu);
1447         vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
1448         update_exception_bitmap(vcpu);
1449         vcpu->arch.cr0_guest_owned_bits = 0;
1450         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1451         vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
1452 }
1453
1454 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1455 {
1456         unsigned long rflags, save_rflags;
1457
1458         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
1459                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1460                 rflags = vmcs_readl(GUEST_RFLAGS);
1461                 if (to_vmx(vcpu)->rmode.vm86_active) {
1462                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1463                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1464                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1465                 }
1466                 to_vmx(vcpu)->rflags = rflags;
1467         }
1468         return to_vmx(vcpu)->rflags;
1469 }
1470
1471 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1472 {
1473         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1474         __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
1475         to_vmx(vcpu)->rflags = rflags;
1476         if (to_vmx(vcpu)->rmode.vm86_active) {
1477                 to_vmx(vcpu)->rmode.save_rflags = rflags;
1478                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1479         }
1480         vmcs_writel(GUEST_RFLAGS, rflags);
1481 }
1482
1483 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1484 {
1485         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1486         int ret = 0;
1487
1488         if (interruptibility & GUEST_INTR_STATE_STI)
1489                 ret |= KVM_X86_SHADOW_INT_STI;
1490         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1491                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1492
1493         return ret & mask;
1494 }
1495
1496 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1497 {
1498         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1499         u32 interruptibility = interruptibility_old;
1500
1501         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1502
1503         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1504                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1505         else if (mask & KVM_X86_SHADOW_INT_STI)
1506                 interruptibility |= GUEST_INTR_STATE_STI;
1507
1508         if (interruptibility != interruptibility_old)
1509                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1510 }
1511
1512 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
1513 {
1514         unsigned long rip;
1515
1516         rip = kvm_rip_read(vcpu);
1517         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1518         kvm_rip_write(vcpu, rip);
1519
1520         /* skipping an emulated instruction also counts */
1521         vmx_set_interrupt_shadow(vcpu, 0);
1522 }
1523
1524 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1525 {
1526         /* Ensure that we clear the HLT state in the VMCS.  We don't need to
1527          * explicitly skip the instruction because if the HLT state is set, then
1528          * the instruction is already executing and RIP has already been
1529          * advanced. */
1530         if (!yield_on_hlt &&
1531             vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1532                 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1533 }
1534
1535 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1536                                 bool has_error_code, u32 error_code,
1537                                 bool reinject)
1538 {
1539         struct vcpu_vmx *vmx = to_vmx(vcpu);
1540         u32 intr_info = nr | INTR_INFO_VALID_MASK;
1541
1542         if (has_error_code) {
1543                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1544                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1545         }
1546
1547         if (vmx->rmode.vm86_active) {
1548                 int inc_eip = 0;
1549                 if (kvm_exception_is_soft(nr))
1550                         inc_eip = vcpu->arch.event_exit_inst_len;
1551                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
1552                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1553                 return;
1554         }
1555
1556         if (kvm_exception_is_soft(nr)) {
1557                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1558                              vmx->vcpu.arch.event_exit_inst_len);
1559                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1560         } else
1561                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
1562
1563         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1564         vmx_clear_hlt(vcpu);
1565 }
1566
1567 static bool vmx_rdtscp_supported(void)
1568 {
1569         return cpu_has_vmx_rdtscp();
1570 }
1571
1572 /*
1573  * Swap MSR entry in host/guest MSR entry array.
1574  */
1575 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
1576 {
1577         struct shared_msr_entry tmp;
1578
1579         tmp = vmx->guest_msrs[to];
1580         vmx->guest_msrs[to] = vmx->guest_msrs[from];
1581         vmx->guest_msrs[from] = tmp;
1582 }
1583
1584 /*
1585  * Set up the vmcs to automatically save and restore system
1586  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
1587  * mode, as fiddling with msrs is very expensive.
1588  */
1589 static void setup_msrs(struct vcpu_vmx *vmx)
1590 {
1591         int save_nmsrs, index;
1592         unsigned long *msr_bitmap;
1593
1594         vmx_load_host_state(vmx);
1595         save_nmsrs = 0;
1596 #ifdef CONFIG_X86_64
1597         if (is_long_mode(&vmx->vcpu)) {
1598                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
1599                 if (index >= 0)
1600                         move_msr_up(vmx, index, save_nmsrs++);
1601                 index = __find_msr_index(vmx, MSR_LSTAR);
1602                 if (index >= 0)
1603                         move_msr_up(vmx, index, save_nmsrs++);
1604                 index = __find_msr_index(vmx, MSR_CSTAR);
1605                 if (index >= 0)
1606                         move_msr_up(vmx, index, save_nmsrs++);
1607                 index = __find_msr_index(vmx, MSR_TSC_AUX);
1608                 if (index >= 0 && vmx->rdtscp_enabled)
1609                         move_msr_up(vmx, index, save_nmsrs++);
1610                 /*
1611                  * MSR_STAR is only needed on long-mode guests, and only
1612                  * if EFER.SCE is enabled.
1613                  */
1614                 index = __find_msr_index(vmx, MSR_STAR);
1615                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
1616                         move_msr_up(vmx, index, save_nmsrs++);
1617         }
1618 #endif
1619         index = __find_msr_index(vmx, MSR_EFER);
1620         if (index >= 0 && update_transition_efer(vmx, index))
1621                 move_msr_up(vmx, index, save_nmsrs++);
1622
1623         vmx->save_nmsrs = save_nmsrs;
1624
1625         if (cpu_has_vmx_msr_bitmap()) {
1626                 if (is_long_mode(&vmx->vcpu))
1627                         msr_bitmap = vmx_msr_bitmap_longmode;
1628                 else
1629                         msr_bitmap = vmx_msr_bitmap_legacy;
1630
1631                 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
1632         }
1633 }
1634
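/*
 * Editor's note: a worked example of the compaction performed by
 * setup_msrs() above, with a hypothetical ordering. If guest_msrs[]
 * initially holds {STAR, LSTAR, CSTAR, EFER} and only LSTAR and EFER
 * need automatic save/restore for this guest, the move_msr_up() calls
 * swap the hits to the front, leaving {LSTAR, EFER, CSTAR, STAR} with
 * save_nmsrs == 2, so the world-switch code touches only the first two
 * entries.
 */
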
1635 /*
1636  * reads and returns guest's timestamp counter "register"
1637  * guest_tsc = host_tsc + tsc_offset    -- 21.3
1638  */
1639 static u64 guest_read_tsc(void)
1640 {
1641         u64 host_tsc, tsc_offset;
1642
1643         rdtscll(host_tsc);
1644         tsc_offset = vmcs_read64(TSC_OFFSET);
1645         return host_tsc + tsc_offset;
1646 }
1647
1648 /*
1649  * Empty callback. It needs a real implementation once VMX supports the
1650  * SET_TSC_KHZ ioctl; the callback should then update internal vmx state
1651  * to make the changes effective.
1652  */
1653 static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
1654 {
1655         /* Nothing to do here */
1656 }
1657
1658 /*
1659  * writes 'offset' into guest's timestamp counter offset register
1660  */
1661 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1662 {
1663         vmcs_write64(TSC_OFFSET, offset);
1664 }
1665
1666 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
1667 {
1668         u64 offset = vmcs_read64(TSC_OFFSET);
1669         vmcs_write64(TSC_OFFSET, offset + adjustment);
1670 }
1671
1672 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1673 {
1674         return target_tsc - native_read_tsc();
1675 }
1676
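/*
 * Editor's note: the TSC arithmetic above with concrete, hypothetical
 * numbers (demo_tsc() is illustration only, not part of the driver).
 * To make a guest whose TSC should read 250000 while the host TSC reads
 * 1000000, vmx_compute_tsc_offset() returns 250000 - 1000000 = -750000,
 * and guest_read_tsc() later yields 1000000 + (-750000) = 250000.
 */
#if 0
static u64 demo_tsc(u64 host_tsc, u64 target_tsc)
{
	s64 offset = target_tsc - host_tsc;	/* vmx_compute_tsc_offset() */

	return host_tsc + offset;		/* guest_read_tsc() */
}
#endif
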
1677 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
1678 {
1679         struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
1680         return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
1681 }
1682
1683 /*
1684  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
1685  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
1686  * all guests if the "nested" module option is off, and can also be disabled
1687  * for a single guest by disabling its VMX cpuid bit.
1688  */
1689 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
1690 {
1691         return nested && guest_cpuid_has_vmx(vcpu);
1692 }
1693
1694 /*
1695  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
1696  * returned for the various VMX controls MSRs when nested VMX is enabled.
1697  * The same values should also be used to verify that vmcs12 control fields are
1698  * valid during nested entry from L1 to L2.
1699  * Each of these control msrs has a low and high 32-bit half: A low bit is on
1700  * if the corresponding bit in the (32-bit) control field *must* be on, and a
1701  * bit in the high half is on if the corresponding bit in the control field
1702  * may be on. See also vmx_control_verify().
1703  * TODO: allow these variables to be modified (downgraded) by module options
1704  * or other means.
1705  */
1706 static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
1707 static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
1708 static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
1709 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
1710 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
1711 static __init void nested_vmx_setup_ctls_msrs(void)
1712 {
1713         /*
1714          * Note that as a general rule, the high half of the MSRs (bits in
1715          * the control fields which may be 1) should be initialized by the
1716          * intersection of the underlying hardware's MSR (i.e., features which
1717          * can be supported) and the list of features we want to expose -
1718          * because they are known to be properly supported in our code.
1719          * Also, usually, the low half of the MSRs (bits which must be 1) can
1720          * be set to 0, meaning that L1 may turn off any of these bits. The
1721          * reason is that if such a bit is really needed, it will appear
1722          * in vmcs01, and prepare_vmcs02, which bitwise-or's the control fields
1723          * of vmcs01 and vmcs12, will keep it set in vmcs02 even if L1 left it
1724          * off - and nested_vmx_exit_handled() will not pass related exits to L1.
1725          * These rules have exceptions below.
1726          */
1727
1728         /* pin-based controls */
1729         /*
1730          * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
1731          * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
1732          */
1733         nested_vmx_pinbased_ctls_low = 0x16;
1734         nested_vmx_pinbased_ctls_high = 0x16 |
1735                 PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
1736                 PIN_BASED_VIRTUAL_NMIS;
1737
1738         /* exit controls */
1739         nested_vmx_exit_ctls_low = 0;
1740 #ifdef CONFIG_X86_64
1741         nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
1742 #else
1743         nested_vmx_exit_ctls_high = 0;
1744 #endif
1745
1746         /* entry controls */
1747         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
1748                 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
1749         nested_vmx_entry_ctls_low = 0;
1750         nested_vmx_entry_ctls_high &=
1751                 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;
1752
1753         /* cpu-based controls */
1754         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
1755                 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
1756         nested_vmx_procbased_ctls_low = 0;
1757         nested_vmx_procbased_ctls_high &=
1758                 CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
1759                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
1760                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
1761                 CPU_BASED_CR3_STORE_EXITING |
1762 #ifdef CONFIG_X86_64
1763                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
1764 #endif
1765                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
1766                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
1767                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1768         /*
1769          * We can allow some features even when not supported by the
1770          * hardware. For example, L1 can specify an MSR bitmap - and we
1771          * can use it to avoid exits to L1 - even when L0 runs L2
1772          * without MSR bitmaps.
1773          */
1774         nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
1775
1776         /* secondary cpu-based controls */
1777         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
1778                 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
1779         nested_vmx_secondary_ctls_low = 0;
1780         nested_vmx_secondary_ctls_high &=
1781                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1782 }
1783
1784 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
1785 {
1786         /*
1787          * Bits that are 0 in high must be 0 in control; bits that are 1 in low must be 1.
1788          */
1789         return ((control & high) | low) == control;
1790 }
1791
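/*
 * Editor's note: a worked example of the check above, with hypothetical
 * values. Take low = 0x16 (bits 1, 2 and 4 must be 1) and high = 0x17
 * (only bits 0, 1, 2 and 4 may be 1):
 *	control = 0x16: (0x16 & 0x17) | 0x16 == 0x16          -> valid
 *	control = 0x06: (0x06 & 0x17) | 0x16 == 0x16 != 0x06  -> rejected,
 *		required bit 4 is clear;
 *	control = 0x36: (0x36 & 0x17) | 0x16 == 0x16 != 0x36  -> rejected,
 *		forbidden bit 5 is set.
 */
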
1792 static inline u64 vmx_control_msr(u32 low, u32 high)
1793 {
1794         return low | ((u64)high << 32);
1795 }
1796
1797 /*
1798  * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
1799  * also let it use VMX-specific MSRs.
1800  * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
1801  * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
1802  * like all other MSRs).
1803  */
1804 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1805 {
1806         if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
1807                      msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
1808                 /*
1809                  * According to the spec, processors which do not support VMX
1810                  * should throw a #GP(0) when VMX capability MSRs are read.
1811                  */
1812                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
1813                 return 1;
1814         }
1815
1816         switch (msr_index) {
1817         case MSR_IA32_FEATURE_CONTROL:
1818                 *pdata = 0;
1819                 break;
1820         case MSR_IA32_VMX_BASIC:
1821                 /*
1822                  * This MSR reports some information about VMX support. We
1823                  * should return information about the VMX we emulate for the
1824                  * guest, and the VMCS structure we give it - not about the
1825                  * VMX support of the underlying hardware.
1826                  */
1827                 *pdata = VMCS12_REVISION |
1828                            ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
1829                            (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
1830                 break;
1831         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1832         case MSR_IA32_VMX_PINBASED_CTLS:
1833                 *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
1834                                         nested_vmx_pinbased_ctls_high);
1835                 break;
1836         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1837         case MSR_IA32_VMX_PROCBASED_CTLS:
1838                 *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
1839                                         nested_vmx_procbased_ctls_high);
1840                 break;
1841         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1842         case MSR_IA32_VMX_EXIT_CTLS:
1843                 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
1844                                         nested_vmx_exit_ctls_high);
1845                 break;
1846         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1847         case MSR_IA32_VMX_ENTRY_CTLS:
1848                 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
1849                                         nested_vmx_entry_ctls_high);
1850                 break;
1851         case MSR_IA32_VMX_MISC:
1852                 *pdata = 0;
1853                 break;
1854         /*
1855          * These MSRs specify bits which the guest must keep fixed (on or off)
1856          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
1857          * We picked the standard core2 setting.
1858          */
1859 #define VMXON_CR0_ALWAYSON      (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
1860 #define VMXON_CR4_ALWAYSON      X86_CR4_VMXE
1861         case MSR_IA32_VMX_CR0_FIXED0:
1862                 *pdata = VMXON_CR0_ALWAYSON;
1863                 break;
1864         case MSR_IA32_VMX_CR0_FIXED1:
1865                 *pdata = -1ULL;
1866                 break;
1867         case MSR_IA32_VMX_CR4_FIXED0:
1868                 *pdata = VMXON_CR4_ALWAYSON;
1869                 break;
1870         case MSR_IA32_VMX_CR4_FIXED1:
1871                 *pdata = -1ULL;
1872                 break;
1873         case MSR_IA32_VMX_VMCS_ENUM:
1874                 *pdata = 0x1f;
1875                 break;
1876         case MSR_IA32_VMX_PROCBASED_CTLS2:
1877                 *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
1878                                         nested_vmx_secondary_ctls_high);
1879                 break;
1880         case MSR_IA32_VMX_EPT_VPID_CAP:
1881                 /* Currently, no nested ept or nested vpid */
1882                 *pdata = 0;
1883                 break;
1884         default:
1885                 return 0;
1886         }
1887
1888         return 1;
1889 }
1890
1891 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1892 {
1893         if (!nested_vmx_allowed(vcpu))
1894                 return 0;
1895
1896         if (msr_index == MSR_IA32_FEATURE_CONTROL)
1897                 /* TODO: the right thing. */
1898                 return 1;
1899         /*
1900          * No need to treat VMX capability MSRs specially: If we don't handle
1901          * them, handle_wrmsr will #GP(0), which is correct (they are read-only).
1902          */
1903         return 0;
1904 }
1905
1906 /*
1907  * Reads an msr value (of 'msr_index') into 'pdata'.
1908  * Returns 0 on success, non-0 otherwise.
1909  * Assumes vcpu_load() was already called.
1910  */
1911 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1912 {
1913         u64 data;
1914         struct shared_msr_entry *msr;
1915
1916         if (!pdata) {
1917                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
1918                 return -EINVAL;
1919         }
1920
1921         switch (msr_index) {
1922 #ifdef CONFIG_X86_64
1923         case MSR_FS_BASE:
1924                 data = vmcs_readl(GUEST_FS_BASE);
1925                 break;
1926         case MSR_GS_BASE:
1927                 data = vmcs_readl(GUEST_GS_BASE);
1928                 break;
1929         case MSR_KERNEL_GS_BASE:
1930                 vmx_load_host_state(to_vmx(vcpu));
1931                 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
1932                 break;
1933 #endif
1934         case MSR_EFER:
1935                 return kvm_get_msr_common(vcpu, msr_index, pdata);
1936         case MSR_IA32_TSC:
1937                 data = guest_read_tsc();
1938                 break;
1939         case MSR_IA32_SYSENTER_CS:
1940                 data = vmcs_read32(GUEST_SYSENTER_CS);
1941                 break;
1942         case MSR_IA32_SYSENTER_EIP:
1943                 data = vmcs_readl(GUEST_SYSENTER_EIP);
1944                 break;
1945         case MSR_IA32_SYSENTER_ESP:
1946                 data = vmcs_readl(GUEST_SYSENTER_ESP);
1947                 break;
1948         case MSR_TSC_AUX:
1949                 if (!to_vmx(vcpu)->rdtscp_enabled)
1950                         return 1;
1951                 /* Otherwise falls through */
1952         default:
1953                 vmx_load_host_state(to_vmx(vcpu));
1954                 if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
1955                         return 0;
1956                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
1957                 if (msr) {
1958                         vmx_load_host_state(to_vmx(vcpu));
1959                         data = msr->data;
1960                         break;
1961                 }
1962                 return kvm_get_msr_common(vcpu, msr_index, pdata);
1963         }
1964
1965         *pdata = data;
1966         return 0;
1967 }
1968
1969 /*
1970  * Writes an MSR value into the appropriate "register".
1971  * Returns 0 on success, non-0 otherwise.
1972  * Assumes vcpu_load() was already called.
1973  */
1974 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1975 {
1976         struct vcpu_vmx *vmx = to_vmx(vcpu);
1977         struct shared_msr_entry *msr;
1978         int ret = 0;
1979
1980         switch (msr_index) {
1981         case MSR_EFER:
1982                 vmx_load_host_state(vmx);
1983                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1984                 break;
1985 #ifdef CONFIG_X86_64
1986         case MSR_FS_BASE:
1987                 vmx_segment_cache_clear(vmx);
1988                 vmcs_writel(GUEST_FS_BASE, data);
1989                 break;
1990         case MSR_GS_BASE:
1991                 vmx_segment_cache_clear(vmx);
1992                 vmcs_writel(GUEST_GS_BASE, data);
1993                 break;
1994         case MSR_KERNEL_GS_BASE:
1995                 vmx_load_host_state(vmx);
1996                 vmx->msr_guest_kernel_gs_base = data;
1997                 break;
1998 #endif
1999         case MSR_IA32_SYSENTER_CS:
2000                 vmcs_write32(GUEST_SYSENTER_CS, data);
2001                 break;
2002         case MSR_IA32_SYSENTER_EIP:
2003                 vmcs_writel(GUEST_SYSENTER_EIP, data);
2004                 break;
2005         case MSR_IA32_SYSENTER_ESP:
2006                 vmcs_writel(GUEST_SYSENTER_ESP, data);
2007                 break;
2008         case MSR_IA32_TSC:
2009                 kvm_write_tsc(vcpu, data);
2010                 break;
2011         case MSR_IA32_CR_PAT:
2012                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2013                         vmcs_write64(GUEST_IA32_PAT, data);
2014                         vcpu->arch.pat = data;
2015                         break;
2016                 }
2017                 ret = kvm_set_msr_common(vcpu, msr_index, data);
2018                 break;
2019         case MSR_TSC_AUX:
2020                 if (!vmx->rdtscp_enabled)
2021                         return 1;
2022                 /* Check reserved bit, higher 32 bits should be zero */
2023                 if ((data >> 32) != 0)
2024                         return 1;
2025                 /* Otherwise falls through */
2026         default:
2027                 if (vmx_set_vmx_msr(vcpu, msr_index, data))
2028                         break;
2029                 msr = find_msr_entry(vmx, msr_index);
2030                 if (msr) {
2031                         vmx_load_host_state(vmx);
2032                         msr->data = data;
2033                         break;
2034                 }
2035                 ret = kvm_set_msr_common(vcpu, msr_index, data);
2036         }
2037
2038         return ret;
2039 }
2040
2041 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2042 {
2043         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
2044         switch (reg) {
2045         case VCPU_REGS_RSP:
2046                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2047                 break;
2048         case VCPU_REGS_RIP:
2049                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2050                 break;
2051         case VCPU_EXREG_PDPTR:
2052                 if (enable_ept)
2053                         ept_save_pdptrs(vcpu);
2054                 break;
2055         default:
2056                 break;
2057         }
2058 }
2059
2060 static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
2061 {
2062         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
2063                 vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
2064         else
2065                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2066
2067         update_exception_bitmap(vcpu);
2068 }
2069
2070 static __init int cpu_has_kvm_support(void)
2071 {
2072         return cpu_has_vmx();
2073 }
2074
2075 static __init int vmx_disabled_by_bios(void)
2076 {
2077         u64 msr;
2078
2079         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
2080         if (msr & FEATURE_CONTROL_LOCKED) {
2081                 /* launched w/ TXT and VMX disabled */
2082                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2083                         && tboot_enabled())
2084                         return 1;
2085                 /* launched w/o TXT and VMX only enabled w/ TXT */
2086                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2087                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2088                         && !tboot_enabled()) {
2089                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
2090                                 "activate TXT before enabling KVM\n");
2091                         return 1;
2092                 }
2093                 /* launched w/o TXT and VMX disabled */
2094                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2095                         && !tboot_enabled())
2096                         return 1;
2097         }
2098
2099         return 0;
2100 }
2101
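/*
 * Editor's note: the lock-bit logic above, summarized (a sketch). Once
 * FEATURE_CONTROL is locked, VMXON works only if the enable bit matching
 * the launch mode is set:
 *	launched with tboot (TXT)     -> needs VMXON_ENABLED_INSIDE_SMX
 *	launched without tboot        -> needs VMXON_ENABLED_OUTSIDE_SMX
 * The middle case - only the inside-SMX bit set but no tboot - gets the
 * warning, since either activating TXT or disabling it in the BIOS would
 * make VMX usable.
 */
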
2102 static void kvm_cpu_vmxon(u64 addr)
2103 {
2104         asm volatile (ASM_VMX_VMXON_RAX
2105                         : : "a"(&addr), "m"(addr)
2106                         : "memory", "cc");
2107 }
2108
2109 static int hardware_enable(void *garbage)
2110 {
2111         int cpu = raw_smp_processor_id();
2112         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2113         u64 old, test_bits;
2114
2115         if (read_cr4() & X86_CR4_VMXE)
2116                 return -EBUSY;
2117
2118         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
2119         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
2120
2121         test_bits = FEATURE_CONTROL_LOCKED;
2122         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
2123         if (tboot_enabled())
2124                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
2125
2126         if ((old & test_bits) != test_bits) {
2127                 /* enable and lock */
2128                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
2129         }
2130         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
2131
2132         if (vmm_exclusive) {
2133                 kvm_cpu_vmxon(phys_addr);
2134                 ept_sync_global();
2135         }
2136
2137         store_gdt(&__get_cpu_var(host_gdt));
2138
2139         return 0;
2140 }
2141
2142 static void vmclear_local_loaded_vmcss(void)
2143 {
2144         int cpu = raw_smp_processor_id();
2145         struct loaded_vmcs *v, *n;
2146
2147         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2148                                  loaded_vmcss_on_cpu_link)
2149                 __loaded_vmcs_clear(v);
2150 }
2151
2152
2153 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
2154  * tricks.
2155  */
2156 static void kvm_cpu_vmxoff(void)
2157 {
2158         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
2159 }
2160
2161 static void hardware_disable(void *garbage)
2162 {
2163         if (vmm_exclusive) {
2164                 vmclear_local_loaded_vmcss();
2165                 kvm_cpu_vmxoff();
2166         }
2167         write_cr4(read_cr4() & ~X86_CR4_VMXE);
2168 }
2169
2170 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
2171                                       u32 msr, u32 *result)
2172 {
2173         u32 vmx_msr_low, vmx_msr_high;
2174         u32 ctl = ctl_min | ctl_opt;
2175
2176         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2177
2178         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2179         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2180
2181         /* Ensure the minimum (required) set of control bits is supported. */
2182         if (ctl_min & ~ctl)
2183                 return -EIO;
2184
2185         *result = ctl;
2186         return 0;
2187 }
2188
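/*
 * Editor's note: a worked example of the adjustment above, with
 * hypothetical values. Say ctl_min = 0x06, ctl_opt = 0x88, and the
 * capability MSR reads low = 0x16 (must-be-one) and high = 0x9e
 * (may-be-one):
 *	ctl  = 0x06 | 0x88 = 0x8e
 *	ctl &= 0x9e (drop optional bits the CPU can't set)  -> 0x8e
 *	ctl |= 0x16 (force bits the CPU requires)           -> 0x9e
 * ctl_min & ~ctl == 0, so 0x9e is accepted into *result; had a required
 * bit been missing from high, the function would return -EIO instead.
 */
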
2189 static __init bool allow_1_setting(u32 msr, u32 ctl)
2190 {
2191         u32 vmx_msr_low, vmx_msr_high;
2192
2193         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2194         return vmx_msr_high & ctl;
2195 }
2196
2197 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2198 {
2199         u32 vmx_msr_low, vmx_msr_high;
2200         u32 min, opt, min2, opt2;
2201         u32 _pin_based_exec_control = 0;
2202         u32 _cpu_based_exec_control = 0;
2203         u32 _cpu_based_2nd_exec_control = 0;
2204         u32 _vmexit_control = 0;
2205         u32 _vmentry_control = 0;
2206
2207         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
2208         opt = PIN_BASED_VIRTUAL_NMIS;
2209         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2210                                 &_pin_based_exec_control) < 0)
2211                 return -EIO;
2212
2213         min =
2214 #ifdef CONFIG_X86_64
2215               CPU_BASED_CR8_LOAD_EXITING |
2216               CPU_BASED_CR8_STORE_EXITING |
2217 #endif
2218               CPU_BASED_CR3_LOAD_EXITING |
2219               CPU_BASED_CR3_STORE_EXITING |
2220               CPU_BASED_USE_IO_BITMAPS |
2221               CPU_BASED_MOV_DR_EXITING |
2222               CPU_BASED_USE_TSC_OFFSETING |
2223               CPU_BASED_MWAIT_EXITING |
2224               CPU_BASED_MONITOR_EXITING |
2225               CPU_BASED_INVLPG_EXITING;
2226
2227         if (yield_on_hlt)
2228                 min |= CPU_BASED_HLT_EXITING;
2229
2230         opt = CPU_BASED_TPR_SHADOW |
2231               CPU_BASED_USE_MSR_BITMAPS |
2232               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2233         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2234                                 &_cpu_based_exec_control) < 0)
2235                 return -EIO;
2236 #ifdef CONFIG_X86_64
2237         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2238                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2239                                            ~CPU_BASED_CR8_STORE_EXITING;
2240 #endif
2241         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2242                 min2 = 0;
2243                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2244                         SECONDARY_EXEC_WBINVD_EXITING |
2245                         SECONDARY_EXEC_ENABLE_VPID |
2246                         SECONDARY_EXEC_ENABLE_EPT |
2247                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
2248                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
2249                         SECONDARY_EXEC_RDTSCP;
2250                 if (adjust_vmx_controls(min2, opt2,
2251                                         MSR_IA32_VMX_PROCBASED_CTLS2,
2252                                         &_cpu_based_2nd_exec_control) < 0)
2253                         return -EIO;
2254         }
2255 #ifndef CONFIG_X86_64
2256         if (!(_cpu_based_2nd_exec_control &
2257                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2258                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2259 #endif
2260         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
2261                 /* CR3 accesses and invlpg don't need to cause VM exits when EPT
2262                    is enabled */
2263                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2264                                              CPU_BASED_CR3_STORE_EXITING |
2265                                              CPU_BASED_INVLPG_EXITING);
2266                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
2267                       vmx_capability.ept, vmx_capability.vpid);
2268         }
2269
2270         min = 0;
2271 #ifdef CONFIG_X86_64
2272         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2273 #endif
2274         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
2275         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2276                                 &_vmexit_control) < 0)
2277                 return -EIO;
2278
2279         min = 0;
2280         opt = VM_ENTRY_LOAD_IA32_PAT;
2281         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2282                                 &_vmentry_control) < 0)
2283                 return -EIO;
2284
2285         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2286
2287         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2288         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2289                 return -EIO;
2290
2291 #ifdef CONFIG_X86_64
2292         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2293         if (vmx_msr_high & (1u<<16))
2294                 return -EIO;
2295 #endif
2296
2297         /* Require Write-Back (WB) memory type for VMCS accesses. */
2298         if (((vmx_msr_high >> 18) & 15) != 6)
2299                 return -EIO;
2300
2301         vmcs_conf->size = vmx_msr_high & 0x1fff;
2302         vmcs_conf->order = get_order(vmcs_conf->size);
2303         vmcs_conf->revision_id = vmx_msr_low;
2304
2305         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2306         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2307         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2308         vmcs_conf->vmexit_ctrl         = _vmexit_control;
2309         vmcs_conf->vmentry_ctrl        = _vmentry_control;
2310
2311         cpu_has_load_ia32_efer =
2312                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2313                                 VM_ENTRY_LOAD_IA32_EFER)
2314                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2315                                    VM_EXIT_LOAD_IA32_EFER);
2316
2317         return 0;
2318 }
2319
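/*
 * Editor's note: how the MSR_IA32_VMX_BASIC checks in setup_vmcs_config()
 * above decode the MSR's high dword (a sketch of the SDM layout): MSR
 * bits 44:32 (vmx_msr_high & 0x1fff) give the VMCS region size, bit 48
 * (vmx_msr_high & (1u << 16)) restricts VMXON/VMCS addresses to 32 bits
 * and must be 0 on 64-bit CPUs, and bits 53:50 ((vmx_msr_high >> 18) & 15)
 * give the VMCS memory type, where 6 means write-back (WB).
 */
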
2320 static struct vmcs *alloc_vmcs_cpu(int cpu)
2321 {
2322         int node = cpu_to_node(cpu);
2323         struct page *pages;
2324         struct vmcs *vmcs;
2325
2326         pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
2327         if (!pages)
2328                 return NULL;
2329         vmcs = page_address(pages);
2330         memset(vmcs, 0, vmcs_config.size);
2331         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
2332         return vmcs;
2333 }
2334
2335 static struct vmcs *alloc_vmcs(void)
2336 {
2337         return alloc_vmcs_cpu(raw_smp_processor_id());
2338 }
2339
2340 static void free_vmcs(struct vmcs *vmcs)
2341 {
2342         free_pages((unsigned long)vmcs, vmcs_config.order);
2343 }
2344
2345 /*
2346  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2347  */
2348 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2349 {
2350         if (!loaded_vmcs->vmcs)
2351                 return;
2352         loaded_vmcs_clear(loaded_vmcs);
2353         free_vmcs(loaded_vmcs->vmcs);
2354         loaded_vmcs->vmcs = NULL;
2355 }
2356
2357 static void free_kvm_area(void)
2358 {
2359         int cpu;
2360
2361         for_each_possible_cpu(cpu) {
2362                 free_vmcs(per_cpu(vmxarea, cpu));
2363                 per_cpu(vmxarea, cpu) = NULL;
2364         }
2365 }
2366
2367 static __init int alloc_kvm_area(void)
2368 {
2369         int cpu;
2370
2371         for_each_possible_cpu(cpu) {
2372                 struct vmcs *vmcs;
2373
2374                 vmcs = alloc_vmcs_cpu(cpu);
2375                 if (!vmcs) {
2376                         free_kvm_area();
2377                         return -ENOMEM;
2378                 }
2379
2380                 per_cpu(vmxarea, cpu) = vmcs;
2381         }
2382         return 0;
2383 }
2384
2385 static __init int hardware_setup(void)
2386 {
2387         if (setup_vmcs_config(&vmcs_config) < 0)
2388                 return -EIO;
2389
2390         if (boot_cpu_has(X86_FEATURE_NX))
2391                 kvm_enable_efer_bits(EFER_NX);
2392
2393         if (!cpu_has_vmx_vpid())
2394                 enable_vpid = 0;
2395
2396         if (!cpu_has_vmx_ept() ||
2397             !cpu_has_vmx_ept_4levels()) {
2398                 enable_ept = 0;
2399                 enable_unrestricted_guest = 0;
2400         }
2401
2402         if (!cpu_has_vmx_unrestricted_guest())
2403                 enable_unrestricted_guest = 0;
2404
2405         if (!cpu_has_vmx_flexpriority())
2406                 flexpriority_enabled = 0;
2407
2408         if (!cpu_has_vmx_tpr_shadow())
2409                 kvm_x86_ops->update_cr8_intercept = NULL;
2410
2411         if (enable_ept && !cpu_has_vmx_ept_2m_page())
2412                 kvm_disable_largepages();
2413
2414         if (!cpu_has_vmx_ple())
2415                 ple_gap = 0;
2416
2417         if (nested)
2418                 nested_vmx_setup_ctls_msrs();
2419
2420         return alloc_kvm_area();
2421 }
2422
2423 static __exit void hardware_unsetup(void)
2424 {
2425         free_kvm_area();
2426 }
2427
2428 static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
2429 {
2430         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2431
2432         if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
2433                 vmcs_write16(sf->selector, save->selector);
2434                 vmcs_writel(sf->base, save->base);
2435                 vmcs_write32(sf->limit, save->limit);
2436                 vmcs_write32(sf->ar_bytes, save->ar);
2437         } else {
2438                 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
2439                         << AR_DPL_SHIFT;
2440                 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
2441         }
2442 }
2443
2444 static void enter_pmode(struct kvm_vcpu *vcpu)
2445 {
2446         unsigned long flags;
2447         struct vcpu_vmx *vmx = to_vmx(vcpu);
2448
2449         vmx->emulation_required = 1;
2450         vmx->rmode.vm86_active = 0;
2451
2452         vmx_segment_cache_clear(vmx);
2453
2454         vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
2455         vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
2456         vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
2457         vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
2458
2459         flags = vmcs_readl(GUEST_RFLAGS);
2460         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2461         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2462         vmcs_writel(GUEST_RFLAGS, flags);
2463
2464         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
2465                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
2466
2467         update_exception_bitmap(vcpu);
2468
2469         if (emulate_invalid_guest_state)
2470                 return;
2471
2472         fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
2473         fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
2474         fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
2475         fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
2476
2477         vmx_segment_cache_clear(vmx);
2478
2479         vmcs_write16(GUEST_SS_SELECTOR, 0);
2480         vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
2481
2482         vmcs_write16(GUEST_CS_SELECTOR,
2483                      vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
2484         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
2485 }
2486
2487 static gva_t rmode_tss_base(struct kvm *kvm)
2488 {
2489         if (!kvm->arch.tss_addr) {
2490                 struct kvm_memslots *slots;
2491                 gfn_t base_gfn;
2492
2493                 slots = kvm_memslots(kvm);
2494                 base_gfn = slots->memslots[0].base_gfn +
2495                                  slots->memslots[0].npages - 3;
2496                 return base_gfn << PAGE_SHIFT;
2497         }
2498         return kvm->arch.tss_addr;
2499 }
2500
2501 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
2502 {
2503         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2504
2505         save->selector = vmcs_read16(sf->selector);
2506         save->base = vmcs_readl(sf->base);
2507         save->limit = vmcs_read32(sf->limit);
2508         save->ar = vmcs_read32(sf->ar_bytes);
2509         vmcs_write16(sf->selector, save->base >> 4);
2510         vmcs_write32(sf->base, save->base & 0xffff0);
2511         vmcs_write32(sf->limit, 0xffff);
2512         vmcs_write32(sf->ar_bytes, 0xf3);
2513         if (save->base & 0xf)
2514                 printk_once(KERN_WARNING "kvm: segment base is not paragraph"
2515                             " aligned when entering protected mode (seg=%d)",
2516                             seg);
2517 }
2518
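/*
 * Editor's note: the real-mode segment arithmetic above with a concrete,
 * hypothetical base. For save->base == 0x12345 the vmcs gets selector
 * 0x1234 (base >> 4) and base 0x12340 (base & 0xffff0) - the paragraph-
 * aligned part - and because 0x12345 & 0xf != 0 the printk_once() fires:
 * real-mode addressing can only express bases that are a multiple of 16
 * (selector * 16).
 */
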
2519 static void enter_rmode(struct kvm_vcpu *vcpu)
2520 {
2521         unsigned long flags;
2522         struct vcpu_vmx *vmx = to_vmx(vcpu);
2523
2524         if (enable_unrestricted_guest)
2525                 return;
2526
2527         vmx->emulation_required = 1;
2528         vmx->rmode.vm86_active = 1;
2529
2530         /*
2531          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
2532          * the vcpu. Call it here with a phys address pointing 16M below 4G.
2533          */
2534         if (!vcpu->kvm->arch.tss_addr) {
2535                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
2536                              "called before entering vcpu\n");
2537                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2538                 vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
2539                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2540         }
2541
2542         vmx_segment_cache_clear(vmx);
2543
2544         vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
2545         vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
2546         vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
2547
2548         vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
2549         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
2550
2551         vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
2552         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2553
2554         flags = vmcs_readl(GUEST_RFLAGS);
2555         vmx->rmode.save_rflags = flags;
2556
2557         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2558
2559         vmcs_writel(GUEST_RFLAGS, flags);
2560         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
2561         update_exception_bitmap(vcpu);
2562
2563         if (emulate_invalid_guest_state)
2564                 goto continue_rmode;
2565
2566         vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
2567         vmcs_write32(GUEST_SS_LIMIT, 0xffff);
2568         vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
2569
2570         vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
2571         vmcs_write32(GUEST_CS_LIMIT, 0xffff);
2572         if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
2573                 vmcs_writel(GUEST_CS_BASE, 0xf0000);
2574         vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
2575
2576         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
2577         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
2578         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
2579         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
2580
2581 continue_rmode:
2582         kvm_mmu_reset_context(vcpu);
2583 }
2584
2585 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
2586 {
2587         struct vcpu_vmx *vmx = to_vmx(vcpu);
2588         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
2589
2590         if (!msr)
2591                 return;
2592
2593         /*
2594          * Force kernel_gs_base reloading before EFER changes, as control
2595          * of this msr depends on is_long_mode().
2596          */
2597         vmx_load_host_state(to_vmx(vcpu));
2598         vcpu->arch.efer = efer;
2599         if (efer & EFER_LMA) {
2600                 vmcs_write32(VM_ENTRY_CONTROLS,
2601                              vmcs_read32(VM_ENTRY_CONTROLS) |
2602                              VM_ENTRY_IA32E_MODE);
2603                 msr->data = efer;
2604         } else {
2605                 vmcs_write32(VM_ENTRY_CONTROLS,
2606                              vmcs_read32(VM_ENTRY_CONTROLS) &
2607                              ~VM_ENTRY_IA32E_MODE);
2608
2609                 msr->data = efer & ~EFER_LME;
2610         }
2611         setup_msrs(vmx);
2612 }
2613
2614 #ifdef CONFIG_X86_64
2615
2616 static void enter_lmode(struct kvm_vcpu *vcpu)
2617 {
2618         u32 guest_tr_ar;
2619
2620         vmx_segment_cache_clear(to_vmx(vcpu));
2621
2622         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
2623         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
2624                 printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
2625                        __func__);
2626                 vmcs_write32(GUEST_TR_AR_BYTES,
2627                              (guest_tr_ar & ~AR_TYPE_MASK)
2628                              | AR_TYPE_BUSY_64_TSS);
2629         }
2630         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
2631 }
2632
2633 static void exit_lmode(struct kvm_vcpu *vcpu)
2634 {
2635         vmcs_write32(VM_ENTRY_CONTROLS,
2636                      vmcs_read32(VM_ENTRY_CONTROLS)
2637                      & ~VM_ENTRY_IA32E_MODE);
2638         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
2639 }
2640
2641 #endif
2642
2643 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2644 {
2645         vpid_sync_context(to_vmx(vcpu));
2646         if (enable_ept) {
2647                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2648                         return;
2649                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
2650         }
2651 }
2652
2653 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2654 {
2655         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2656
2657         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
2658         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
2659 }
2660
2661 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
2662 {
2663         if (enable_ept && is_paging(vcpu))
2664                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2665         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
2666 }
2667
2668 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
2669 {
2670         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2671
2672         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
2673         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
2674 }
2675
2676 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
2677 {
2678         if (!test_bit(VCPU_EXREG_PDPTR,
2679                       (unsigned long *)&vcpu->arch.regs_dirty))
2680                 return;
2681
2682         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
2683                 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
2684                 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
2685                 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
2686                 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
2687         }
2688 }
2689
2690 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
2691 {
2692         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
2693                 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
2694                 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
2695                 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
2696                 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
2697         }
2698
2699         __set_bit(VCPU_EXREG_PDPTR,
2700                   (unsigned long *)&vcpu->arch.regs_avail);
2701         __set_bit(VCPU_EXREG_PDPTR,
2702                   (unsigned long *)&vcpu->arch.regs_dirty);
2703 }
2704
2705 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
2706
2707 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
2708                                         unsigned long cr0,
2709                                         struct kvm_vcpu *vcpu)
2710 {
2711         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
2712                 vmx_decache_cr3(vcpu);
2713         if (!(cr0 & X86_CR0_PG)) {
2714                 /* From paging/starting to nonpaging */
2715                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
2716                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
2717                              (CPU_BASED_CR3_LOAD_EXITING |
2718                               CPU_BASED_CR3_STORE_EXITING));
2719                 vcpu->arch.cr0 = cr0;
2720                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
2721         } else if (!is_paging(vcpu)) {
2722                 /* From nonpaging to paging */
2723                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
2724                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
2725                              ~(CPU_BASED_CR3_LOAD_EXITING |
2726                                CPU_BASED_CR3_STORE_EXITING));
2727                 vcpu->arch.cr0 = cr0;
2728                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
2729         }
2730
2731         if (!(cr0 & X86_CR0_WP))
2732                 *hw_cr0 &= ~X86_CR0_WP;
2733 }
2734
2735 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2736 {
2737         struct vcpu_vmx *vmx = to_vmx(vcpu);
2738         unsigned long hw_cr0;
2739
2740         if (enable_unrestricted_guest)
2741                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
2742                         | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
2743         else
2744                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
2745
2746         if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
2747                 enter_pmode(vcpu);
2748
2749         if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
2750                 enter_rmode(vcpu);
2751
2752 #ifdef CONFIG_X86_64
2753         if (vcpu->arch.efer & EFER_LME) {
2754                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
2755                         enter_lmode(vcpu);
2756                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
2757                         exit_lmode(vcpu);
2758         }
2759 #endif
2760
2761         if (enable_ept)
2762                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
2763
2764         if (!vcpu->fpu_active)
2765                 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
2766
2767         vmcs_writel(CR0_READ_SHADOW, cr0);
2768         vmcs_writel(GUEST_CR0, hw_cr0);
2769         vcpu->arch.cr0 = cr0;
2770         __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
2771 }
2772
2773 static u64 construct_eptp(unsigned long root_hpa)
2774 {
2775         u64 eptp;
2776
2777         /* TODO write the value reading from MSR */
2778         eptp = VMX_EPT_DEFAULT_MT |
2779                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
2780         eptp |= (root_hpa & PAGE_MASK);
2781
2782         return eptp;
2783 }
2784
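/*
 * Editor's note: a sketch of the resulting EPTP layout, assuming the
 * usual constants VMX_EPT_DEFAULT_MT == 6 (write-back) and
 * VMX_EPT_DEFAULT_GAW == 3 (a 4-level walk, encoded as levels - 1 in
 * bits 5:3). Then eptp = 6 | (3 << 3) | root_hpa = root_hpa | 0x1e,
 * e.g. root_hpa 0x123456000 yields eptp 0x12345601e.
 */
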
2785 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
2786 {
2787         unsigned long guest_cr3;
2788         u64 eptp;
2789
2790         guest_cr3 = cr3;
2791         if (enable_ept) {
2792                 eptp = construct_eptp(cr3);
2793                 vmcs_write64(EPT_POINTER, eptp);
2794                 guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
2795                         vcpu->kvm->arch.ept_identity_map_addr;
2796                 ept_load_pdptrs(vcpu);
2797         }
2798
2799         vmx_flush_tlb(vcpu);
2800         vmcs_writel(GUEST_CR3, guest_cr3);
2801 }
2802
2803 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2804 {
2805         unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
2806                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
2807
2808         if (cr4 & X86_CR4_VMXE) {
2809                 /*
2810                  * To use VMXON (and later other VMX instructions), a guest
2811                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
2812                  * So basically the check on whether to allow nested VMX
2813                  * is here.
2814                  */
2815                 if (!nested_vmx_allowed(vcpu))
2816                         return 1;
2817         } else if (to_vmx(vcpu)->nested.vmxon)
2818                 return 1;
2819
2820         vcpu->arch.cr4 = cr4;
2821         if (enable_ept) {
2822                 if (!is_paging(vcpu)) {
2823                         hw_cr4 &= ~X86_CR4_PAE;
2824                         hw_cr4 |= X86_CR4_PSE;
2825                 } else if (!(cr4 & X86_CR4_PAE)) {
2826                         hw_cr4 &= ~X86_CR4_PAE;
2827                 }
2828         }
2829
2830         vmcs_writel(CR4_READ_SHADOW, cr4);
2831         vmcs_writel(GUEST_CR4, hw_cr4);
2832         return 0;
2833 }
2834
2835 static void vmx_get_segment(struct kvm_vcpu *vcpu,
2836                             struct kvm_segment *var, int seg)
2837 {
2838         struct vcpu_vmx *vmx = to_vmx(vcpu);
2839         struct kvm_save_segment *save;
2840         u32 ar;
2841
2842         if (vmx->rmode.vm86_active
2843             && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
2844                 || seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
2845                 || seg == VCPU_SREG_GS)
2846             && !emulate_invalid_guest_state) {
2847                 switch (seg) {
2848                 case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
2849                 case VCPU_SREG_ES: save = &vmx->rmode.es; break;
2850                 case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
2851                 case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
2852                 case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
2853                 default: BUG();
2854                 }
2855                 var->selector = save->selector;
2856                 var->base = save->base;
2857                 var->limit = save->limit;
2858                 ar = save->ar;
2859                 if (seg == VCPU_SREG_TR
2860                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
2861                         goto use_saved_rmode_seg;
2862         }
2863         var->base = vmx_read_guest_seg_base(vmx, seg);
2864         var->limit = vmx_read_guest_seg_limit(vmx, seg);
2865         var->selector = vmx_read_guest_seg_selector(vmx, seg);
2866         ar = vmx_read_guest_seg_ar(vmx, seg);
2867 use_saved_rmode_seg:
2868         if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
2869                 ar = 0;
2870         var->type = ar & 15;
2871         var->s = (ar >> 4) & 1;
2872         var->dpl = (ar >> 5) & 3;
2873         var->present = (ar >> 7) & 1;
2874         var->avl = (ar >> 12) & 1;
2875         var->l = (ar >> 13) & 1;
2876         var->db = (ar >> 14) & 1;
2877         var->g = (ar >> 15) & 1;
2878         var->unusable = (ar >> 16) & 1;
2879 }
2880
2881 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2882 {
2883         struct kvm_segment s;
2884
2885         if (to_vmx(vcpu)->rmode.vm86_active) {
2886                 vmx_get_segment(vcpu, &s, seg);
2887                 return s.base;
2888         }
2889         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
2890 }
2891
2892 static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
2893 {
2894         if (!is_protmode(vcpu))
2895                 return 0;
2896
2897         if (!is_long_mode(vcpu)
2898             && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
2899                 return 3;
2900
2901         return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3;
2902 }
2903
2904 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
2905 {
2906         if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
2907                 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
2908                 to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu);
2909         }
2910         return to_vmx(vcpu)->cpl;
2911 }
2912
2913
2914 static u32 vmx_segment_access_rights(struct kvm_segment *var)
2915 {
2916         u32 ar;
2917
2918         if (var->unusable)
2919                 ar = 1 << 16;
2920         else {
2921                 ar = var->type & 15;
2922                 ar |= (var->s & 1) << 4;
2923                 ar |= (var->dpl & 3) << 5;
2924                 ar |= (var->present & 1) << 7;
2925                 ar |= (var->avl & 1) << 12;
2926                 ar |= (var->l & 1) << 13;
2927                 ar |= (var->db & 1) << 14;
2928                 ar |= (var->g & 1) << 15;
2929         }
2930         if (ar == 0) /* a 0 value means unusable */
2931                 ar = AR_UNUSABLE_MASK;
2932
2933         return ar;
2934 }
2935
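/*
 * Editor's note: a worked example of the packing above, for a
 * hypothetical flat 32-bit code segment with type = 0xb, s = 1, dpl = 0,
 * present = 1, db = 1 and g = 1:
 *	0xb | (1 << 4) | (1 << 7) | (1 << 14) | (1 << 15) = 0xc09b,
 * the familiar AR value for a flat code segment. An unusable segment
 * packs to 1 << 16 (AR_UNUSABLE_MASK) regardless of the other fields.
 */
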
2936 static void vmx_set_segment(struct kvm_vcpu *vcpu,
2937                             struct kvm_segment *var, int seg)
2938 {
2939         struct vcpu_vmx *vmx = to_vmx(vcpu);
2940         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2941         u32 ar;
2942
2943         vmx_segment_cache_clear(vmx);
2944
2945         if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
2946                 vmcs_write16(sf->selector, var->selector);
2947                 vmx->rmode.tr.selector = var->selector;
2948                 vmx->rmode.tr.base = var->base;
2949                 vmx->rmode.tr.limit = var->limit;
2950                 vmx->rmode.tr.ar = vmx_segment_access_rights(var);
2951                 return;
2952         }
2953         vmcs_writel(sf->base, var->base);
2954         vmcs_write32(sf->limit, var->limit);
2955         vmcs_write16(sf->selector, var->selector);
2956         if (vmx->rmode.vm86_active && var->s) {
2957                 /*
2958                  * Hack real-mode segments into vm86 compatibility.
2959                  */
2960                 if (var->base == 0xffff0000 && var->selector == 0xf000)
2961                         vmcs_writel(sf->base, 0xf0000);
2962                 ar = 0xf3;
2963         } else
2964                 ar = vmx_segment_access_rights(var);
2965
2966         /*
2967          *   Fix the "Accessed" bit in the AR field of segment registers for
2968          * older qemu binaries.
2969          *   The IA32 architecture specifies that at processor reset the
2970          * "Accessed" bit in the AR field of segment registers is 1, but qemu
2971          * sets it to 0 in its userland code. This causes an invalid-guest-
2972          * state vmexit when "unrestricted guest" mode is turned on.
2973          *   A fix for this cpu_reset setup issue is being pushed into the
2974          * qemu tree. Newer qemu binaries with that fix will not need this
2975          * kvm hack.
2976          */
2977         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
2978                 ar |= 0x1; /* Accessed */
2979
2980         vmcs_write32(sf->ar_bytes, ar);
2981         __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
2982 }
2983
2984 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2985 {
2986         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
2987
2988         *db = (ar >> 14) & 1;
2989         *l = (ar >> 13) & 1;
2990 }
2991
2992 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2993 {
2994         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
2995         dt->address = vmcs_readl(GUEST_IDTR_BASE);
2996 }
2997
2998 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2999 {
3000         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3001         vmcs_writel(GUEST_IDTR_BASE, dt->address);
3002 }
3003
3004 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3005 {
3006         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3007         dt->address = vmcs_readl(GUEST_GDTR_BASE);
3008 }
3009
3010 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3011 {
3012         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3013         vmcs_writel(GUEST_GDTR_BASE, dt->address);
3014 }
3015
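/*
 * A segment is consistent with vm86 emulation of real mode only if
 * base == selector << 4, the limit is 64K and the access rights encode
 * a present, DPL-3, accessed, read/write data segment (0xf3).
 */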
3016 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3017 {
3018         struct kvm_segment var;
3019         u32 ar;
3020
3021         vmx_get_segment(vcpu, &var, seg);
3022         ar = vmx_segment_access_rights(&var);
3023
3024         if (var.base != (var.selector << 4))
3025                 return false;
3026         if (var.limit != 0xffff)
3027                 return false;
3028         if (ar != 0xf3)
3029                 return false;
3030
3031         return true;
3032 }
3033
3034 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3035 {
3036         struct kvm_segment cs;
3037         unsigned int cs_rpl;
3038
3039         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3040         cs_rpl = cs.selector & SELECTOR_RPL_MASK;
3041
3042         if (cs.unusable)
3043                 return false;
3044         if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
3045                 return false;
3046         if (!cs.s)
3047                 return false;
3048         if (cs.type & AR_TYPE_WRITEABLE_MASK) {
3049                 if (cs.dpl > cs_rpl)
3050                         return false;
3051         } else {
3052                 if (cs.dpl != cs_rpl)
3053                         return false;
3054         }
3055         if (!cs.present)
3056                 return false;
3057
3058         /* TODO: Add Reserved field check; this will require a new member in the kvm_segment_field structure */
3059         return true;
3060 }
3061
3062 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3063 {
3064         struct kvm_segment ss;
3065         unsigned int ss_rpl;
3066
3067         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3068         ss_rpl = ss.selector & SELECTOR_RPL_MASK;
3069
3070         if (ss.unusable)
3071                 return true;
3072         if (ss.type != 3 && ss.type != 7)
3073                 return false;
3074         if (!ss.s)
3075                 return false;
3076         if (ss.dpl != ss_rpl) /* DPL != RPL */
3077                 return false;
3078         if (!ss.present)
3079                 return false;
3080
3081         return true;
3082 }
3083
3084 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3085 {
3086         struct kvm_segment var;
3087         unsigned int rpl;
3088
3089         vmx_get_segment(vcpu, &var, seg);
3090         rpl = var.selector & SELECTOR_RPL_MASK;
3091
3092         if (var.unusable)
3093                 return true;
3094         if (!var.s)
3095                 return false;
3096         if (!var.present)
3097                 return false;
3098         if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
3099                 if (var.dpl < rpl) /* DPL < RPL */
3100                         return false;
3101         }
3102
3103         /* TODO: Add other members to kvm_segment_field to allow
3104          * checking for other access rights flags.
3105          */
3106         return true;
3107 }
3108
3109 static bool tr_valid(struct kvm_vcpu *vcpu)
3110 {
3111         struct kvm_segment tr;
3112
3113         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3114
3115         if (tr.unusable)
3116                 return false;
3117         if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
3118                 return false;
3119         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3120                 return false;
3121         if (!tr.present)
3122                 return false;
3123
3124         return true;
3125 }
3126
3127 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3128 {
3129         struct kvm_segment ldtr;
3130
3131         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3132
3133         if (ldtr.unusable)
3134                 return true;
3135         if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
3136                 return false;
3137         if (ldtr.type != 2)
3138                 return false;
3139         if (!ldtr.present)
3140                 return false;
3141
3142         return true;
3143 }
3144
3145 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3146 {
3147         struct kvm_segment cs, ss;
3148
3149         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3150         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3151
3152         return ((cs.selector & SELECTOR_RPL_MASK) ==
3153                  (ss.selector & SELECTOR_RPL_MASK));
3154 }
3155
3156 /*
3157  * Check if guest state is valid. Returns true if valid, false if
3158  * not.
3159  * We assume that registers are always usable
3160  */
3161 static bool guest_state_valid(struct kvm_vcpu *vcpu)
3162 {
3163         /* real mode guest state checks */
3164         if (!is_protmode(vcpu)) {
3165                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3166                         return false;
3167                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3168                         return false;
3169                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3170                         return false;
3171                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3172                         return false;
3173                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3174                         return false;
3175                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3176                         return false;
3177         } else {
3178         /* protected mode guest state checks */
3179                 if (!cs_ss_rpl_check(vcpu))
3180                         return false;
3181                 if (!code_segment_valid(vcpu))
3182                         return false;
3183                 if (!stack_segment_valid(vcpu))
3184                         return false;
3185                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3186                         return false;
3187                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3188                         return false;
3189                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3190                         return false;
3191                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3192                         return false;
3193                 if (!tr_valid(vcpu))
3194                         return false;
3195                 if (!ldtr_valid(vcpu))
3196                         return false;
3197         }
3198         /* TODO:
3199          * - Add checks on RIP
3200          * - Add checks on RFLAGS
3201          */
3202
3203         return true;
3204 }
3205
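/*
 * Build a minimal real-mode TSS in guest memory: zero the three TSS
 * pages, point the I/O bitmap base past the interrupt redirection map,
 * and terminate the I/O bitmap with an all-ones byte.
 */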
3206 static int init_rmode_tss(struct kvm *kvm)
3207 {
3208         gfn_t fn;
3209         u16 data = 0;
3210         int r, idx, ret = 0;
3211
3212         idx = srcu_read_lock(&kvm->srcu);
3213         fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
3214         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3215         if (r < 0)
3216                 goto out;
3217         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3218         r = kvm_write_guest_page(kvm, fn++, &data,
3219                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
3220         if (r < 0)
3221                 goto out;
3222         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
3223         if (r < 0)
3224                 goto out;
3225         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3226         if (r < 0)
3227                 goto out;
3228         data = ~0;
3229         r = kvm_write_guest_page(kvm, fn, &data,
3230                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
3231                                  sizeof(u8));
3232         if (r < 0)
3233                 goto out;
3234
3235         ret = 1;
3236 out:
3237         srcu_read_unlock(&kvm->srcu, idx);
3238         return ret;
3239 }
3240
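/*
 * Populate a single page table with 1024 4MB PSE entries (i << 22),
 * identity-mapping the low 4GB of guest physical memory for use while
 * the guest runs in real mode under EPT.
 */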
3241 static int init_rmode_identity_map(struct kvm *kvm)
3242 {
3243         int i, idx, r, ret;
3244         pfn_t identity_map_pfn;
3245         u32 tmp;
3246
3247         if (!enable_ept)
3248                 return 1;
3249         if (unlikely(!kvm->arch.ept_identity_pagetable)) {
3250                 printk(KERN_ERR "EPT: identity-mapping pagetable "
3251                         "hasn't been allocated!\n");
3252                 return 0;
3253         }
3254         if (likely(kvm->arch.ept_identity_pagetable_done))
3255                 return 1;
3256         ret = 0;
3257         identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
3258         idx = srcu_read_lock(&kvm->srcu);
3259         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
3260         if (r < 0)
3261                 goto out;
3262         /* Set up identity-mapping pagetable for EPT in real mode */
3263         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
3264                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3265                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3266                 r = kvm_write_guest_page(kvm, identity_map_pfn,
3267                                 &tmp, i * sizeof(tmp), sizeof(tmp));
3268                 if (r < 0)
3269                         goto out;
3270         }
3271         kvm->arch.ept_identity_pagetable_done = true;
3272         ret = 1;
3273 out:
3274         srcu_read_unlock(&kvm->srcu, idx);
3275         return ret;
3276 }
3277
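/*
 * Reset a segment register to its initial state. For unrestricted
 * guests, AR value 0x93 encodes a present, accessed, read/write data
 * segment (|0x08 turns CS into a code segment); otherwise 0xf3 adds
 * DPL 3 for the vm86-based real-mode emulation.
 */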
3278 static void seg_setup(int seg)
3279 {
3280         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3281         unsigned int ar;
3282
3283         vmcs_write16(sf->selector, 0);
3284         vmcs_writel(sf->base, 0);
3285         vmcs_write32(sf->limit, 0xffff);
3286         if (enable_unrestricted_guest) {
3287                 ar = 0x93;
3288                 if (seg == VCPU_SREG_CS)
3289                         ar |= 0x08; /* code segment */
3290         } else
3291                 ar = 0xf3;
3292
3293         vmcs_write32(sf->ar_bytes, ar);
3294 }
3295
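/*
 * Back the APIC-access page with a private memslot at the default
 * xAPIC base (0xfee00000); its physical address is later written to
 * APIC_ACCESS_ADDR when APIC accesses are virtualized.
 */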
3296 static int alloc_apic_access_page(struct kvm *kvm)
3297 {
3298         struct kvm_userspace_memory_region kvm_userspace_mem;
3299         int r = 0;
3300
3301         mutex_lock(&kvm->slots_lock);
3302         if (kvm->arch.apic_access_page)
3303                 goto out;
3304         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
3305         kvm_userspace_mem.flags = 0;
3306         kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
3307         kvm_userspace_mem.memory_size = PAGE_SIZE;
3308         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
3309         if (r)
3310                 goto out;
3311
3312         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
3313 out:
3314         mutex_unlock(&kvm->slots_lock);
3315         return r;
3316 }
3317
3318 static int alloc_identity_pagetable(struct kvm *kvm)
3319 {
3320         struct kvm_userspace_memory_region kvm_userspace_mem;
3321         int r = 0;
3322
3323         mutex_lock(&kvm->slots_lock);
3324         if (kvm->arch.ept_identity_pagetable)
3325                 goto out;
3326         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
3327         kvm_userspace_mem.flags = 0;
3328         kvm_userspace_mem.guest_phys_addr =
3329                 kvm->arch.ept_identity_map_addr;
3330         kvm_userspace_mem.memory_size = PAGE_SIZE;
3331         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
3332         if (r)
3333                 goto out;
3334
3335         kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
3336                         kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
3337 out:
3338         mutex_unlock(&kvm->slots_lock);
3339         return r;
3340 }
3341
3342 static void allocate_vpid(struct vcpu_vmx *vmx)
3343 {
3344         int vpid;
3345
3346         vmx->vpid = 0;
3347         if (!enable_vpid)
3348                 return;
3349         spin_lock(&vmx_vpid_lock);
3350         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3351         if (vpid < VMX_NR_VPIDS) {
3352                 vmx->vpid = vpid;
3353                 __set_bit(vpid, vmx_vpid_bitmap);
3354         }
3355         spin_unlock(&vmx_vpid_lock);
3356 }
3357
3358 static void free_vpid(struct vcpu_vmx *vmx)
3359 {
3360         if (!enable_vpid)
3361                 return;
3362         spin_lock(&vmx_vpid_lock);
3363         if (vmx->vpid != 0)
3364                 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3365         spin_unlock(&vmx_vpid_lock);
3366 }
3367
3368 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
3369 {
3370         int f = sizeof(unsigned long);
3371
3372         if (!cpu_has_vmx_msr_bitmap())
3373                 return;
3374
3375         /*
3376          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
3377          * have the write-low and read-high bitmap offsets the wrong way round.
3378          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
3379          */
3380         if (msr <= 0x1fff) {
3381                 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
3382                 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
3383         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
3384                 msr &= 0x1fff;
3385                 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
3386                 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
3387         }
3388 }
3389
3390 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
3391 {
3392         if (!longmode_only)
3393                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
3394         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
3395 }
3396
3397 /*
3398  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
3399  * will not change in the lifetime of the guest.
3400  * Note that host-state that does change is set elsewhere. E.g., host-state
3401  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
3402  */
3403 static void vmx_set_constant_host_state(void)
3404 {
3405         u32 low32, high32;
3406         unsigned long tmpl;
3407         struct desc_ptr dt;
3408
3409         vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
3410         vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
3411         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
3412
3413         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
3414         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
3415         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
3416         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
3417         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
3418
3419         native_store_idt(&dt);
3420         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
3421
3422         asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
3423         vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
3424
3425         rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
3426         vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
3427         rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
3428         vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
3429
3430         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
3431                 rdmsr(MSR_IA32_CR_PAT, low32, high32);
3432                 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
3433         }
3434 }
3435
3436 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
3437 {
3438         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
3439         if (enable_ept)
3440                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
3441         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
3442 }
3443
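/*
 * Without a TPR shadow, CR8 loads and stores must exit so KVM can
 * emulate the TPR; without EPT, CR3 accesses and INVLPG must exit so
 * the shadow MMU can track the guest page tables.
 */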
3444 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
3445 {
3446         u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
3447         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
3448                 exec_control &= ~CPU_BASED_TPR_SHADOW;
3449 #ifdef CONFIG_X86_64
3450                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
3451                                 CPU_BASED_CR8_LOAD_EXITING;
3452 #endif
3453         }
3454         if (!enable_ept)
3455                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
3456                                 CPU_BASED_CR3_LOAD_EXITING  |
3457                                 CPU_BASED_INVLPG_EXITING;
3458         return exec_control;
3459 }
3460
3461 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
3462 {
3463         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
3464         if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
3465                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3466         if (vmx->vpid == 0)
3467                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
3468         if (!enable_ept) {
3469                 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
3470                 enable_unrestricted_guest = 0;
3471         }
3472         if (!enable_unrestricted_guest)
3473                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
3474         if (!ple_gap)
3475                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
3476         return exec_control;
3477 }
3478
3479 /*
3480  * Sets up the vmcs for emulated real mode.
3481  */
3482 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
3483 {
3484         unsigned long a;
3485         int i;
3486
3487         /* I/O */
3488         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
3489         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
3490
3491         if (cpu_has_vmx_msr_bitmap())
3492                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
3493
3494         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
3495
3496         /* Control */
3497         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
3498                 vmcs_config.pin_based_exec_ctrl);
3499
3500         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
3501
3502         if (cpu_has_secondary_exec_ctrls()) {
3503                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
3504                                 vmx_secondary_exec_control(vmx));
3505         }
3506
3507         if (ple_gap) {
3508                 vmcs_write32(PLE_GAP, ple_gap);
3509                 vmcs_write32(PLE_WINDOW, ple_window);
3510         }
3511
3512         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
3513         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
3514         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
3515
3516         vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
3517         vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
3518         vmx_set_constant_host_state();
3519 #ifdef CONFIG_X86_64
3520         rdmsrl(MSR_FS_BASE, a);
3521         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
3522         rdmsrl(MSR_GS_BASE, a);
3523         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
3524 #else
3525         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
3526         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
3527 #endif
3528
3529         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
3530         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3531         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
3532         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3533         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
3534
3535         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3536                 u32 msr_low, msr_high;
3537                 u64 host_pat;
3538                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
3539                 host_pat = msr_low | ((u64) msr_high << 32);
3540                 /* Write the default value, following the host PAT */
3541                 vmcs_write64(GUEST_IA32_PAT, host_pat);
3542                 /* Keep arch.pat in sync with GUEST_IA32_PAT */
3543                 vmx->vcpu.arch.pat = host_pat;
3544         }
3545
3546         for (i = 0; i < NR_VMX_MSR; ++i) {
3547                 u32 index = vmx_msr_index[i];
3548                 u32 data_low, data_high;
3549                 int j = vmx->nmsrs;
3550
3551                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
3552                         continue;
3553                 if (wrmsr_safe(index, data_low, data_high) < 0)
3554                         continue;
3555                 vmx->guest_msrs[j].index = i;
3556                 vmx->guest_msrs[j].data = 0;
3557                 vmx->guest_msrs[j].mask = -1ull;
3558                 ++vmx->nmsrs;
3559         }
3560
3561         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
3562
3563         /* 22.2.1, 20.8.1 */
3564         vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
3565
3566         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
3567         set_cr4_guest_host_mask(vmx);
3568
3569         kvm_write_tsc(&vmx->vcpu, 0);
3570
3571         return 0;
3572 }
3573
3574 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
3575 {
3576         struct vcpu_vmx *vmx = to_vmx(vcpu);
3577         u64 msr;
3578         int ret;
3579
3580         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
3581
3582         vmx->rmode.vm86_active = 0;
3583
3584         vmx->soft_vnmi_blocked = 0;
3585
3586         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
3587         kvm_set_cr8(&vmx->vcpu, 0);
3588         msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
3589         if (kvm_vcpu_is_bsp(&vmx->vcpu))
3590                 msr |= MSR_IA32_APICBASE_BSP;
3591         kvm_set_apic_base(&vmx->vcpu, msr);
3592
3593         ret = fx_init(&vmx->vcpu);
3594         if (ret != 0)
3595                 goto out;
3596
3597         vmx_segment_cache_clear(vmx);
3598
3599         seg_setup(VCPU_SREG_CS);
3600         /*
3601          * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
3602          * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
3603          */
3604         if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
3605                 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
3606                 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
3607         } else {
3608                 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
3609                 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
3610         }
3611
3612         seg_setup(VCPU_SREG_DS);
3613         seg_setup(VCPU_SREG_ES);
3614         seg_setup(VCPU_SREG_FS);
3615         seg_setup(VCPU_SREG_GS);
3616         seg_setup(VCPU_SREG_SS);
3617
3618         vmcs_write16(GUEST_TR_SELECTOR, 0);
3619         vmcs_writel(GUEST_TR_BASE, 0);
3620         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
3621         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3622
3623         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
3624         vmcs_writel(GUEST_LDTR_BASE, 0);
3625         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
3626         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
3627
3628         vmcs_write32(GUEST_SYSENTER_CS, 0);
3629         vmcs_writel(GUEST_SYSENTER_ESP, 0);
3630         vmcs_writel(GUEST_SYSENTER_EIP, 0);
3631
3632         vmcs_writel(GUEST_RFLAGS, 0x02);
3633         if (kvm_vcpu_is_bsp(&vmx->vcpu))
3634                 kvm_rip_write(vcpu, 0xfff0);
3635         else
3636                 kvm_rip_write(vcpu, 0);
3637         kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
3638
3639         vmcs_writel(GUEST_DR7, 0x400);
3640
3641         vmcs_writel(GUEST_GDTR_BASE, 0);
3642         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
3643
3644         vmcs_writel(GUEST_IDTR_BASE, 0);
3645         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
3646
3647         vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
3648         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
3649         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
3650
3651         /* Special registers */
3652         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3653
3654         setup_msrs(vmx);
3655
3656         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
3657
3658         if (cpu_has_vmx_tpr_shadow()) {
3659                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
3660                 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
3661                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
3662                                      __pa(vmx->vcpu.arch.apic->regs));
3663                 vmcs_write32(TPR_THRESHOLD, 0);
3664         }
3665
3666         if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
3667                 vmcs_write64(APIC_ACCESS_ADDR,
3668                              page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
3669
3670         if (vmx->vpid != 0)
3671                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
3672
3673         vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
3674         vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
3675         vmx_set_cr4(&vmx->vcpu, 0);
3676         vmx_set_efer(&vmx->vcpu, 0);
3677         vmx_fpu_activate(&vmx->vcpu);
3678         update_exception_bitmap(&vmx->vcpu);
3679
3680         vpid_sync_context(vmx);
3681
3682         ret = 0;
3683
3684         /* HACK: Don't enable emulation on guest boot/reset */
3685         vmx->emulation_required = 0;
3686
3687 out:
3688         return ret;
3689 }
3690
3691 static void enable_irq_window(struct kvm_vcpu *vcpu)
3692 {
3693         u32 cpu_based_vm_exec_control;
3694
3695         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3696         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
3697         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3698 }
3699
3700 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3701 {
3702         u32 cpu_based_vm_exec_control;
3703
3704         if (!cpu_has_virtual_nmis()) {
3705                 enable_irq_window(vcpu);
3706                 return;
3707         }
3708
3709         if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
3710                 enable_irq_window(vcpu);
3711                 return;
3712         }
3713         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3714         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
3715         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3716 }
3717
3718 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
3719 {
3720         struct vcpu_vmx *vmx = to_vmx(vcpu);
3721         uint32_t intr;
3722         int irq = vcpu->arch.interrupt.nr;
3723
3724         trace_kvm_inj_virq(irq);
3725
3726         ++vcpu->stat.irq_injections;
3727         if (vmx->rmode.vm86_active) {
3728                 int inc_eip = 0;
3729                 if (vcpu->arch.interrupt.soft)
3730                         inc_eip = vcpu->arch.event_exit_inst_len;
3731                 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
3732                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3733                 return;
3734         }
3735         intr = irq | INTR_INFO_VALID_MASK;
3736         if (vcpu->arch.interrupt.soft) {
3737                 intr |= INTR_TYPE_SOFT_INTR;
3738                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3739                              vmx->vcpu.arch.event_exit_inst_len);
3740         } else
3741                 intr |= INTR_TYPE_EXT_INTR;
3742         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
3743         vmx_clear_hlt(vcpu);
3744 }
3745
3746 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
3747 {
3748         struct vcpu_vmx *vmx = to_vmx(vcpu);
3749
3750         if (!cpu_has_virtual_nmis()) {
3751                 /*
3752                  * Tracking the NMI-blocked state in software is built upon
3753                  * finding the next open IRQ window. This, in turn, depends on
3754                  * well-behaving guests: They have to keep IRQs disabled at
3755                  * least as long as the NMI handler runs. Otherwise we may
3756                  * cause NMI nesting, maybe breaking the guest. But as this is
3757                  * highly unlikely, we can live with the residual risk.
3758                  */
3759                 vmx->soft_vnmi_blocked = 1;
3760                 vmx->vnmi_blocked_time = 0;
3761         }
3762
3763         ++vcpu->stat.nmi_injections;
3764         vmx->nmi_known_unmasked = false;
3765         if (vmx->rmode.vm86_active) {
3766                 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
3767                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3768                 return;
3769         }
3770         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
3771                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
3772         vmx_clear_hlt(vcpu);
3773 }
3774
3775 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
3776 {
3777         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
3778                 return 0;
3779
3780         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
3781                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
3782                    | GUEST_INTR_STATE_NMI));
3783 }
3784
3785 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
3786 {
3787         if (!cpu_has_virtual_nmis())
3788                 return to_vmx(vcpu)->soft_vnmi_blocked;
3789         if (to_vmx(vcpu)->nmi_known_unmasked)
3790                 return false;
3791         return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
3792 }
3793
3794 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3795 {
3796         struct vcpu_vmx *vmx = to_vmx(vcpu);
3797
3798         if (!cpu_has_virtual_nmis()) {
3799                 if (vmx->soft_vnmi_blocked != masked) {
3800                         vmx->soft_vnmi_blocked = masked;
3801                         vmx->vnmi_blocked_time = 0;
3802                 }
3803         } else {
3804                 vmx->nmi_known_unmasked = !masked;
3805                 if (masked)
3806                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3807                                       GUEST_INTR_STATE_NMI);
3808                 else
3809                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3810                                         GUEST_INTR_STATE_NMI);
3811         }
3812 }
3813
3814 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
3815 {
3816         return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
3817                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
3818                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
3819 }
3820
3821 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
3822 {
3823         int ret;
3824         struct kvm_userspace_memory_region tss_mem = {
3825                 .slot = TSS_PRIVATE_MEMSLOT,
3826                 .guest_phys_addr = addr,
3827                 .memory_size = PAGE_SIZE * 3,
3828                 .flags = 0,
3829         };
3830
3831         ret = kvm_set_memory_region(kvm, &tss_mem, 0);
3832         if (ret)
3833                 return ret;
3834         kvm->arch.tss_addr = addr;
3835         if (!init_rmode_tss(kvm))
3836                 return  -ENOMEM;
3837
3838         return 0;
3839 }
3840
3841 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
3842                                   int vec, u32 err_code)
3843 {
3844         /*
3845          * An instruction with the address-size override prefix (opcode
3846          * 0x67) causes a #SS fault with error code 0 in VM86 mode.
3847          */
3848         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
3849                 if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
3850                         return 1;
3851         /*
3852          * Forward all other exceptions that are valid in real mode.
3853          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
3854          *        the required debugging infrastructure rework.
3855          */
3856         switch (vec) {
3857         case DB_VECTOR:
3858                 if (vcpu->guest_debug &
3859                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
3860                         return 0;
3861                 kvm_queue_exception(vcpu, vec);
3862                 return 1;
3863         case BP_VECTOR:
3864                 /*
3865                  * Update instruction length as we may reinject the exception
3866                  * from user space while in guest debugging mode.
3867                  */
3868                 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
3869                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3870                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
3871                         return 0;
3872                 /* fall through */
3873         case DE_VECTOR:
3874         case OF_VECTOR:
3875         case BR_VECTOR:
3876         case UD_VECTOR:
3877         case DF_VECTOR:
3878         case SS_VECTOR:
3879         case GP_VECTOR:
3880         case MF_VECTOR:
3881                 kvm_queue_exception(vcpu, vec);
3882                 return 1;
3883         }
3884         return 0;
3885 }
3886
3887 /*
3888  * Trigger machine check on the host. We assume all the MSRs are already set up
3889  * by the CPU and that we still run on the same CPU as the MCE occurred on.
3890  * We pass a fake environment to the machine check handler because we want
3891  * the guest to be always treated like user space, no matter what context
3892  * it used internally.
3893  */
3894 static void kvm_machine_check(void)
3895 {
3896 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
3897         struct pt_regs regs = {
3898                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
3899                 .flags = X86_EFLAGS_IF,
3900         };
3901
3902         do_machine_check(&regs, 0);
3903 #endif
3904 }
3905
3906 static int handle_machine_check(struct kvm_vcpu *vcpu)
3907 {
3908         /* already handled by vcpu_run */
3909         return 1;
3910 }
3911
3912 static int handle_exception(struct kvm_vcpu *vcpu)
3913 {
3914         struct vcpu_vmx *vmx = to_vmx(vcpu);
3915         struct kvm_run *kvm_run = vcpu->run;
3916         u32 intr_info, ex_no, error_code;
3917         unsigned long cr2, rip, dr6;
3918         u32 vect_info;
3919         enum emulation_result er;
3920
3921         vect_info = vmx->idt_vectoring_info;
3922         intr_info = vmx->exit_intr_info;
3923
3924         if (is_machine_check(intr_info))
3925                 return handle_machine_check(vcpu);
3926
3927         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
3928             !is_page_fault(intr_info)) {
3929                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3930                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
3931                 vcpu->run->internal.ndata = 2;
3932                 vcpu->run->internal.data[0] = vect_info;
3933                 vcpu->run->internal.data[1] = intr_info;
3934                 return 0;
3935         }
3936
3937         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
3938                 return 1;  /* already handled by vmx_vcpu_run() */
3939
3940         if (is_no_device(intr_info)) {
3941                 vmx_fpu_activate(vcpu);
3942                 return 1;
3943         }
3944
3945         if (is_invalid_opcode(intr_info)) {
3946                 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
3947                 if (er != EMULATE_DONE)
3948                         kvm_queue_exception(vcpu, UD_VECTOR);
3949                 return 1;
3950         }
3951
3952         error_code = 0;
3953         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
3954                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
3955         if (is_page_fault(intr_info)) {
3956                 /* EPT won't cause page fault directly */
3957                 if (enable_ept)
3958                         BUG();
3959                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
3960                 trace_kvm_page_fault(cr2, error_code);
3961
3962                 if (kvm_event_needs_reinjection(vcpu))
3963                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
3964                 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
3965         }
3966
3967         if (vmx->rmode.vm86_active &&
3968             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
3969                                                                 error_code)) {
3970                 if (vcpu->arch.halt_request) {
3971                         vcpu->arch.halt_request = 0;
3972                         return kvm_emulate_halt(vcpu);
3973                 }
3974                 return 1;
3975         }
3976
3977         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
3978         switch (ex_no) {
3979         case DB_VECTOR:
3980                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
3981                 if (!(vcpu->guest_debug &
3982                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
3983                         vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
3984                         kvm_queue_exception(vcpu, DB_VECTOR);
3985                         return 1;
3986                 }
3987                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
3988                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
3989                 /* fall through */
3990         case BP_VECTOR:
3991                 /*
3992                  * Update instruction length as we may reinject #BP from
3993                  * user space while in guest debugging mode. Reading it for
3994                  * #DB as well causes no harm, it is not used in that case.
3995                  */
3996                 vmx->vcpu.arch.event_exit_inst_len =
3997                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3998                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
3999                 rip = kvm_rip_read(vcpu);
4000                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
4001                 kvm_run->debug.arch.exception = ex_no;
4002                 break;
4003         default:
4004                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
4005                 kvm_run->ex.exception = ex_no;
4006                 kvm_run->ex.error_code = error_code;
4007                 break;
4008         }
4009         return 0;
4010 }
4011
4012 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
4013 {
4014         ++vcpu->stat.irq_exits;
4015         return 1;
4016 }
4017
4018 static int handle_triple_fault(struct kvm_vcpu *vcpu)
4019 {
4020         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4021         return 0;
4022 }
4023
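/*
 * The I/O exit qualification encodes the access size minus one in
 * bits 2:0, the direction in bit 3 (set for IN), a string operation
 * in bit 4 and the port number in bits 31:16. Only simple OUTs take
 * the fast path; string and IN accesses go through the emulator.
 */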
4024 static int handle_io(struct kvm_vcpu *vcpu)
4025 {
4026         unsigned long exit_qualification;
4027         int size, in, string;
4028         unsigned port;
4029
4030         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4031         string = (exit_qualification & 16) != 0;
4032         in = (exit_qualification & 8) != 0;
4033
4034         ++vcpu->stat.io_exits;
4035
4036         if (string || in)
4037                 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4038
4039         port = exit_qualification >> 16;
4040         size = (exit_qualification & 7) + 1;
4041         skip_emulated_instruction(vcpu);
4042
4043         return kvm_fast_pio_out(vcpu, size, port);
4044 }
4045
4046 static void
4047 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4048 {
4049         /*
4050          * Patch in the VMCALL instruction:
4051          */
4052         hypercall[0] = 0x0f;
4053         hypercall[1] = 0x01;
4054         hypercall[2] = 0xc1;
4055 }
4056
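/*
 * The CR-access exit qualification encodes the control register in
 * bits 3:0, the access type in bits 5:4 (0 = mov to cr, 1 = mov from
 * cr, 2 = clts, 3 = lmsw) and the general-purpose register in
 * bits 11:8.
 */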
4057 static int handle_cr(struct kvm_vcpu *vcpu)
4058 {
4059         unsigned long exit_qualification, val;
4060         int cr;
4061         int reg;
4062         int err;
4063
4064         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4065         cr = exit_qualification & 15;
4066         reg = (exit_qualification >> 8) & 15;
4067         switch ((exit_qualification >> 4) & 3) {
4068         case 0: /* mov to cr */
4069                 val = kvm_register_read(vcpu, reg);
4070                 trace_kvm_cr_write(cr, val);
4071                 switch (cr) {
4072                 case 0:
4073                         err = kvm_set_cr0(vcpu, val);
4074                         kvm_complete_insn_gp(vcpu, err);
4075                         return 1;
4076                 case 3:
4077                         err = kvm_set_cr3(vcpu, val);
4078                         kvm_complete_insn_gp(vcpu, err);
4079                         return 1;
4080                 case 4:
4081                         err = kvm_set_cr4(vcpu, val);
4082                         kvm_complete_insn_gp(vcpu, err);
4083                         return 1;
4084                 case 8: {
4085                                 u8 cr8_prev = kvm_get_cr8(vcpu);
4086                                 u8 cr8 = kvm_register_read(vcpu, reg);
4087                                 err = kvm_set_cr8(vcpu, cr8);
4088                                 kvm_complete_insn_gp(vcpu, err);
4089                                 if (irqchip_in_kernel(vcpu->kvm))
4090                                         return 1;
4091                                 if (cr8_prev <= cr8)
4092                                         return 1;
4093                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
4094                                 return 0;
4095                         }
4096                 };
4097                 break;
4098         case 2: /* clts */
4099                 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
4100                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
4101                 skip_emulated_instruction(vcpu);
4102                 vmx_fpu_activate(vcpu);
4103                 return 1;
4104         case 1: /*mov from cr*/
4105                 switch (cr) {
4106                 case 3:
4107                         val = kvm_read_cr3(vcpu);
4108                         kvm_register_write(vcpu, reg, val);
4109                         trace_kvm_cr_read(cr, val);
4110                         skip_emulated_instruction(vcpu);
4111                         return 1;
4112                 case 8:
4113                         val = kvm_get_cr8(vcpu);
4114                         kvm_register_write(vcpu, reg, val);
4115                         trace_kvm_cr_read(cr, val);
4116                         skip_emulated_instruction(vcpu);
4117                         return 1;
4118                 }
4119                 break;
4120         case 3: /* lmsw */
4121                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
4122                 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
4123                 kvm_lmsw(vcpu, val);
4124
4125                 skip_emulated_instruction(vcpu);
4126                 return 1;
4127         default:
4128                 break;
4129         }
4130         vcpu->run->exit_reason = 0;
4131         pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
4132                (int)(exit_qualification >> 4) & 3, cr);
4133         return 0;
4134 }
4135
4136 static int handle_dr(struct kvm_vcpu *vcpu)
4137 {
4138         unsigned long exit_qualification;
4139         int dr, reg;
4140
4141         /* Do not handle if CPL > 0; a #GP will be triggered on re-entry */
4142         if (!kvm_require_cpl(vcpu, 0))
4143                 return 1;
4144         dr = vmcs_readl(GUEST_DR7);
4145         if (dr & DR7_GD) {
4146                 /*
4147                  * As the vm-exit takes precedence over the debug trap, we
4148                  * need to emulate the latter, either for the host or the
4149                  * guest debugging itself.
4150                  */
4151                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
4152                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
4153                         vcpu->run->debug.arch.dr7 = dr;
4154                         vcpu->run->debug.arch.pc =
4155                                 vmcs_readl(GUEST_CS_BASE) +
4156                                 vmcs_readl(GUEST_RIP);
4157                         vcpu->run->debug.arch.exception = DB_VECTOR;
4158                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
4159                         return 0;
4160                 } else {
4161                         vcpu->arch.dr7 &= ~DR7_GD;
4162                         vcpu->arch.dr6 |= DR6_BD;
4163                         vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
4164                         kvm_queue_exception(vcpu, DB_VECTOR);
4165                         return 1;
4166                 }
4167         }
4168
4169         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4170         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
4171         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
4172         if (exit_qualification & TYPE_MOV_FROM_DR) {
4173                 unsigned long val;
4174                 if (!kvm_get_dr(vcpu, dr, &val))
4175                         kvm_register_write(vcpu, reg, val);
4176         } else
4177                 kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
4178         skip_emulated_instruction(vcpu);
4179         return 1;
4180 }
4181
4182 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
4183 {
4184         vmcs_writel(GUEST_DR7, val);
4185 }
4186
4187 static int handle_cpuid(struct kvm_vcpu *vcpu)
4188 {
4189         kvm_emulate_cpuid(vcpu);
4190         return 1;
4191 }
4192
4193 static int handle_rdmsr(struct kvm_vcpu *vcpu)
4194 {
4195         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
4196         u64 data;
4197
4198         if (vmx_get_msr(vcpu, ecx, &data)) {
4199                 trace_kvm_msr_read_ex(ecx);
4200                 kvm_inject_gp(vcpu, 0);
4201                 return 1;
4202         }
4203
4204         trace_kvm_msr_read(ecx, data);
4205
4206         /* FIXME: handling of bits 32:63 of rax, rdx */
4207         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
4208         vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
4209         skip_emulated_instruction(vcpu);
4210         return 1;
4211 }
4212
4213 static int handle_wrmsr(struct kvm_vcpu *vcpu)
4214 {
4215         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
4216         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
4217                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
4218
4219         if (vmx_set_msr(vcpu, ecx, data) != 0) {
4220                 trace_kvm_msr_write_ex(ecx, data);
4221                 kvm_inject_gp(vcpu, 0);
4222                 return 1;
4223         }
4224
4225         trace_kvm_msr_write(ecx, data);
4226         skip_emulated_instruction(vcpu);
4227         return 1;
4228 }
4229
4230 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
4231 {
4232         kvm_make_request(KVM_REQ_EVENT, vcpu);
4233         return 1;
4234 }
4235
4236 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
4237 {
4238         u32 cpu_based_vm_exec_control;
4239
4240         /* clear pending irq */
4241         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4242         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
4243         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4244
4245         kvm_make_request(KVM_REQ_EVENT, vcpu);
4246
4247         ++vcpu->stat.irq_window_exits;
4248
4249         /*
4250          * If userspace is waiting to inject interrupts, exit as soon
4251          * as possible.
4252          */
4253         if (!irqchip_in_kernel(vcpu->kvm) &&
4254             vcpu->run->request_interrupt_window &&
4255             !kvm_cpu_has_interrupt(vcpu)) {
4256                 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
4257                 return 0;
4258         }
4259         return 1;
4260 }
4261
4262 static int handle_halt(struct kvm_vcpu *vcpu)
4263 {
4264         skip_emulated_instruction(vcpu);
4265         return kvm_emulate_halt(vcpu);
4266 }
4267
4268 static int handle_vmcall(struct kvm_vcpu *vcpu)
4269 {
4270         skip_emulated_instruction(vcpu);
4271         kvm_emulate_hypercall(vcpu);
4272         return 1;
4273 }
4274
4275 static int handle_vmx_insn(struct kvm_vcpu *vcpu)
4276 {
4277         kvm_queue_exception(vcpu, UD_VECTOR);
4278         return 1;
4279 }
4280
4281 static int handle_invd(struct kvm_vcpu *vcpu)
4282 {
4283         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4284 }
4285
4286 static int handle_invlpg(struct kvm_vcpu *vcpu)
4287 {
4288         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4289
4290         kvm_mmu_invlpg(vcpu, exit_qualification);
4291         skip_emulated_instruction(vcpu);
4292         return 1;
4293 }
4294
4295 static int handle_wbinvd(struct kvm_vcpu *vcpu)
4296 {
4297         skip_emulated_instruction(vcpu);
4298         kvm_emulate_wbinvd(vcpu);
4299         return 1;
4300 }
4301
4302 static int handle_xsetbv(struct kvm_vcpu *vcpu)
4303 {
4304         u64 new_bv = kvm_read_edx_eax(vcpu);
4305         u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4306
4307         if (kvm_set_xcr(vcpu, index, new_bv) == 0)
4308                 skip_emulated_instruction(vcpu);
4309         return 1;
4310 }
4311
4312 static int handle_apic_access(struct kvm_vcpu *vcpu)
4313 {
4314         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4315 }
4316
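/*
 * Bits 31:30 of the exit qualification give the task-switch reason and
 * bits 15:0 the target TSS selector. Any event that was being
 * delivered through the IDT must be cleared (and, for exceptions, its
 * error code captured) before the switch is emulated.
 */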
4317 static int handle_task_switch(struct kvm_vcpu *vcpu)
4318 {
4319         struct vcpu_vmx *vmx = to_vmx(vcpu);
4320         unsigned long exit_qualification;
4321         bool has_error_code = false;
4322         u32 error_code = 0;
4323         u16 tss_selector;
4324         int reason, type, idt_v;
4325
4326         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
4327         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
4328
4329         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4330
4331         reason = (u32)exit_qualification >> 30;
4332         if (reason == TASK_SWITCH_GATE && idt_v) {
4333                 switch (type) {
4334                 case INTR_TYPE_NMI_INTR:
4335                         vcpu->arch.nmi_injected = false;
4336                         vmx_set_nmi_mask(vcpu, true);
4337                         break;
4338                 case INTR_TYPE_EXT_INTR:
4339                 case INTR_TYPE_SOFT_INTR:
4340                         kvm_clear_interrupt_queue(vcpu);
4341                         break;
4342                 case INTR_TYPE_HARD_EXCEPTION:
4343                         if (vmx->idt_vectoring_info &
4344                             VECTORING_INFO_DELIVER_CODE_MASK) {
4345                                 has_error_code = true;
4346                                 error_code =
4347                                         vmcs_read32(IDT_VECTORING_ERROR_CODE);
4348                         }
4349                         /* fall through */
4350                 case INTR_TYPE_SOFT_EXCEPTION:
4351                         kvm_clear_exception_queue(vcpu);
4352                         break;
4353                 default:
4354                         break;
4355                 }
4356         }
4357         tss_selector = exit_qualification;
4358
4359         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
4360                        type != INTR_TYPE_EXT_INTR &&
4361                        type != INTR_TYPE_NMI_INTR))
4362                 skip_emulated_instruction(vcpu);
4363
4364         if (kvm_task_switch(vcpu, tss_selector, reason,
4365                                 has_error_code, error_code) == EMULATE_FAIL) {
4366                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4367                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4368                 vcpu->run->internal.ndata = 0;
4369                 return 0;
4370         }
4371
4372         /* clear all local breakpoint enable flags */
4373         vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
4374
4375         /*
4376          * TODO: What about debug traps on tss switch?
4377          *       Are we supposed to inject them and update dr6?
4378          */
4379
4380         return 1;
4381 }
4382
4383 static int handle_ept_violation(struct kvm_vcpu *vcpu)
4384 {
4385         unsigned long exit_qualification;
4386         gpa_t gpa;
4387         int gla_validity;
4388
4389         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4390
4391         if (exit_qualification & (1 << 6)) {
4392                 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
4393                 return -EINVAL;
4394         }
4395
4396         gla_validity = (exit_qualification >> 7) & 0x3;
4397         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
4398                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
4399                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
4400                         (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
4401                         vmcs_readl(GUEST_LINEAR_ADDRESS));
4402                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
4403                         (long unsigned int)exit_qualification);
4404                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
4405                 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
4406                 return 0;
4407         }
4408
4409         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
4410         trace_kvm_page_fault(gpa, exit_qualification);
4411         return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
4412 }
4413
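/*
 * Compute the EPT reserved-bit mask for an spte: all bits above the
 * CPU's physical address width (up to bit 51) are reserved, plus
 * level-dependent low bits; at level 2, bit 7 distinguishes a 2MB page
 * from a page-table reference.
 */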
4414 static u64 ept_rsvd_mask(u64 spte, int level)
4415 {
4416         int i;
4417         u64 mask = 0;
4418
4419         for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
4420                 mask |= (1ULL << i);
4421
4422         if (level > 2)
4423                 /* bits 7:3 reserved */
4424                 mask |= 0xf8;
4425         else if (level == 2) {
4426                 if (spte & (1ULL << 7))
4427                         /* 2MB page, bits 20:12 reserved */
4428                         mask |= 0x1ff000;
4429                 else
4430                         /* bits 6:3 reserved */
4431                         mask |= 0x78;
4432         }
4433
4434         return mask;
4435 }
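
/*
 * Worked example (illustrative only): on a CPU with 40 physical address
 * bits, the loop above sets bits 51:41. For a level-2 SPTE with bit 7
 * set (a 2MB page), the final mask is bits 51:41 plus bits 20:12, i.e.
 * 0x000ffe00001ff000; any of these bits set in the SPTE indicates a
 * misconfiguration.
 */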
4436
4437 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
4438                                        int level)
4439 {
4440         printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
4441
4442         /* 010b (write-only) */
4443         WARN_ON((spte & 0x7) == 0x2);
4444
4445         /* 110b (write/execute) */
4446         WARN_ON((spte & 0x7) == 0x6);
4447
4448         /* 100b (execute-only) and value not supported by logical processor */
4449         if (!cpu_has_vmx_ept_execute_only())
4450                 WARN_ON((spte & 0x7) == 0x4);
4451
4452         /* not 000b */
4453         if ((spte & 0x7)) {
4454                 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
4455
4456                 if (rsvd_bits != 0) {
4457                         printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
4458                                          __func__, rsvd_bits);
4459                         WARN_ON(1);
4460                 }
4461
4462                 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
4463                         u64 ept_mem_type = (spte & 0x38) >> 3;
4464
4465                         if (ept_mem_type == 2 || ept_mem_type == 3 ||
4466                             ept_mem_type == 7) {
4467                                 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
4468                                                 __func__, ept_mem_type);
4469                                 WARN_ON(1);
4470                         }
4471                 }
4472         }
4473 }
4474
4475 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
4476 {
4477         u64 sptes[4];
4478         int nr_sptes, i;
4479         gpa_t gpa;
4480
4481         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
4482
4483         printk(KERN_ERR "EPT: Misconfiguration.\n");
4484         printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
4485
4486         nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
4487
4488         for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
4489                 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
4490
4491         vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
4492         vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
4493
4494         return 0;
4495 }
4496
4497 static int handle_nmi_window(struct kvm_vcpu *vcpu)
4498 {
4499         u32 cpu_based_vm_exec_control;
4500
4501         /* clear pending NMI */
4502         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4503         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
4504         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4505         ++vcpu->stat.nmi_window_exits;
4506         kvm_make_request(KVM_REQ_EVENT, vcpu);
4507
4508         return 1;
4509 }
4510
4511 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
4512 {
4513         struct vcpu_vmx *vmx = to_vmx(vcpu);
4514         enum emulation_result err = EMULATE_DONE;
4515         int ret = 1;
4516         u32 cpu_exec_ctrl;
4517         bool intr_window_requested;
4518
4519         cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4520         intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
4521
4522         while (!guest_state_valid(vcpu)) {
4523                 if (intr_window_requested
4524                     && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
4525                         return handle_interrupt_window(&vmx->vcpu);
4526
4527                 err = emulate_instruction(vcpu, 0);
4528
4529                 if (err == EMULATE_DO_MMIO) {
4530                         ret = 0;
4531                         goto out;
4532                 }
4533
4534                 if (err != EMULATE_DONE)
4535                         return 0;
4536
4537                 if (signal_pending(current))
4538                         goto out;
4539                 if (need_resched())
4540                         schedule();
4541         }
4542
4543         vmx->emulation_required = 0;
4544 out:
4545         return ret;
4546 }
4547
4548 /*
 * Indicate a busy-waiting vcpu spinning on a spinlock. We do not enable plain
 * PAUSE exiting, so we only get here on CPUs with PAUSE-Loop-Exiting.
4551  */
4552 static int handle_pause(struct kvm_vcpu *vcpu)
4553 {
4554         skip_emulated_instruction(vcpu);
4555         kvm_vcpu_on_spin(vcpu);
4556
4557         return 1;
4558 }
4559
4560 static int handle_invalid_op(struct kvm_vcpu *vcpu)
4561 {
4562         kvm_queue_exception(vcpu, UD_VECTOR);
4563         return 1;
4564 }
4565
4566 /*
4567  * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
4568  * We could reuse a single VMCS for all the L2 guests, but we also want the
4569  * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
4570  * allows keeping them loaded on the processor, and in the future will allow
4571  * optimizations where prepare_vmcs02 doesn't need to set all the fields on
4572  * every entry if they never change.
4573  * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
 * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
4575  *
4576  * The following functions allocate and free a vmcs02 in this pool.
4577  */
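
/*
 * Pool behavior, for illustration: with VMCS02_POOL_SIZE == 1, an L1 that
 * alternates between two vmcs12s keeps recycling the single pooled vmcs02
 * (the least recently used entry, at the list tail); with a larger pool,
 * each vmcs12 keeps its own vmcs02, which is simply moved to the front of
 * the list whenever it is used again.
 */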
4578
4579 /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
4580 static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
4581 {
4582         struct vmcs02_list *item;
4583         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
4584                 if (item->vmptr == vmx->nested.current_vmptr) {
4585                         list_move(&item->list, &vmx->nested.vmcs02_pool);
4586                         return &item->vmcs02;
4587                 }
4588
4589         if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
4590                 /* Recycle the least recently used VMCS. */
4591                 item = list_entry(vmx->nested.vmcs02_pool.prev,
4592                         struct vmcs02_list, list);
4593                 item->vmptr = vmx->nested.current_vmptr;
4594                 list_move(&item->list, &vmx->nested.vmcs02_pool);
4595                 return &item->vmcs02;
4596         }
4597
4598         /* Create a new VMCS */
        item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
4601         if (!item)
4602                 return NULL;
4603         item->vmcs02.vmcs = alloc_vmcs();
4604         if (!item->vmcs02.vmcs) {
4605                 kfree(item);
4606                 return NULL;
4607         }
4608         loaded_vmcs_init(&item->vmcs02);
4609         item->vmptr = vmx->nested.current_vmptr;
        list_add(&item->list, &vmx->nested.vmcs02_pool);
4611         vmx->nested.vmcs02_num++;
4612         return &item->vmcs02;
4613 }
4614
4615 /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
4616 static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
4617 {
4618         struct vmcs02_list *item;
4619         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
4620                 if (item->vmptr == vmptr) {
4621                         free_loaded_vmcs(&item->vmcs02);
4622                         list_del(&item->list);
4623                         kfree(item);
4624                         vmx->nested.vmcs02_num--;
4625                         return;
4626                 }
4627 }
4628
4629 /*
4630  * Free all VMCSs saved for this vcpu, except the one pointed by
4631  * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
4632  * currently used, if running L2), and vmcs01 when running L2.
4633  */
4634 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
4635 {
4636         struct vmcs02_list *item, *n;
4637         list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
4638                 if (vmx->loaded_vmcs != &item->vmcs02)
4639                         free_loaded_vmcs(&item->vmcs02);
4640                 list_del(&item->list);
4641                 kfree(item);
4642         }
4643         vmx->nested.vmcs02_num = 0;
4644
4645         if (vmx->loaded_vmcs != &vmx->vmcs01)
4646                 free_loaded_vmcs(&vmx->vmcs01);
4647 }
4648
4649 /*
4650  * Emulate the VMXON instruction.
4651  * Currently, we just remember that VMX is active, and do not save or even
4652  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4653  * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4655  * argument is different from the VMXON pointer (which the spec says they do).
4656  */
4657 static int handle_vmon(struct kvm_vcpu *vcpu)
4658 {
4659         struct kvm_segment cs;
4660         struct vcpu_vmx *vmx = to_vmx(vcpu);
4661
4662         /* The Intel VMX Instruction Reference lists a bunch of bits that
4663          * are prerequisite to running VMXON, most notably cr4.VMXE must be
4664          * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
4665          * Otherwise, we should fail with #UD. We test these now:
4666          */
4667         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
4668             !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
4669             (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
4670                 kvm_queue_exception(vcpu, UD_VECTOR);
4671                 return 1;
4672         }
4673
4674         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
4675         if (is_long_mode(vcpu) && !cs.l) {
4676                 kvm_queue_exception(vcpu, UD_VECTOR);
4677                 return 1;
4678         }
4679
4680         if (vmx_get_cpl(vcpu)) {
4681                 kvm_inject_gp(vcpu, 0);
4682                 return 1;
4683         }
4684
        INIT_LIST_HEAD(&vmx->nested.vmcs02_pool);
4686         vmx->nested.vmcs02_num = 0;
4687
4688         vmx->nested.vmxon = true;
4689
4690         skip_emulated_instruction(vcpu);
4691         return 1;
4692 }
4693
4694 /*
4695  * Intel's VMX Instruction Reference specifies a common set of prerequisites
4696  * for running VMX instructions (except VMXON, whose prerequisites are
4697  * slightly different). It also specifies what exception to inject otherwise.
4698  */
4699 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
4700 {
4701         struct kvm_segment cs;
4702         struct vcpu_vmx *vmx = to_vmx(vcpu);
4703
4704         if (!vmx->nested.vmxon) {
4705                 kvm_queue_exception(vcpu, UD_VECTOR);
4706                 return 0;
4707         }
4708
4709         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
4710         if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
4711             (is_long_mode(vcpu) && !cs.l)) {
4712                 kvm_queue_exception(vcpu, UD_VECTOR);
4713                 return 0;
4714         }
4715
4716         if (vmx_get_cpl(vcpu)) {
4717                 kvm_inject_gp(vcpu, 0);
4718                 return 0;
4719         }
4720
4721         return 1;
4722 }
4723
4724 /*
4725  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
4726  * just stops using VMX.
4727  */
4728 static void free_nested(struct vcpu_vmx *vmx)
4729 {
4730         if (!vmx->nested.vmxon)
4731                 return;
4732         vmx->nested.vmxon = false;
4733         if (vmx->nested.current_vmptr != -1ull) {
4734                 kunmap(vmx->nested.current_vmcs12_page);
4735                 nested_release_page(vmx->nested.current_vmcs12_page);
4736                 vmx->nested.current_vmptr = -1ull;
4737                 vmx->nested.current_vmcs12 = NULL;
4738         }
4739
4740         nested_free_all_saved_vmcss(vmx);
4741 }
4742
4743 /* Emulate the VMXOFF instruction */
4744 static int handle_vmoff(struct kvm_vcpu *vcpu)
4745 {
4746         if (!nested_vmx_check_permission(vcpu))
4747                 return 1;
4748         free_nested(to_vmx(vcpu));
4749         skip_emulated_instruction(vcpu);
4750         return 1;
4751 }
4752
4753 /*
4754  * Decode the memory-address operand of a vmx instruction, as recorded on an
4755  * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and queues
 * a #UD or #GP exception.
4758  */
4759 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
4760                                  unsigned long exit_qualification,
4761                                  u32 vmx_instruction_info, gva_t *ret)
4762 {
4763         /*
4764          * According to Vol. 3B, "Information for VM Exits Due to Instruction
4765          * Execution", on an exit, vmx_instruction_info holds most of the
4766          * addressing components of the operand. Only the displacement part
4767          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4768          * For how an actual address is calculated from all these components,
4769          * refer to Vol. 1, "Operand Addressing".
4770          */
4771         int  scaling = vmx_instruction_info & 3;
4772         int  addr_size = (vmx_instruction_info >> 7) & 7;
4773         bool is_reg = vmx_instruction_info & (1u << 10);
4774         int  seg_reg = (vmx_instruction_info >> 15) & 7;
4775         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
4776         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4777         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
4778         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
4779
4780         if (is_reg) {
4781                 kvm_queue_exception(vcpu, UD_VECTOR);
4782                 return 1;
4783         }
4784
4785         /* Addr = segment_base + offset */
4786         /* offset = base + [index * scale] + displacement */
4787         *ret = vmx_get_segment_base(vcpu, seg_reg);
4788         if (base_is_valid)
4789                 *ret += kvm_register_read(vcpu, base_reg);
4790         if (index_is_valid)
                *ret += kvm_register_read(vcpu, index_reg) << scaling;
4792         *ret += exit_qualification; /* holds the displacement */
4793
4794         if (addr_size == 1) /* 32 bit */
4795                 *ret &= 0xffffffff;
4796
4797         /*
4798          * TODO: throw #GP (and return 1) in various cases that the VM*
4799          * instructions require it - e.g., offset beyond segment limit,
4800          * unusable or unreadable/unwritable segment, non-canonical 64-bit
4801          * address, and so on. Currently these are not checked.
4802          */
4803         return 0;
4804 }
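
/*
 * Decoding example (illustrative): for a memory operand such as
 * ds:0x10(%rax,%rbx,8) in 64-bit mode, the exit reports scaling = 3
 * (i.e. a scale factor of 8), addr_size = 2 (64 bit), seg_reg = 3 (DS),
 * index_reg = RBX, base_reg = RAX, and the displacement 0x10 arrives in
 * exit_qualification, so the code above computes
 * DS.base + RAX + (RBX << 3) + 0x10.
 */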
4805
4806 /*
4807  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
4808  * set the success or error code of an emulated VMX instruction, as specified
4809  * by Vol 2B, VMX Instruction Reference, "Conventions".
4810  */
4811 static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
4812 {
4813         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
4814                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
4815                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
4816 }
4817
4818 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
4819 {
4820         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
4821                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
4822                             X86_EFLAGS_SF | X86_EFLAGS_OF))
4823                         | X86_EFLAGS_CF);
4824 }
4825
4826 static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
4827                                         u32 vm_instruction_error)
4828 {
4829         if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
4830                 /*
4831                  * failValid writes the error number to the current VMCS, which
                 * can't be done if there isn't a current VMCS.
4833                  */
4834                 nested_vmx_failInvalid(vcpu);
4835                 return;
4836         }
4837         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
4838                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
4839                             X86_EFLAGS_SF | X86_EFLAGS_OF))
4840                         | X86_EFLAGS_ZF);
4841         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
4842 }
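
/*
 * Summary of the three conventions implemented above: VMsucceed clears
 * all six arithmetic flags; VMfailInvalid sets only CF; VMfailValid sets
 * only ZF and stores the error number in the current vmcs12's
 * vm_instruction_error field, from which L1 can VMREAD it.
 */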
4843
4844 /* Emulate the VMCLEAR instruction */
4845 static int handle_vmclear(struct kvm_vcpu *vcpu)
4846 {
4847         struct vcpu_vmx *vmx = to_vmx(vcpu);
4848         gva_t gva;
4849         gpa_t vmptr;
4850         struct vmcs12 *vmcs12;
4851         struct page *page;
4852         struct x86_exception e;
4853
4854         if (!nested_vmx_check_permission(vcpu))
4855                 return 1;
4856
4857         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4858                         vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
4859                 return 1;
4860
4861         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
4862                                 sizeof(vmptr), &e)) {
4863                 kvm_inject_page_fault(vcpu, &e);
4864                 return 1;
4865         }
4866
4867         if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
4868                 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
4869                 skip_emulated_instruction(vcpu);
4870                 return 1;
4871         }
4872
4873         if (vmptr == vmx->nested.current_vmptr) {
4874                 kunmap(vmx->nested.current_vmcs12_page);
4875                 nested_release_page(vmx->nested.current_vmcs12_page);
4876                 vmx->nested.current_vmptr = -1ull;
4877                 vmx->nested.current_vmcs12 = NULL;
4878         }
4879
4880         page = nested_get_page(vcpu, vmptr);
4881         if (page == NULL) {
4882                 /*
4883                  * For accurate processor emulation, VMCLEAR beyond available
4884                  * physical memory should do nothing at all. However, it is
4885                  * possible that a nested vmx bug, not a guest hypervisor bug,
4886                  * resulted in this case, so let's shut down before doing any
4887                  * more damage:
4888                  */
4889                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4890                 return 1;
4891         }
4892         vmcs12 = kmap(page);
4893         vmcs12->launch_state = 0;
4894         kunmap(page);
4895         nested_release_page(page);
4896
4897         nested_free_vmcs02(vmx, vmptr);
4898
4899         skip_emulated_instruction(vcpu);
4900         nested_vmx_succeed(vcpu);
4901         return 1;
4902 }
4903
4904 enum vmcs_field_type {
4905         VMCS_FIELD_TYPE_U16 = 0,
4906         VMCS_FIELD_TYPE_U64 = 1,
4907         VMCS_FIELD_TYPE_U32 = 2,
4908         VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
4909 };
4910
4911 static inline int vmcs_field_type(unsigned long field)
4912 {
4913         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
4914                 return VMCS_FIELD_TYPE_U32;
        return (field >> 13) & 0x3;
4916 }
4917
4918 static inline int vmcs_field_readonly(unsigned long field)
4919 {
4920         return (((field >> 10) & 0x3) == 1);
4921 }
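
/*
 * Encoding examples (field numbers from asm/vmx.h): GUEST_RIP is 0x681e,
 * so (0x681e >> 13) & 0x3 == 3, a natural-width field; VM_EXIT_REASON is
 * 0x4402, so its width is 2 (u32) and (0x4402 >> 10) & 0x3 == 1, marking
 * it read-only.
 */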
4922
4923 /*
4924  * Read a vmcs12 field. Since these can have varying lengths and we return
4925  * one type, we chose the biggest type (u64) and zero-extend the return value
4926  * to that size. Note that the caller, handle_vmread, might need to use only
4927  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
4928  * 64-bit fields are to be returned).
4929  */
4930 static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
4931                                         unsigned long field, u64 *ret)
4932 {
4933         short offset = vmcs_field_to_offset(field);
4934         char *p;
4935
4936         if (offset < 0)
4937                 return 0;
4938
4939         p = ((char *)(get_vmcs12(vcpu))) + offset;
4940
4941         switch (vmcs_field_type(field)) {
4942         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
4943                 *ret = *((natural_width *)p);
4944                 return 1;
4945         case VMCS_FIELD_TYPE_U16:
4946                 *ret = *((u16 *)p);
4947                 return 1;
4948         case VMCS_FIELD_TYPE_U32:
4949                 *ret = *((u32 *)p);
4950                 return 1;
4951         case VMCS_FIELD_TYPE_U64:
4952                 *ret = *((u64 *)p);
4953                 return 1;
4954         default:
4955                 return 0; /* can never happen. */
4956         }
4957 }
4958
4959 /*
4960  * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
4961  * used before) all generate the same failure when it is missing.
4962  */
4963 static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
4964 {
4965         struct vcpu_vmx *vmx = to_vmx(vcpu);
4966         if (vmx->nested.current_vmptr == -1ull) {
4967                 nested_vmx_failInvalid(vcpu);
4968                 skip_emulated_instruction(vcpu);
4969                 return 0;
4970         }
4971         return 1;
4972 }
4973
4974 static int handle_vmread(struct kvm_vcpu *vcpu)
4975 {
4976         unsigned long field;
4977         u64 field_value;
4978         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4979         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4980         gva_t gva = 0;
4981
4982         if (!nested_vmx_check_permission(vcpu) ||
4983             !nested_vmx_check_vmcs12(vcpu))
4984                 return 1;
4985
4986         /* Decode instruction info and find the field to read */
4987         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4988         /* Read the field, zero-extended to a u64 field_value */
4989         if (!vmcs12_read_any(vcpu, field, &field_value)) {
4990                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4991                 skip_emulated_instruction(vcpu);
4992                 return 1;
4993         }
4994         /*
4995          * Now copy part of this value to register or memory, as requested.
4996          * Note that the number of bits actually copied is 32 or 64 depending
4997          * on the guest's mode (32 or 64 bit), not on the given field's length.
4998          */
4999         if (vmx_instruction_info & (1u << 10)) {
5000                 kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
5001                         field_value);
5002         } else {
5003                 if (get_vmx_mem_address(vcpu, exit_qualification,
5004                                 vmx_instruction_info, &gva))
5005                         return 1;
5006                 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
5007                 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
5008                              &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
5009         }
5010
5011         nested_vmx_succeed(vcpu);
5012         skip_emulated_instruction(vcpu);
5013         return 1;
5014 }
5015
5016
5017 static int handle_vmwrite(struct kvm_vcpu *vcpu)
5018 {
5019         unsigned long field;
5020         gva_t gva;
5021         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5022         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5023         char *p;
5024         short offset;
5025         /* The value to write might be 32 or 64 bits, depending on L1's long
5026          * mode, and eventually we need to write that into a field of several
5027          * possible lengths. The code below first zero-extends the value to 64
         * bit (field_value), and then copies only the appropriate number of
5029          * bits into the vmcs12 field.
5030          */
5031         u64 field_value = 0;
5032         struct x86_exception e;
5033
5034         if (!nested_vmx_check_permission(vcpu) ||
5035             !nested_vmx_check_vmcs12(vcpu))
5036                 return 1;
5037
5038         if (vmx_instruction_info & (1u << 10))
5039                 field_value = kvm_register_read(vcpu,
5040                         (((vmx_instruction_info) >> 3) & 0xf));
5041         else {
5042                 if (get_vmx_mem_address(vcpu, exit_qualification,
5043                                 vmx_instruction_info, &gva))
5044                         return 1;
5045                 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
5046                            &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
5047                         kvm_inject_page_fault(vcpu, &e);
5048                         return 1;
5049                 }
5050         }
5051
5052
5053         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5054         if (vmcs_field_readonly(field)) {
5055                 nested_vmx_failValid(vcpu,
5056                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5057                 skip_emulated_instruction(vcpu);
5058                 return 1;
5059         }
5060
5061         offset = vmcs_field_to_offset(field);
5062         if (offset < 0) {
5063                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5064                 skip_emulated_instruction(vcpu);
5065                 return 1;
5066         }
5067         p = ((char *) get_vmcs12(vcpu)) + offset;
5068
5069         switch (vmcs_field_type(field)) {
5070         case VMCS_FIELD_TYPE_U16:
5071                 *(u16 *)p = field_value;
5072                 break;
5073         case VMCS_FIELD_TYPE_U32:
5074                 *(u32 *)p = field_value;
5075                 break;
5076         case VMCS_FIELD_TYPE_U64:
5077                 *(u64 *)p = field_value;
5078                 break;
5079         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5080                 *(natural_width *)p = field_value;
5081                 break;
5082         default:
5083                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5084                 skip_emulated_instruction(vcpu);
5085                 return 1;
5086         }
5087
5088         nested_vmx_succeed(vcpu);
5089         skip_emulated_instruction(vcpu);
5090         return 1;
5091 }
5092
5093 /* Emulate the VMPTRLD instruction */
5094 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5095 {
5096         struct vcpu_vmx *vmx = to_vmx(vcpu);
5097         gva_t gva;
5098         gpa_t vmptr;
5099         struct x86_exception e;
5100
5101         if (!nested_vmx_check_permission(vcpu))
5102                 return 1;
5103
5104         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5105                         vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
5106                 return 1;
5107
5108         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
5109                                 sizeof(vmptr), &e)) {
5110                 kvm_inject_page_fault(vcpu, &e);
5111                 return 1;
5112         }
5113
5114         if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
5115                 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5116                 skip_emulated_instruction(vcpu);
5117                 return 1;
5118         }
5119
5120         if (vmx->nested.current_vmptr != vmptr) {
5121                 struct vmcs12 *new_vmcs12;
5122                 struct page *page;
5123                 page = nested_get_page(vcpu, vmptr);
5124                 if (page == NULL) {
5125                         nested_vmx_failInvalid(vcpu);
5126                         skip_emulated_instruction(vcpu);
5127                         return 1;
5128                 }
5129                 new_vmcs12 = kmap(page);
5130                 if (new_vmcs12->revision_id != VMCS12_REVISION) {
5131                         kunmap(page);
5132                         nested_release_page_clean(page);
5133                         nested_vmx_failValid(vcpu,
5134                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5135                         skip_emulated_instruction(vcpu);
5136                         return 1;
5137                 }
5138                 if (vmx->nested.current_vmptr != -1ull) {
5139                         kunmap(vmx->nested.current_vmcs12_page);
5140                         nested_release_page(vmx->nested.current_vmcs12_page);
5141                 }
5142
5143                 vmx->nested.current_vmptr = vmptr;
5144                 vmx->nested.current_vmcs12 = new_vmcs12;
5145                 vmx->nested.current_vmcs12_page = page;
5146         }
5147
5148         nested_vmx_succeed(vcpu);
5149         skip_emulated_instruction(vcpu);
5150         return 1;
5151 }
5152
5153 /* Emulate the VMPTRST instruction */
5154 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5155 {
5156         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5157         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5158         gva_t vmcs_gva;
5159         struct x86_exception e;
5160
5161         if (!nested_vmx_check_permission(vcpu))
5162                 return 1;
5163
5164         if (get_vmx_mem_address(vcpu, exit_qualification,
5165                         vmx_instruction_info, &vmcs_gva))
5166                 return 1;
5167         /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
5168         if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
5169                                  (void *)&to_vmx(vcpu)->nested.current_vmptr,
5170                                  sizeof(u64), &e)) {
5171                 kvm_inject_page_fault(vcpu, &e);
5172                 return 1;
5173         }
5174         nested_vmx_succeed(vcpu);
5175         skip_emulated_instruction(vcpu);
5176         return 1;
5177 }
5178
5179 /*
5180  * The exit handlers return 1 if the exit was handled fully and guest execution
5181  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
5182  * to be done to userspace and return 0.
5183  */
5184 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
5185         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
5186         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
5187         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
5188         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
5189         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
5190         [EXIT_REASON_CR_ACCESS]               = handle_cr,
5191         [EXIT_REASON_DR_ACCESS]               = handle_dr,
5192         [EXIT_REASON_CPUID]                   = handle_cpuid,
5193         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
5194         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
5195         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
5196         [EXIT_REASON_HLT]                     = handle_halt,
5197         [EXIT_REASON_INVD]                    = handle_invd,
5198         [EXIT_REASON_INVLPG]                  = handle_invlpg,
5199         [EXIT_REASON_VMCALL]                  = handle_vmcall,
5200         [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
5201         [EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
5202         [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
5203         [EXIT_REASON_VMPTRST]                 = handle_vmptrst,
5204         [EXIT_REASON_VMREAD]                  = handle_vmread,
5205         [EXIT_REASON_VMRESUME]                = handle_vmx_insn,
5206         [EXIT_REASON_VMWRITE]                 = handle_vmwrite,
5207         [EXIT_REASON_VMOFF]                   = handle_vmoff,
5208         [EXIT_REASON_VMON]                    = handle_vmon,
5209         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
5210         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
5211         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
5212         [EXIT_REASON_XSETBV]                  = handle_xsetbv,
5213         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
5214         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
5215         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
5216         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
5217         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
5218         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
5219         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
5220 };
5221
5222 static const int kvm_vmx_max_exit_handlers =
5223         ARRAY_SIZE(kvm_vmx_exit_handlers);
5224
5225 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
5226 {
5227         *info1 = vmcs_readl(EXIT_QUALIFICATION);
5228         *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
5229 }
5230
5231 /*
5232  * The guest has exited.  See if we can fix it or if we need userspace
5233  * assistance.
5234  */
5235 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
5236 {
5237         struct vcpu_vmx *vmx = to_vmx(vcpu);
5238         u32 exit_reason = vmx->exit_reason;
5239         u32 vectoring_info = vmx->idt_vectoring_info;
5240
5241         trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
5242
5243         /* If guest state is invalid, start emulating */
5244         if (vmx->emulation_required && emulate_invalid_guest_state)
5245                 return handle_invalid_guest_state(vcpu);
5246
5247         if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
5248                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
5249                 vcpu->run->fail_entry.hardware_entry_failure_reason
5250                         = exit_reason;
5251                 return 0;
5252         }
5253
5254         if (unlikely(vmx->fail)) {
5255                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
5256                 vcpu->run->fail_entry.hardware_entry_failure_reason
5257                         = vmcs_read32(VM_INSTRUCTION_ERROR);
5258                 return 0;
5259         }
5260
5261         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
5262                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
5263                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
5264                         exit_reason != EXIT_REASON_TASK_SWITCH))
5265                 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
5266                        "(0x%x) and exit reason is 0x%x\n",
5267                        __func__, vectoring_info, exit_reason);
5268
5269         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
5270                 if (vmx_interrupt_allowed(vcpu)) {
5271                         vmx->soft_vnmi_blocked = 0;
5272                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
5273                            vcpu->arch.nmi_pending) {
5274                         /*
                         * This CPU doesn't support us in finding the end of an
5276                          * NMI-blocked window if the guest runs with IRQs
5277                          * disabled. So we pull the trigger after 1 s of
5278                          * futile waiting, but inform the user about this.
5279                          */
5280                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
5281                                "state on VCPU %d after 1 s timeout\n",
5282                                __func__, vcpu->vcpu_id);
5283                         vmx->soft_vnmi_blocked = 0;
5284                 }
5285         }
5286
5287         if (exit_reason < kvm_vmx_max_exit_handlers
5288             && kvm_vmx_exit_handlers[exit_reason])
5289                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
5290         else {
5291                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5292                 vcpu->run->hw.hardware_exit_reason = exit_reason;
5293         }
5294         return 0;
5295 }
5296
5297 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
5298 {
5299         if (irr == -1 || tpr < irr) {
5300                 vmcs_write32(TPR_THRESHOLD, 0);
5301                 return;
5302         }
5303
5304         vmcs_write32(TPR_THRESHOLD, irr);
5305 }
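
/*
 * Rationale (informal): with a TPR shadow, the CPU exits with
 * TPR_BELOW_THRESHOLD once the guest lowers its TPR below TPR_THRESHOLD.
 * If no interrupt is pending (irr == -1), or the TPR is already below the
 * pending interrupt's priority, no such exit is needed and the threshold
 * is cleared; otherwise it is set to irr so that we regain control as
 * soon as the pending interrupt becomes deliverable.
 */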
5306
5307 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
5308 {
5309         u32 exit_intr_info;
5310
5311         if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
5312               || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
5313                 return;
5314
5315         vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5316         exit_intr_info = vmx->exit_intr_info;
5317
5318         /* Handle machine checks before interrupts are enabled */
5319         if (is_machine_check(exit_intr_info))
5320                 kvm_machine_check();
5321
5322         /* We need to handle NMIs before interrupts are enabled */
5323         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
5324             (exit_intr_info & INTR_INFO_VALID_MASK)) {
5325                 kvm_before_handle_nmi(&vmx->vcpu);
5326                 asm("int $2");
5327                 kvm_after_handle_nmi(&vmx->vcpu);
5328         }
5329 }
5330
5331 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
5332 {
5333         u32 exit_intr_info;
5334         bool unblock_nmi;
5335         u8 vector;
5336         bool idtv_info_valid;
5337
5338         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
5339
5340         if (cpu_has_virtual_nmis()) {
5341                 if (vmx->nmi_known_unmasked)
5342                         return;
5343                 /*
5344                  * Can't use vmx->exit_intr_info since we're not sure what
5345                  * the exit reason is.
5346                  */
5347                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5348                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
5349                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
5350                 /*
5351                  * SDM 3: 27.7.1.2 (September 2008)
5352                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
5353                  * a guest IRET fault.
5354                  * SDM 3: 23.2.2 (September 2008)
5355                  * Bit 12 is undefined in any of the following cases:
5356                  *  If the VM exit sets the valid bit in the IDT-vectoring
5357                  *   information field.
5358                  *  If the VM exit is due to a double fault.
5359                  */
5360                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
5361                     vector != DF_VECTOR && !idtv_info_valid)
5362                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5363                                       GUEST_INTR_STATE_NMI);
5364                 else
5365                         vmx->nmi_known_unmasked =
5366                                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
5367                                   & GUEST_INTR_STATE_NMI);
5368         } else if (unlikely(vmx->soft_vnmi_blocked))
5369                 vmx->vnmi_blocked_time +=
5370                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
5371 }
5372
5373 static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
5374                                       u32 idt_vectoring_info,
5375                                       int instr_len_field,
5376                                       int error_code_field)
5377 {
5378         u8 vector;
5379         int type;
5380         bool idtv_info_valid;
5381
5382         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
5383
5384         vmx->vcpu.arch.nmi_injected = false;
5385         kvm_clear_exception_queue(&vmx->vcpu);
5386         kvm_clear_interrupt_queue(&vmx->vcpu);
5387
5388         if (!idtv_info_valid)
5389                 return;
5390
5391         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
5392
5393         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
5394         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
5395
5396         switch (type) {
5397         case INTR_TYPE_NMI_INTR:
5398                 vmx->vcpu.arch.nmi_injected = true;
5399                 /*
5400                  * SDM 3: 27.7.1.2 (September 2008)
5401                  * Clear bit "block by NMI" before VM entry if a NMI
5402                  * delivery faulted.
5403                  */
5404                 vmx_set_nmi_mask(&vmx->vcpu, false);
5405                 break;
5406         case INTR_TYPE_SOFT_EXCEPTION:
5407                 vmx->vcpu.arch.event_exit_inst_len =
5408                         vmcs_read32(instr_len_field);
5409                 /* fall through */
5410         case INTR_TYPE_HARD_EXCEPTION:
5411                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
5412                         u32 err = vmcs_read32(error_code_field);
5413                         kvm_queue_exception_e(&vmx->vcpu, vector, err);
5414                 } else
5415                         kvm_queue_exception(&vmx->vcpu, vector);
5416                 break;
5417         case INTR_TYPE_SOFT_INTR:
5418                 vmx->vcpu.arch.event_exit_inst_len =
5419                         vmcs_read32(instr_len_field);
5420                 /* fall through */
5421         case INTR_TYPE_EXT_INTR:
5422                 kvm_queue_interrupt(&vmx->vcpu, vector,
5423                         type == INTR_TYPE_SOFT_INTR);
5424                 break;
5425         default:
5426                 break;
5427         }
5428 }
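
/*
 * Note: the helper above pulls an event described by the given vectoring
 * info back into KVM's exception/interrupt queues so it can be injected
 * again on the next entry: vmx_complete_interrupts() feeds it the
 * IDT-vectoring info of an exit that interrupted event delivery, while
 * vmx_cancel_injection() below feeds it the VM-entry fields to take back
 * an injection that never reached the guest.
 */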
5429
5430 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
5431 {
5432         __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
5433                                   VM_EXIT_INSTRUCTION_LEN,
5434                                   IDT_VECTORING_ERROR_CODE);
5435 }
5436
5437 static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
5438 {
5439         __vmx_complete_interrupts(to_vmx(vcpu),
5440                                   vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
5441                                   VM_ENTRY_INSTRUCTION_LEN,
5442                                   VM_ENTRY_EXCEPTION_ERROR_CODE);
5443
5444         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
5445 }
5446
5447 #ifdef CONFIG_X86_64
5448 #define R "r"
5449 #define Q "q"
5450 #else
5451 #define R "e"
5452 #define Q "l"
5453 #endif
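
/*
 * R and Q size register names and stack operations for the host word
 * size: on 64-bit builds "%%"R"ax" expands to %rax and "pop"Q" to popq,
 * while on 32-bit builds they become %eax and popl.
 */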
5454
5455 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
5456 {
5457         struct vcpu_vmx *vmx = to_vmx(vcpu);
5458
5459         /* Record the guest's net vcpu time for enforced NMI injections. */
5460         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
5461                 vmx->entry_time = ktime_get();
5462
        /* Don't enter VMX if guest state is invalid; let the exit handler
           start emulation until we arrive back at a valid state */
5465         if (vmx->emulation_required && emulate_invalid_guest_state)
5466                 return;
5467
5468         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
5469                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
5470         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
5471                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
5472
5473         /* When single-stepping over STI and MOV SS, we must clear the
5474          * corresponding interruptibility bits in the guest state. Otherwise
         * vmentry fails as it then expects bit 14 (BS) of the pending debug
         * exceptions field to be set, but that's not correct for the guest
         * debugging case. */
5478         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5479                 vmx_set_interrupt_shadow(vcpu, 0);
5480
5481         vmx->__launched = vmx->loaded_vmcs->launched;
5482         asm(
5483                 /* Store host registers */
5484                 "push %%"R"dx; push %%"R"bp;"
5485                 "push %%"R"cx \n\t" /* placeholder for guest rcx */
5486                 "push %%"R"cx \n\t"
5487                 "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
5488                 "je 1f \n\t"
5489                 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
5490                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
5491                 "1: \n\t"
5492                 /* Reload cr2 if changed */
5493                 "mov %c[cr2](%0), %%"R"ax \n\t"
5494                 "mov %%cr2, %%"R"dx \n\t"
5495                 "cmp %%"R"ax, %%"R"dx \n\t"
5496                 "je 2f \n\t"
5497                 "mov %%"R"ax, %%cr2 \n\t"
5498                 "2: \n\t"
                /* Check if vmlaunch or vmresume is needed */
5500                 "cmpl $0, %c[launched](%0) \n\t"
5501                 /* Load guest registers.  Don't clobber flags. */
5502                 "mov %c[rax](%0), %%"R"ax \n\t"
5503                 "mov %c[rbx](%0), %%"R"bx \n\t"
5504                 "mov %c[rdx](%0), %%"R"dx \n\t"
5505                 "mov %c[rsi](%0), %%"R"si \n\t"
5506                 "mov %c[rdi](%0), %%"R"di \n\t"
5507                 "mov %c[rbp](%0), %%"R"bp \n\t"
5508 #ifdef CONFIG_X86_64
5509                 "mov %c[r8](%0),  %%r8  \n\t"
5510                 "mov %c[r9](%0),  %%r9  \n\t"
5511                 "mov %c[r10](%0), %%r10 \n\t"
5512                 "mov %c[r11](%0), %%r11 \n\t"
5513                 "mov %c[r12](%0), %%r12 \n\t"
5514                 "mov %c[r13](%0), %%r13 \n\t"
5515                 "mov %c[r14](%0), %%r14 \n\t"
5516                 "mov %c[r15](%0), %%r15 \n\t"
5517 #endif
5518                 "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
5519
5520                 /* Enter guest mode */
5521                 "jne .Llaunched \n\t"
5522                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
5523                 "jmp .Lkvm_vmx_return \n\t"
5524                 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
5525                 ".Lkvm_vmx_return: "
5526                 /* Save guest registers, load host registers, keep flags */
5527                 "mov %0, %c[wordsize](%%"R"sp) \n\t"
5528                 "pop %0 \n\t"
5529                 "mov %%"R"ax, %c[rax](%0) \n\t"
5530                 "mov %%"R"bx, %c[rbx](%0) \n\t"
5531                 "pop"Q" %c[rcx](%0) \n\t"
5532                 "mov %%"R"dx, %c[rdx](%0) \n\t"
5533                 "mov %%"R"si, %c[rsi](%0) \n\t"
5534                 "mov %%"R"di, %c[rdi](%0) \n\t"
5535                 "mov %%"R"bp, %c[rbp](%0) \n\t"
5536 #ifdef CONFIG_X86_64
5537                 "mov %%r8,  %c[r8](%0) \n\t"
5538                 "mov %%r9,  %c[r9](%0) \n\t"
5539                 "mov %%r10, %c[r10](%0) \n\t"
5540                 "mov %%r11, %c[r11](%0) \n\t"
5541                 "mov %%r12, %c[r12](%0) \n\t"
5542                 "mov %%r13, %c[r13](%0) \n\t"
5543                 "mov %%r14, %c[r14](%0) \n\t"
5544                 "mov %%r15, %c[r15](%0) \n\t"
5545 #endif
5546                 "mov %%cr2, %%"R"ax   \n\t"
5547                 "mov %%"R"ax, %c[cr2](%0) \n\t"
5548
5549                 "pop  %%"R"bp; pop  %%"R"dx \n\t"
5550                 "setbe %c[fail](%0) \n\t"
5551               : : "c"(vmx), "d"((unsigned long)HOST_RSP),
5552                 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
5553                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
5554                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
5555                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
5556                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
5557                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
5558                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
5559                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
5560                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
5561                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
5562 #ifdef CONFIG_X86_64
5563                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
5564                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
5565                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
5566                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
5567                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
5568                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
5569                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
5570                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
5571 #endif
5572                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
5573                 [wordsize]"i"(sizeof(ulong))
5574               : "cc", "memory"
5575                 , R"ax", R"bx", R"di", R"si"
5576 #ifdef CONFIG_X86_64
5577                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
5578 #endif
5579               );
5580
5581         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
5582                                   | (1 << VCPU_EXREG_RFLAGS)
5583                                   | (1 << VCPU_EXREG_CPL)
5584                                   | (1 << VCPU_EXREG_PDPTR)
5585                                   | (1 << VCPU_EXREG_SEGMENTS)
5586                                   | (1 << VCPU_EXREG_CR3));
5587         vcpu->arch.regs_dirty = 0;
5588
5589         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
5590
5591         asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
5592         vmx->loaded_vmcs->launched = 1;
5593
5594         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
5595
5596         vmx_complete_atomic_exit(vmx);
5597         vmx_recover_nmi_blocking(vmx);
5598         vmx_complete_interrupts(vmx);
5599 }
5600
5601 #undef R
5602 #undef Q
5603
5604 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
5605 {
5606         struct vcpu_vmx *vmx = to_vmx(vcpu);
5607
5608         free_vpid(vmx);
5609         free_nested(vmx);
5610         free_loaded_vmcs(vmx->loaded_vmcs);
5611         kfree(vmx->guest_msrs);
5612         kvm_vcpu_uninit(vcpu);
5613         kmem_cache_free(kvm_vcpu_cache, vmx);
5614 }
5615
5616 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
5617 {
5618         int err;
5619         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
5620         int cpu;
5621
5622         if (!vmx)
5623                 return ERR_PTR(-ENOMEM);
5624
5625         allocate_vpid(vmx);
5626
5627         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
5628         if (err)
5629                 goto free_vcpu;
5630
5631         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        err = -ENOMEM;
        if (!vmx->guest_msrs)
                goto uninit_vcpu;
5636
5637         vmx->loaded_vmcs = &vmx->vmcs01;
5638         vmx->loaded_vmcs->vmcs = alloc_vmcs();
5639         if (!vmx->loaded_vmcs->vmcs)
5640                 goto free_msrs;
5641         if (!vmm_exclusive)
5642                 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
5643         loaded_vmcs_init(vmx->loaded_vmcs);
5644         if (!vmm_exclusive)
5645                 kvm_cpu_vmxoff();
5646
5647         cpu = get_cpu();
5648         vmx_vcpu_load(&vmx->vcpu, cpu);
5649         vmx->vcpu.cpu = cpu;
5650         err = vmx_vcpu_setup(vmx);
5651         vmx_vcpu_put(&vmx->vcpu);
5652         put_cpu();
5653         if (err)
5654                 goto free_vmcs;
        if (vm_need_virtualize_apic_accesses(kvm)) {
                err = alloc_apic_access_page(kvm);
                if (err)
                        goto free_vmcs;
        }
5659
5660         if (enable_ept) {
5661                 if (!kvm->arch.ept_identity_map_addr)
5662                         kvm->arch.ept_identity_map_addr =
5663                                 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
5664                 err = -ENOMEM;
5665                 if (alloc_identity_pagetable(kvm) != 0)
5666                         goto free_vmcs;
5667                 if (!init_rmode_identity_map(kvm))
5668                         goto free_vmcs;
5669         }
5670
5671         vmx->nested.current_vmptr = -1ull;
5672         vmx->nested.current_vmcs12 = NULL;
5673
5674         return &vmx->vcpu;
5675
5676 free_vmcs:
5677         free_vmcs(vmx->loaded_vmcs->vmcs);
5678 free_msrs:
5679         kfree(vmx->guest_msrs);
5680 uninit_vcpu:
5681         kvm_vcpu_uninit(&vmx->vcpu);
5682 free_vcpu:
5683         free_vpid(vmx);
5684         kmem_cache_free(kvm_vcpu_cache, vmx);
5685         return ERR_PTR(err);
5686 }
5687
5688 static void __init vmx_check_processor_compat(void *rtn)
5689 {
5690         struct vmcs_config vmcs_conf;
5691
5692         *(int *)rtn = 0;
5693         if (setup_vmcs_config(&vmcs_conf) < 0)
5694                 *(int *)rtn = -EIO;
5695         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
5696                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
5697                                 smp_processor_id());
5698                 *(int *)rtn = -EIO;
5699         }
5700 }
5701
5702 static int get_ept_level(void)
5703 {
5704         return VMX_EPT_DEFAULT_GAW + 1;
5705 }
5706
5707 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5708 {
5709         u64 ret;
5710
        /* For the VT-d and EPT combination:
         * 1. MMIO: always map as UC.
         * 2. EPT with VT-d:
         *   a. VT-d without the snooping control feature: cache correctness
         *      cannot be guaranteed, so try to trust the guest's type.
         *   b. VT-d with the snooping control feature: the VT-d engine's
         *      snooping control guarantees cache correctness, so just set
         *      WB to stay consistent with the host, as in item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
         *    consistent with the host MTRR.
         */
5722         if (is_mmio)
5723                 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
5724         else if (vcpu->kvm->arch.iommu_domain &&
5725                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
5726                 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
5727                       VMX_EPT_MT_EPTE_SHIFT;
5728         else
5729                 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
5730                         | VMX_EPT_IPAT_BIT;
5731
5732         return ret;
5733 }
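
/*
 * Example EPTE memory-type bits produced above (bits 5:3 hold the type,
 * bit 6 is "ignore PAT"): MMIO yields 0 << 3 (UC), while the
 * EPT-without-VT-d case yields (6 << 3) | (1 << 6), i.e. WB with the
 * guest PAT ignored.
 */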
5734
5735 #define _ER(x) { EXIT_REASON_##x, #x }
5736
5737 static const struct trace_print_flags vmx_exit_reasons_str[] = {
5738         _ER(EXCEPTION_NMI),
5739         _ER(EXTERNAL_INTERRUPT),
5740         _ER(TRIPLE_FAULT),
5741         _ER(PENDING_INTERRUPT),
5742         _ER(NMI_WINDOW),
5743         _ER(TASK_SWITCH),
5744         _ER(CPUID),
5745         _ER(HLT),
5746         _ER(INVLPG),
5747         _ER(RDPMC),
5748         _ER(RDTSC),
5749         _ER(VMCALL),
5750         _ER(VMCLEAR),
5751         _ER(VMLAUNCH),
5752         _ER(VMPTRLD),
5753         _ER(VMPTRST),
5754         _ER(VMREAD),
5755         _ER(VMRESUME),
5756         _ER(VMWRITE),
5757         _ER(VMOFF),
5758         _ER(VMON),
5759         _ER(CR_ACCESS),
5760         _ER(DR_ACCESS),
5761         _ER(IO_INSTRUCTION),
5762         _ER(MSR_READ),
5763         _ER(MSR_WRITE),
5764         _ER(MWAIT_INSTRUCTION),
5765         _ER(MONITOR_INSTRUCTION),
5766         _ER(PAUSE_INSTRUCTION),
5767         _ER(MCE_DURING_VMENTRY),
5768         _ER(TPR_BELOW_THRESHOLD),
5769         _ER(APIC_ACCESS),
5770         _ER(EPT_VIOLATION),
5771         _ER(EPT_MISCONFIG),
5772         _ER(WBINVD),
5773         { -1, NULL }
5774 };
5775
5776 #undef _ER
5777
5778 static int vmx_get_lpage_level(void)
5779 {
5780         if (enable_ept && !cpu_has_vmx_ept_1g_page())
5781                 return PT_DIRECTORY_LEVEL;
5782         else
5783                 /* For shadow and EPT supported 1GB page */
5784                 return PT_PDPE_LEVEL;
5785 }
5786
5787 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
5788 {
5789         struct kvm_cpuid_entry2 *best;
5790         struct vcpu_vmx *vmx = to_vmx(vcpu);
5791         u32 exec_control;
5792
5793         vmx->rdtscp_enabled = false;
5794         if (vmx_rdtscp_supported()) {
5795                 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
5796                 if (exec_control & SECONDARY_EXEC_RDTSCP) {
5797                         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
5798                         if (best && (best->edx & bit(X86_FEATURE_RDTSCP))) {
5799                                 vmx->rdtscp_enabled = true;
5800                         } else {
5801                                 exec_control &= ~SECONDARY_EXEC_RDTSCP;
5802                                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
5803                                                 exec_control);
5804                         }
5805                 }
5806         }
5807 }
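/*
 * Rationale: when the "enable RDTSCP" secondary control is clear, a
 * guest RDTSCP raises #UD. The update above therefore keeps the control
 * set only while guest CPUID (leaf 0x80000001, EDX) advertises RDTSCP,
 * so a guest that was not told about the instruction gets the #UD it
 * expects instead of a silently working RDTSCP.
 */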
5808
5809 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5810 {
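	/* Intentionally a no-op: no VMX-specific CPUID leaves to adjust. */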
5811 }
5812
5813 static int vmx_check_intercept(struct kvm_vcpu *vcpu,
5814                                struct x86_instruction_info *info,
5815                                enum x86_intercept_stage stage)
5816 {
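	/* No emulator intercepts are refused for VMX yet; always continue. */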
5817         return X86EMUL_CONTINUE;
5818 }
5819
5820 static struct kvm_x86_ops vmx_x86_ops = {
5821         .cpu_has_kvm_support = cpu_has_kvm_support,
5822         .disabled_by_bios = vmx_disabled_by_bios,
5823         .hardware_setup = hardware_setup,
5824         .hardware_unsetup = hardware_unsetup,
5825         .check_processor_compatibility = vmx_check_processor_compat,
5826         .hardware_enable = hardware_enable,
5827         .hardware_disable = hardware_disable,
5828         .cpu_has_accelerated_tpr = report_flexpriority,
5829
5830         .vcpu_create = vmx_create_vcpu,
5831         .vcpu_free = vmx_free_vcpu,
5832         .vcpu_reset = vmx_vcpu_reset,
5833
5834         .prepare_guest_switch = vmx_save_host_state,
5835         .vcpu_load = vmx_vcpu_load,
5836         .vcpu_put = vmx_vcpu_put,
5837
5838         .set_guest_debug = set_guest_debug,
5839         .get_msr = vmx_get_msr,
5840         .set_msr = vmx_set_msr,
5841         .get_segment_base = vmx_get_segment_base,
5842         .get_segment = vmx_get_segment,
5843         .set_segment = vmx_set_segment,
5844         .get_cpl = vmx_get_cpl,
5845         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
5846         .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
5847         .decache_cr3 = vmx_decache_cr3,
5848         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
5849         .set_cr0 = vmx_set_cr0,
5850         .set_cr3 = vmx_set_cr3,
5851         .set_cr4 = vmx_set_cr4,
5852         .set_efer = vmx_set_efer,
5853         .get_idt = vmx_get_idt,
5854         .set_idt = vmx_set_idt,
5855         .get_gdt = vmx_get_gdt,
5856         .set_gdt = vmx_set_gdt,
5857         .set_dr7 = vmx_set_dr7,
5858         .cache_reg = vmx_cache_reg,
5859         .get_rflags = vmx_get_rflags,
5860         .set_rflags = vmx_set_rflags,
5861         .fpu_activate = vmx_fpu_activate,
5862         .fpu_deactivate = vmx_fpu_deactivate,
5863
5864         .tlb_flush = vmx_flush_tlb,
5865
5866         .run = vmx_vcpu_run,
5867         .handle_exit = vmx_handle_exit,
5868         .skip_emulated_instruction = skip_emulated_instruction,
5869         .set_interrupt_shadow = vmx_set_interrupt_shadow,
5870         .get_interrupt_shadow = vmx_get_interrupt_shadow,
5871         .patch_hypercall = vmx_patch_hypercall,
5872         .set_irq = vmx_inject_irq,
5873         .set_nmi = vmx_inject_nmi,
5874         .queue_exception = vmx_queue_exception,
5875         .cancel_injection = vmx_cancel_injection,
5876         .interrupt_allowed = vmx_interrupt_allowed,
5877         .nmi_allowed = vmx_nmi_allowed,
5878         .get_nmi_mask = vmx_get_nmi_mask,
5879         .set_nmi_mask = vmx_set_nmi_mask,
5880         .enable_nmi_window = enable_nmi_window,
5881         .enable_irq_window = enable_irq_window,
5882         .update_cr8_intercept = update_cr8_intercept,
5883
5884         .set_tss_addr = vmx_set_tss_addr,
5885         .get_tdp_level = get_ept_level,
5886         .get_mt_mask = vmx_get_mt_mask,
5887
5888         .get_exit_info = vmx_get_exit_info,
5889         .exit_reasons_str = vmx_exit_reasons_str,
5890
5891         .get_lpage_level = vmx_get_lpage_level,
5892
5893         .cpuid_update = vmx_cpuid_update,
5894
5895         .rdtscp_supported = vmx_rdtscp_supported,
5896
5897         .set_supported_cpuid = vmx_set_supported_cpuid,
5898
5899         .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
5900
5901         .set_tsc_khz = vmx_set_tsc_khz,
5902         .write_tsc_offset = vmx_write_tsc_offset,
5903         .adjust_tsc_offset = vmx_adjust_tsc_offset,
5904         .compute_tsc_offset = vmx_compute_tsc_offset,
5905
5906         .set_tdp_cr3 = vmx_set_cr3,
5907
5908         .check_intercept = vmx_check_intercept,
5909 };
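/*
 * Everything above is reached only through this table: kvm_init() (below)
 * registers it, and the generic x86 code then dispatches through the
 * kvm_x86_ops pointer, e.g. (a sketch of the per-vcpu hot path):
 *
 *	kvm_x86_ops->prepare_guest_switch(vcpu);
 *	kvm_x86_ops->run(vcpu);
 *	r = kvm_x86_ops->handle_exit(vcpu);
 */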
5910
5911 static int __init vmx_init(void)
5912 {
5913         int r, i;
5914
5915         rdmsrl_safe(MSR_EFER, &host_efer);
5916
5917         for (i = 0; i < NR_VMX_MSR; ++i)
5918                 kvm_define_shared_msr(i, vmx_msr_index[i]);
5919
5920         vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
5921         if (!vmx_io_bitmap_a)
5922                 return -ENOMEM;
5923
5924         vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
5925         if (!vmx_io_bitmap_b) {
5926                 r = -ENOMEM;
5927                 goto out;
5928         }
5929
5930         vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
5931         if (!vmx_msr_bitmap_legacy) {
5932                 r = -ENOMEM;
5933                 goto out1;
5934         }
5935
5936         vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
5937         if (!vmx_msr_bitmap_longmode) {
5938                 r = -ENOMEM;
5939                 goto out2;
5940         }
5941
5942         /*
5943          * Allow direct access to the PC debug port (0x80): guests often write
5944          * to it for short I/O delays, and exiting on every write would only
5945          * slow them down.
5946         memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
5947         clear_bit(0x80, vmx_io_bitmap_a);
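	/*
	 * I/O bitmap semantics: a set bit forces a vmexit on access to that
	 * port, so the all-0xff bitmaps intercept every port and clearing
	 * bit 0x80 exempts only port 0x80.
	 */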
5948
5949         memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
5950
5951         memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
5952         memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
5953
5954         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
5955
5956         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
5957                      __alignof__(struct vcpu_vmx), THIS_MODULE);
5958         if (r)
5959                 goto out3;
5960
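	/*
	 * These MSRs are touched constantly (segment bases, SYSENTER state),
	 * so interception is disabled to spare a vmexit per access. The
	 * second argument selects long-mode-only passthrough, which is why
	 * it is true just for MSR_KERNEL_GS_BASE.
	 */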
5961         vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
5962         vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
5963         vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
5964         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
5965         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
5966         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
5967
5968         if (enable_ept) {
5969                 bypass_guest_pf = 0;
5970                 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
5971                                 VMX_EPT_EXECUTABLE_MASK);
5972                 kvm_enable_tdp();
5973         } else {
5974                 kvm_disable_tdp();
5975         }
5976         if (bypass_guest_pf)
5977                 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
5978
5979         return 0;
5980
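/*
 * Error unwinding: each label below frees exactly the allocations made
 * before the failing step, in reverse order of allocation.
 */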
5981 out3:
5982         free_page((unsigned long)vmx_msr_bitmap_longmode);
5983 out2:
5984         free_page((unsigned long)vmx_msr_bitmap_legacy);
5985 out1:
5986         free_page((unsigned long)vmx_io_bitmap_b);
5987 out:
5988         free_page((unsigned long)vmx_io_bitmap_a);
5989         return r;
5990 }
5991
5992 static void __exit vmx_exit(void)
5993 {
5994         free_page((unsigned long)vmx_msr_bitmap_legacy);
5995         free_page((unsigned long)vmx_msr_bitmap_longmode);
5996         free_page((unsigned long)vmx_io_bitmap_b);
5997         free_page((unsigned long)vmx_io_bitmap_a);
5998
5999         kvm_exit();
6000 }
6001
6002 module_init(vmx_init)
6003 module_exit(vmx_exit)