KVM: VMX: fix use after free of vmx->loaded_vmcs
[pandora-kernel.git] arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/sched.h>
29 #include <linux/moduleparam.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/slab.h>
33 #include <linux/tboot.h>
34 #include "kvm_cache_regs.h"
35 #include "x86.h"
36
37 #include <asm/io.h>
38 #include <asm/desc.h>
39 #include <asm/vmx.h>
40 #include <asm/virtext.h>
41 #include <asm/mce.h>
42 #include <asm/i387.h>
43 #include <asm/xcr.h>
44 #include <asm/perf_event.h>
45 #include <asm/kexec.h>
46
47 #include "trace.h"
48
49 #define __ex(x) __kvm_handle_fault_on_reboot(x)
50 #define __ex_clear(x, reg) \
51         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
52
53 MODULE_AUTHOR("Qumranet");
54 MODULE_LICENSE("GPL");
55
56 static const struct x86_cpu_id vmx_cpu_id[] = {
57         X86_FEATURE_MATCH(X86_FEATURE_VMX),
58         {}
59 };
60 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
61
62 static bool __read_mostly enable_vpid = 1;
63 module_param_named(vpid, enable_vpid, bool, 0444);
64
65 static bool __read_mostly flexpriority_enabled = 1;
66 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
67
68 static bool __read_mostly enable_ept = 1;
69 module_param_named(ept, enable_ept, bool, S_IRUGO);
70
71 static bool __read_mostly enable_unrestricted_guest = 1;
72 module_param_named(unrestricted_guest,
73                         enable_unrestricted_guest, bool, S_IRUGO);
74
75 static bool __read_mostly enable_ept_ad_bits = 1;
76 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
77
78 static bool __read_mostly emulate_invalid_guest_state = true;
79 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
80
81 static bool __read_mostly vmm_exclusive = 1;
82 module_param(vmm_exclusive, bool, S_IRUGO);
83
84 static bool __read_mostly fasteoi = 1;
85 module_param(fasteoi, bool, S_IRUGO);
86
87 static bool __read_mostly enable_apicv = 1;
88 module_param(enable_apicv, bool, S_IRUGO);
89
90 static bool __read_mostly enable_shadow_vmcs = 1;
91 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
92 /*
93  * If nested=1, nested virtualization is supported, i.e., guests may use
94  * VMX and be hypervisors for their own guests. If nested=0, guests may not
95  * use VMX instructions.
96  */
97 static bool __read_mostly nested = 0;
98 module_param(nested, bool, S_IRUGO);
99
100 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
101 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
102 #define KVM_VM_CR0_ALWAYS_ON                                            \
103         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
104 #define KVM_CR4_GUEST_OWNED_BITS                                      \
105         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
106          | X86_CR4_OSXMMEXCPT)
107
108 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
109 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
110
111 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
112
113 /*
114  * These two parameters are used to configure the controls for Pause-Loop Exiting:
115  * ple_gap:    upper bound on the amount of time between two successive
116  *             executions of PAUSE in a loop; a non-zero value also indicates
117  *             that PLE is enabled. In tests this time is usually below 128 cycles.
118  * ple_window: upper bound on the amount of time a guest is allowed to execute
119  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
120  *             less than 2^12 cycles.
121  * Time is measured with a counter that runs at the same rate as the TSC;
122  * refer to SDM volume 3B, sections 21.6.13 and 22.1.3.
123  */
124 #define KVM_VMX_DEFAULT_PLE_GAP    128
125 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
126 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
127 module_param(ple_gap, int, S_IRUGO);
128
129 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
130 module_param(ple_window, int, S_IRUGO);
131
132 extern const ulong vmx_return;
133
134 #define NR_AUTOLOAD_MSRS 8
135 #define VMCS02_POOL_SIZE 1
136
137 struct vmcs {
138         u32 revision_id;
139         u32 abort;
140         char data[0];
141 };
142
143 /*
144  * Track a VMCS that may be loaded on a certain CPU. If it is loaded (cpu != -1), also
145  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
146  * loaded on this CPU (so we can clear them if the CPU goes down).
147  */
148 struct loaded_vmcs {
149         struct vmcs *vmcs;
150         int cpu;
151         int launched;
152         struct list_head loaded_vmcss_on_cpu_link;
153 };
154
155 struct shared_msr_entry {
156         unsigned index;
157         u64 data;
158         u64 mask;
159 };
160
161 /*
162  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
163  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
164  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
165  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
166  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
167  * More than one of these structures may exist, if L1 runs multiple L2 guests.
168  * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
169  * underlying hardware which will be used to run L2.
170  * This structure is packed to ensure that its layout is identical across
171  * machines (necessary for live migration).
172  * If there are changes in this struct, VMCS12_REVISION must be changed.
173  */
174 typedef u64 natural_width;
175 struct __packed vmcs12 {
176         /* According to the Intel spec, a VMCS region must start with the
177          * following two fields. Then follow implementation-specific data.
178          */
179         u32 revision_id;
180         u32 abort;
181
182         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
183         u32 padding[7]; /* room for future expansion */
184
185         u64 io_bitmap_a;
186         u64 io_bitmap_b;
187         u64 msr_bitmap;
188         u64 vm_exit_msr_store_addr;
189         u64 vm_exit_msr_load_addr;
190         u64 vm_entry_msr_load_addr;
191         u64 tsc_offset;
192         u64 virtual_apic_page_addr;
193         u64 apic_access_addr;
194         u64 ept_pointer;
195         u64 guest_physical_address;
196         u64 vmcs_link_pointer;
197         u64 guest_ia32_debugctl;
198         u64 guest_ia32_pat;
199         u64 guest_ia32_efer;
200         u64 guest_ia32_perf_global_ctrl;
201         u64 guest_pdptr0;
202         u64 guest_pdptr1;
203         u64 guest_pdptr2;
204         u64 guest_pdptr3;
205         u64 host_ia32_pat;
206         u64 host_ia32_efer;
207         u64 host_ia32_perf_global_ctrl;
208         u64 padding64[8]; /* room for future expansion */
209         /*
210          * To allow migration of L1 (complete with its L2 guests) between
211          * machines of different natural widths (32 or 64 bit), we cannot have
212          * unsigned long fields with no explicit size. We use u64 (aliased
213          * natural_width) instead. Luckily, x86 is little-endian.
214          */
215         natural_width cr0_guest_host_mask;
216         natural_width cr4_guest_host_mask;
217         natural_width cr0_read_shadow;
218         natural_width cr4_read_shadow;
219         natural_width cr3_target_value0;
220         natural_width cr3_target_value1;
221         natural_width cr3_target_value2;
222         natural_width cr3_target_value3;
223         natural_width exit_qualification;
224         natural_width guest_linear_address;
225         natural_width guest_cr0;
226         natural_width guest_cr3;
227         natural_width guest_cr4;
228         natural_width guest_es_base;
229         natural_width guest_cs_base;
230         natural_width guest_ss_base;
231         natural_width guest_ds_base;
232         natural_width guest_fs_base;
233         natural_width guest_gs_base;
234         natural_width guest_ldtr_base;
235         natural_width guest_tr_base;
236         natural_width guest_gdtr_base;
237         natural_width guest_idtr_base;
238         natural_width guest_dr7;
239         natural_width guest_rsp;
240         natural_width guest_rip;
241         natural_width guest_rflags;
242         natural_width guest_pending_dbg_exceptions;
243         natural_width guest_sysenter_esp;
244         natural_width guest_sysenter_eip;
245         natural_width host_cr0;
246         natural_width host_cr3;
247         natural_width host_cr4;
248         natural_width host_fs_base;
249         natural_width host_gs_base;
250         natural_width host_tr_base;
251         natural_width host_gdtr_base;
252         natural_width host_idtr_base;
253         natural_width host_ia32_sysenter_esp;
254         natural_width host_ia32_sysenter_eip;
255         natural_width host_rsp;
256         natural_width host_rip;
257         natural_width paddingl[8]; /* room for future expansion */
258         u32 pin_based_vm_exec_control;
259         u32 cpu_based_vm_exec_control;
260         u32 exception_bitmap;
261         u32 page_fault_error_code_mask;
262         u32 page_fault_error_code_match;
263         u32 cr3_target_count;
264         u32 vm_exit_controls;
265         u32 vm_exit_msr_store_count;
266         u32 vm_exit_msr_load_count;
267         u32 vm_entry_controls;
268         u32 vm_entry_msr_load_count;
269         u32 vm_entry_intr_info_field;
270         u32 vm_entry_exception_error_code;
271         u32 vm_entry_instruction_len;
272         u32 tpr_threshold;
273         u32 secondary_vm_exec_control;
274         u32 vm_instruction_error;
275         u32 vm_exit_reason;
276         u32 vm_exit_intr_info;
277         u32 vm_exit_intr_error_code;
278         u32 idt_vectoring_info_field;
279         u32 idt_vectoring_error_code;
280         u32 vm_exit_instruction_len;
281         u32 vmx_instruction_info;
282         u32 guest_es_limit;
283         u32 guest_cs_limit;
284         u32 guest_ss_limit;
285         u32 guest_ds_limit;
286         u32 guest_fs_limit;
287         u32 guest_gs_limit;
288         u32 guest_ldtr_limit;
289         u32 guest_tr_limit;
290         u32 guest_gdtr_limit;
291         u32 guest_idtr_limit;
292         u32 guest_es_ar_bytes;
293         u32 guest_cs_ar_bytes;
294         u32 guest_ss_ar_bytes;
295         u32 guest_ds_ar_bytes;
296         u32 guest_fs_ar_bytes;
297         u32 guest_gs_ar_bytes;
298         u32 guest_ldtr_ar_bytes;
299         u32 guest_tr_ar_bytes;
300         u32 guest_interruptibility_info;
301         u32 guest_activity_state;
302         u32 guest_sysenter_cs;
303         u32 host_ia32_sysenter_cs;
304         u32 vmx_preemption_timer_value;
305         u32 padding32[7]; /* room for future expansion */
306         u16 virtual_processor_id;
307         u16 guest_es_selector;
308         u16 guest_cs_selector;
309         u16 guest_ss_selector;
310         u16 guest_ds_selector;
311         u16 guest_fs_selector;
312         u16 guest_gs_selector;
313         u16 guest_ldtr_selector;
314         u16 guest_tr_selector;
315         u16 host_es_selector;
316         u16 host_cs_selector;
317         u16 host_ss_selector;
318         u16 host_ds_selector;
319         u16 host_fs_selector;
320         u16 host_gs_selector;
321         u16 host_tr_selector;
322 };
323
324 /*
325  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
326  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
327  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
328  */
329 #define VMCS12_REVISION 0x11e57ed0
330
331 /*
332  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
333  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
334  * the current implementation, 4K is reserved to avoid future complications.
335  */
336 #define VMCS12_SIZE 0x1000
337
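/*
 * Illustrative sketch (not the handler used by this file): whatever path
 * maps a guest-supplied VMCS region, e.g. on VMPTRLD, is expected to check
 * that the first field of the mapped page matches VMCS12_REVISION before
 * treating the page as a struct vmcs12. The helper name below is
 * hypothetical.
 */
static inline bool __maybe_unused vmcs12_sketch_revision_ok(struct vmcs12 *vmcs12)
{
	return vmcs12->revision_id == VMCS12_REVISION;
}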
338 /* Used to remember the last vmcs02 used for some recently used vmcs12s */
339 struct vmcs02_list {
340         struct list_head list;
341         gpa_t vmptr;
342         struct loaded_vmcs vmcs02;
343 };
344
345 /*
346  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
347  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
348  */
349 struct nested_vmx {
350         /* Has the level1 guest done vmxon? */
351         bool vmxon;
352
353         /* The guest-physical address of the current VMCS L1 keeps for L2 */
354         gpa_t current_vmptr;
355         /* The host-usable pointer to the above */
356         struct page *current_vmcs12_page;
357         struct vmcs12 *current_vmcs12;
358         struct vmcs *current_shadow_vmcs;
359         /*
360          * Indicates whether the shadow vmcs must be updated with the
361          * data held by vmcs12.
362          */
363         bool sync_shadow_vmcs;
364
365         /* vmcs02_list cache of VMCSs recently used to run L2 guests */
366         struct list_head vmcs02_pool;
367         int vmcs02_num;
368         u64 vmcs01_tsc_offset;
369         /* L2 must run next, and mustn't decide to exit to L1. */
370         bool nested_run_pending;
371         /*
372          * Guest pages referred to in vmcs02 with host-physical pointers, so
373          * we must keep them pinned while L2 runs.
374          */
375         struct page *apic_access_page;
376         u64 msr_ia32_feature_control;
377 };
378
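/*
 * Illustrative sketch of how the vmcs02_pool above is meant to be searched:
 * walk the list looking for an entry whose vmptr matches the vmcs12 that L1
 * currently has loaded, and hand back its loaded_vmcs. The helper below is
 * hypothetical; the real pool management is implemented further down in
 * this file.
 */
static __maybe_unused struct loaded_vmcs *
nested_sketch_find_vmcs02(struct nested_vmx *nested, gpa_t vmptr)
{
	struct vmcs02_list *item;

	list_for_each_entry(item, &nested->vmcs02_pool, list)
		if (item->vmptr == vmptr)
			return &item->vmcs02;
	return NULL;
}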
379 #define POSTED_INTR_ON  0
380 /* Posted-Interrupt Descriptor */
381 struct pi_desc {
382         u32 pir[8];     /* Posted interrupt requested */
383         u32 control;    /* bit 0 of control is outstanding notification bit */
384         u32 rsvd[7];
385 } __aligned(64);
386
387 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
388 {
389         return test_and_set_bit(POSTED_INTR_ON,
390                         (unsigned long *)&pi_desc->control);
391 }
392
393 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
394 {
395         return test_and_clear_bit(POSTED_INTR_ON,
396                         (unsigned long *)&pi_desc->control);
397 }
398
399 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
400 {
401         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
402 }
403
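/*
 * Illustrative sketch, not this file's delivery path: the architectural
 * posting sequence is (1) set the vector's bit in PIR, (2) set the
 * outstanding-notification bit, and only then (3) send the notification
 * interrupt to the target CPU (the IPI itself is omitted here). The helper
 * name and its return convention are hypothetical.
 */
static bool __maybe_unused pi_sketch_post_interrupt(struct pi_desc *pi_desc,
						    int vector)
{
	/* Record the vector first, so ON can never be observed without it. */
	if (pi_test_and_set_pir(vector, pi_desc))
		return false;	/* this vector was already pending */

	/* If ON was already set, a notification is already on its way. */
	return !pi_test_and_set_on(pi_desc);
}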
404 struct vcpu_vmx {
405         struct kvm_vcpu       vcpu;
406         unsigned long         host_rsp;
407         u8                    fail;
408         u8                    cpl;
409         bool                  nmi_known_unmasked;
410         u32                   exit_intr_info;
411         u32                   idt_vectoring_info;
412         ulong                 rflags;
413         struct shared_msr_entry *guest_msrs;
414         int                   nmsrs;
415         int                   save_nmsrs;
416         unsigned long         host_idt_base;
417 #ifdef CONFIG_X86_64
418         u64                   msr_host_kernel_gs_base;
419         u64                   msr_guest_kernel_gs_base;
420 #endif
421         u32 vm_entry_controls_shadow;
422         u32 vm_exit_controls_shadow;
423         /*
424          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
425          * non-nested (L1) guest, it always points to vmcs01. For a nested
426          * guest (L2), it points to a different VMCS.
427          */
428         struct loaded_vmcs    vmcs01;
429         struct loaded_vmcs   *loaded_vmcs;
430         bool                  __launched; /* temporary, used in vmx_vcpu_run */
431         struct msr_autoload {
432                 unsigned nr;
433                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
434                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
435         } msr_autoload;
436         struct {
437                 int           loaded;
438                 u16           fs_sel, gs_sel, ldt_sel;
439 #ifdef CONFIG_X86_64
440                 u16           ds_sel, es_sel;
441 #endif
442                 int           gs_ldt_reload_needed;
443                 int           fs_reload_needed;
444         } host_state;
445         struct {
446                 int vm86_active;
447                 ulong save_rflags;
448                 struct kvm_segment segs[8];
449         } rmode;
450         struct {
451                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
452                 struct kvm_save_segment {
453                         u16 selector;
454                         unsigned long base;
455                         u32 limit;
456                         u32 ar;
457                 } seg[8];
458         } segment_cache;
459         int vpid;
460         bool emulation_required;
461
462         /* Support for vnmi-less CPUs */
463         int soft_vnmi_blocked;
464         ktime_t entry_time;
465         s64 vnmi_blocked_time;
466         u32 exit_reason;
467
468         bool rdtscp_enabled;
469
470         /* Posted interrupt descriptor */
471         struct pi_desc pi_desc;
472
473         /* Support for a guest hypervisor (nested VMX) */
474         struct nested_vmx nested;
475 };
476
477 enum segment_cache_field {
478         SEG_FIELD_SEL = 0,
479         SEG_FIELD_BASE = 1,
480         SEG_FIELD_LIMIT = 2,
481         SEG_FIELD_AR = 3,
482
483         SEG_FIELD_NR = 4
484 };
485
486 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
487 {
488         return container_of(vcpu, struct vcpu_vmx, vcpu);
489 }
490
491 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
492 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
493 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
494                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
495
496
497 static const unsigned long shadow_read_only_fields[] = {
498         /*
499          * We do NOT shadow fields that are modified when L0
500          * traps and emulates any vmx instruction (e.g. VMPTRLD,
501          * VMXON...) executed by L1.
502          * For example, VM_INSTRUCTION_ERROR is read
503          * by L1 if a vmx instruction fails (part of the error path).
504          * Note the code assumes this logic. If for some reason
505          * we start shadowing these fields then we need to
506          * force a shadow sync when L0 emulates vmx instructions
507          * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
508          * by nested_vmx_failValid)
509          */
510         VM_EXIT_REASON,
511         VM_EXIT_INTR_INFO,
512         VM_EXIT_INSTRUCTION_LEN,
513         IDT_VECTORING_INFO_FIELD,
514         IDT_VECTORING_ERROR_CODE,
515         VM_EXIT_INTR_ERROR_CODE,
516         EXIT_QUALIFICATION,
517         GUEST_LINEAR_ADDRESS,
518         GUEST_PHYSICAL_ADDRESS
519 };
520 static const int max_shadow_read_only_fields =
521         ARRAY_SIZE(shadow_read_only_fields);
522
523 static const unsigned long shadow_read_write_fields[] = {
524         GUEST_RIP,
525         GUEST_RSP,
526         GUEST_CR0,
527         GUEST_CR3,
528         GUEST_CR4,
529         GUEST_INTERRUPTIBILITY_INFO,
530         GUEST_RFLAGS,
531         GUEST_CS_SELECTOR,
532         GUEST_CS_AR_BYTES,
533         GUEST_CS_LIMIT,
534         GUEST_CS_BASE,
535         GUEST_ES_BASE,
536         CR0_GUEST_HOST_MASK,
537         CR0_READ_SHADOW,
538         CR4_READ_SHADOW,
539         TSC_OFFSET,
540         EXCEPTION_BITMAP,
541         CPU_BASED_VM_EXEC_CONTROL,
542         VM_ENTRY_EXCEPTION_ERROR_CODE,
543         VM_ENTRY_INTR_INFO_FIELD,
544         VM_ENTRY_INSTRUCTION_LEN,
545         VM_ENTRY_EXCEPTION_ERROR_CODE,
546         HOST_FS_BASE,
547         HOST_GS_BASE,
548         HOST_FS_SELECTOR,
549         HOST_GS_SELECTOR
550 };
551 static const int max_shadow_read_write_fields =
552         ARRAY_SIZE(shadow_read_write_fields);
553
554 static const unsigned short vmcs_field_to_offset_table[] = {
555         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
556         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
557         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
558         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
559         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
560         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
561         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
562         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
563         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
564         FIELD(HOST_ES_SELECTOR, host_es_selector),
565         FIELD(HOST_CS_SELECTOR, host_cs_selector),
566         FIELD(HOST_SS_SELECTOR, host_ss_selector),
567         FIELD(HOST_DS_SELECTOR, host_ds_selector),
568         FIELD(HOST_FS_SELECTOR, host_fs_selector),
569         FIELD(HOST_GS_SELECTOR, host_gs_selector),
570         FIELD(HOST_TR_SELECTOR, host_tr_selector),
571         FIELD64(IO_BITMAP_A, io_bitmap_a),
572         FIELD64(IO_BITMAP_B, io_bitmap_b),
573         FIELD64(MSR_BITMAP, msr_bitmap),
574         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
575         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
576         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
577         FIELD64(TSC_OFFSET, tsc_offset),
578         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
579         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
580         FIELD64(EPT_POINTER, ept_pointer),
581         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
582         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
583         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
584         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
585         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
586         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
587         FIELD64(GUEST_PDPTR0, guest_pdptr0),
588         FIELD64(GUEST_PDPTR1, guest_pdptr1),
589         FIELD64(GUEST_PDPTR2, guest_pdptr2),
590         FIELD64(GUEST_PDPTR3, guest_pdptr3),
591         FIELD64(HOST_IA32_PAT, host_ia32_pat),
592         FIELD64(HOST_IA32_EFER, host_ia32_efer),
593         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
594         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
595         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
596         FIELD(EXCEPTION_BITMAP, exception_bitmap),
597         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
598         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
599         FIELD(CR3_TARGET_COUNT, cr3_target_count),
600         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
601         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
602         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
603         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
604         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
605         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
606         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
607         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
608         FIELD(TPR_THRESHOLD, tpr_threshold),
609         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
610         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
611         FIELD(VM_EXIT_REASON, vm_exit_reason),
612         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
613         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
614         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
615         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
616         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
617         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
618         FIELD(GUEST_ES_LIMIT, guest_es_limit),
619         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
620         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
621         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
622         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
623         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
624         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
625         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
626         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
627         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
628         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
629         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
630         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
631         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
632         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
633         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
634         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
635         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
636         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
637         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
638         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
639         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
640         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
641         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
642         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
643         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
644         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
645         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
646         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
647         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
648         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
649         FIELD(EXIT_QUALIFICATION, exit_qualification),
650         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
651         FIELD(GUEST_CR0, guest_cr0),
652         FIELD(GUEST_CR3, guest_cr3),
653         FIELD(GUEST_CR4, guest_cr4),
654         FIELD(GUEST_ES_BASE, guest_es_base),
655         FIELD(GUEST_CS_BASE, guest_cs_base),
656         FIELD(GUEST_SS_BASE, guest_ss_base),
657         FIELD(GUEST_DS_BASE, guest_ds_base),
658         FIELD(GUEST_FS_BASE, guest_fs_base),
659         FIELD(GUEST_GS_BASE, guest_gs_base),
660         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
661         FIELD(GUEST_TR_BASE, guest_tr_base),
662         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
663         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
664         FIELD(GUEST_DR7, guest_dr7),
665         FIELD(GUEST_RSP, guest_rsp),
666         FIELD(GUEST_RIP, guest_rip),
667         FIELD(GUEST_RFLAGS, guest_rflags),
668         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
669         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
670         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
671         FIELD(HOST_CR0, host_cr0),
672         FIELD(HOST_CR3, host_cr3),
673         FIELD(HOST_CR4, host_cr4),
674         FIELD(HOST_FS_BASE, host_fs_base),
675         FIELD(HOST_GS_BASE, host_gs_base),
676         FIELD(HOST_TR_BASE, host_tr_base),
677         FIELD(HOST_GDTR_BASE, host_gdtr_base),
678         FIELD(HOST_IDTR_BASE, host_idtr_base),
679         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
680         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
681         FIELD(HOST_RSP, host_rsp),
682         FIELD(HOST_RIP, host_rip),
683 };
684 static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
685
686 static inline short vmcs_field_to_offset(unsigned long field)
687 {
688         if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
689                 return -1;
690         return vmcs_field_to_offset_table[field];
691 }
692
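/*
 * Illustrative sketch of what the offset table is for: emulating VMREAD on
 * a vmcs12 amounts to translating the architectural field encoding into a
 * byte offset and reading from the in-memory structure. A real reader must
 * also dispatch on the field's width (u16/u32/u64/natural_width); only the
 * 64-bit case is shown. The helper below is hypothetical.
 */
static inline u64 __maybe_unused vmcs12_sketch_read64(struct vmcs12 *vmcs12,
						      unsigned long field)
{
	short offset = vmcs_field_to_offset(field);

	if (offset < 0)
		return 0;	/* field not backed by struct vmcs12 */
	return *(u64 *)((char *)vmcs12 + offset);
}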
693 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
694 {
695         return to_vmx(vcpu)->nested.current_vmcs12;
696 }
697
698 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
699 {
700         struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
701         if (is_error_page(page))
702                 return NULL;
703
704         return page;
705 }
706
707 static void nested_release_page(struct page *page)
708 {
709         kvm_release_page_dirty(page);
710 }
711
712 static void nested_release_page_clean(struct page *page)
713 {
714         kvm_release_page_clean(page);
715 }
716
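/*
 * Illustrative sketch of the intended pairing for the helpers above: a page
 * obtained with nested_get_page() must be released again with
 * nested_release_page() (dirty) or nested_release_page_clean() (read-only)
 * once the host is done with it. The helper below is hypothetical and, to
 * stay simple, refuses reads that would cross a page boundary.
 */
static bool __maybe_unused nested_sketch_copy_from_guest(struct kvm_vcpu *vcpu,
							 gpa_t gpa, void *dst,
							 unsigned int len)
{
	struct page *page;
	char *va;

	if (offset_in_page(gpa) + len > PAGE_SIZE)
		return false;

	page = nested_get_page(vcpu, gpa);
	if (!page)
		return false;

	va = kmap(page);
	memcpy(dst, va + offset_in_page(gpa), len);
	kunmap(page);

	nested_release_page_clean(page);
	return true;
}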
717 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
718 static u64 construct_eptp(unsigned long root_hpa);
719 static void kvm_cpu_vmxon(u64 addr);
720 static void kvm_cpu_vmxoff(void);
721 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
722 static void vmx_set_segment(struct kvm_vcpu *vcpu,
723                             struct kvm_segment *var, int seg);
724 static void vmx_get_segment(struct kvm_vcpu *vcpu,
725                             struct kvm_segment *var, int seg);
726 static bool guest_state_valid(struct kvm_vcpu *vcpu);
727 static u32 vmx_segment_access_rights(struct kvm_segment *var);
728 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
729 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
730 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
731
732 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
733 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
734 /*
735  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
736  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
737  */
738 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
739 static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
740
741 static unsigned long *vmx_io_bitmap_a;
742 static unsigned long *vmx_io_bitmap_b;
743 static unsigned long *vmx_msr_bitmap_legacy;
744 static unsigned long *vmx_msr_bitmap_longmode;
745 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
746 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
747 static unsigned long *vmx_vmread_bitmap;
748 static unsigned long *vmx_vmwrite_bitmap;
749
750 static bool cpu_has_load_ia32_efer;
751 static bool cpu_has_load_perf_global_ctrl;
752
753 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
754 static DEFINE_SPINLOCK(vmx_vpid_lock);
755
756 static struct vmcs_config {
757         int size;
758         int order;
759         u32 revision_id;
760         u32 pin_based_exec_ctrl;
761         u32 cpu_based_exec_ctrl;
762         u32 cpu_based_2nd_exec_ctrl;
763         u32 vmexit_ctrl;
764         u32 vmentry_ctrl;
765 } vmcs_config;
766
767 static struct vmx_capability {
768         u32 ept;
769         u32 vpid;
770 } vmx_capability;
771
772 #define VMX_SEGMENT_FIELD(seg)                                  \
773         [VCPU_SREG_##seg] = {                                   \
774                 .selector = GUEST_##seg##_SELECTOR,             \
775                 .base = GUEST_##seg##_BASE,                     \
776                 .limit = GUEST_##seg##_LIMIT,                   \
777                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
778         }
779
780 static const struct kvm_vmx_segment_field {
781         unsigned selector;
782         unsigned base;
783         unsigned limit;
784         unsigned ar_bytes;
785 } kvm_vmx_segment_fields[] = {
786         VMX_SEGMENT_FIELD(CS),
787         VMX_SEGMENT_FIELD(DS),
788         VMX_SEGMENT_FIELD(ES),
789         VMX_SEGMENT_FIELD(FS),
790         VMX_SEGMENT_FIELD(GS),
791         VMX_SEGMENT_FIELD(SS),
792         VMX_SEGMENT_FIELD(TR),
793         VMX_SEGMENT_FIELD(LDTR),
794 };
795
796 static u64 host_efer;
797
798 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
799
800 /*
801  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
802  * away by decrementing the array size.
803  */
804 static const u32 vmx_msr_index[] = {
805 #ifdef CONFIG_X86_64
806         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
807 #endif
808         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
809 };
810 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
811
812 static inline bool is_page_fault(u32 intr_info)
813 {
814         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
815                              INTR_INFO_VALID_MASK)) ==
816                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
817 }
818
819 static inline bool is_no_device(u32 intr_info)
820 {
821         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
822                              INTR_INFO_VALID_MASK)) ==
823                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
824 }
825
826 static inline bool is_invalid_opcode(u32 intr_info)
827 {
828         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
829                              INTR_INFO_VALID_MASK)) ==
830                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
831 }
832
833 static inline bool is_external_interrupt(u32 intr_info)
834 {
835         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
836                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
837 }
838
839 static inline bool is_machine_check(u32 intr_info)
840 {
841         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
842                              INTR_INFO_VALID_MASK)) ==
843                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
844 }
845
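/*
 * The predicates above all decode the VM-exit interruption-information
 * field the same way: mask out the vector, the event type and the valid
 * bit and compare against the expected combination. As a purely
 * illustrative example (this helper is not used anywhere in this file),
 * a hardware #DB exception would be matched like this:
 */
static inline bool __maybe_unused is_debug_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | DB_VECTOR | INTR_INFO_VALID_MASK);
}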
846 static inline bool cpu_has_vmx_msr_bitmap(void)
847 {
848         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
849 }
850
851 static inline bool cpu_has_vmx_tpr_shadow(void)
852 {
853         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
854 }
855
856 static inline bool vm_need_tpr_shadow(struct kvm *kvm)
857 {
858         return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
859 }
860
861 static inline bool cpu_has_secondary_exec_ctrls(void)
862 {
863         return vmcs_config.cpu_based_exec_ctrl &
864                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
865 }
866
867 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
868 {
869         return vmcs_config.cpu_based_2nd_exec_ctrl &
870                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
871 }
872
873 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
874 {
875         return vmcs_config.cpu_based_2nd_exec_ctrl &
876                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
877 }
878
879 static inline bool cpu_has_vmx_apic_register_virt(void)
880 {
881         return vmcs_config.cpu_based_2nd_exec_ctrl &
882                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
883 }
884
885 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
886 {
887         return vmcs_config.cpu_based_2nd_exec_ctrl &
888                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
889 }
890
891 static inline bool cpu_has_vmx_posted_intr(void)
892 {
893         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
894 }
895
896 static inline bool cpu_has_vmx_apicv(void)
897 {
898         return cpu_has_vmx_apic_register_virt() &&
899                 cpu_has_vmx_virtual_intr_delivery() &&
900                 cpu_has_vmx_posted_intr();
901 }
902
903 static inline bool cpu_has_vmx_flexpriority(void)
904 {
905         return cpu_has_vmx_tpr_shadow() &&
906                 cpu_has_vmx_virtualize_apic_accesses();
907 }
908
909 static inline bool cpu_has_vmx_ept_execute_only(void)
910 {
911         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
912 }
913
914 static inline bool cpu_has_vmx_eptp_uncacheable(void)
915 {
916         return vmx_capability.ept & VMX_EPTP_UC_BIT;
917 }
918
919 static inline bool cpu_has_vmx_eptp_writeback(void)
920 {
921         return vmx_capability.ept & VMX_EPTP_WB_BIT;
922 }
923
924 static inline bool cpu_has_vmx_ept_2m_page(void)
925 {
926         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
927 }
928
929 static inline bool cpu_has_vmx_ept_1g_page(void)
930 {
931         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
932 }
933
934 static inline bool cpu_has_vmx_ept_4levels(void)
935 {
936         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
937 }
938
939 static inline bool cpu_has_vmx_ept_ad_bits(void)
940 {
941         return vmx_capability.ept & VMX_EPT_AD_BIT;
942 }
943
944 static inline bool cpu_has_vmx_invept_context(void)
945 {
946         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
947 }
948
949 static inline bool cpu_has_vmx_invept_global(void)
950 {
951         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
952 }
953
954 static inline bool cpu_has_vmx_invvpid_single(void)
955 {
956         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
957 }
958
959 static inline bool cpu_has_vmx_invvpid_global(void)
960 {
961         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
962 }
963
964 static inline bool cpu_has_vmx_ept(void)
965 {
966         return vmcs_config.cpu_based_2nd_exec_ctrl &
967                 SECONDARY_EXEC_ENABLE_EPT;
968 }
969
970 static inline bool cpu_has_vmx_unrestricted_guest(void)
971 {
972         return vmcs_config.cpu_based_2nd_exec_ctrl &
973                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
974 }
975
976 static inline bool cpu_has_vmx_ple(void)
977 {
978         return vmcs_config.cpu_based_2nd_exec_ctrl &
979                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
980 }
981
982 static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
983 {
984         return flexpriority_enabled && irqchip_in_kernel(kvm);
985 }
986
987 static inline bool cpu_has_vmx_vpid(void)
988 {
989         return vmcs_config.cpu_based_2nd_exec_ctrl &
990                 SECONDARY_EXEC_ENABLE_VPID;
991 }
992
993 static inline bool cpu_has_vmx_rdtscp(void)
994 {
995         return vmcs_config.cpu_based_2nd_exec_ctrl &
996                 SECONDARY_EXEC_RDTSCP;
997 }
998
999 static inline bool cpu_has_vmx_invpcid(void)
1000 {
1001         return vmcs_config.cpu_based_2nd_exec_ctrl &
1002                 SECONDARY_EXEC_ENABLE_INVPCID;
1003 }
1004
1005 static inline bool cpu_has_virtual_nmis(void)
1006 {
1007         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1008 }
1009
1010 static inline bool cpu_has_vmx_wbinvd_exit(void)
1011 {
1012         return vmcs_config.cpu_based_2nd_exec_ctrl &
1013                 SECONDARY_EXEC_WBINVD_EXITING;
1014 }
1015
1016 static inline bool cpu_has_vmx_shadow_vmcs(void)
1017 {
1018         u64 vmx_msr;
1019         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1020         /* check if the cpu supports writing r/o exit information fields */
1021         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1022                 return false;
1023
1024         return vmcs_config.cpu_based_2nd_exec_ctrl &
1025                 SECONDARY_EXEC_SHADOW_VMCS;
1026 }
1027
1028 static inline bool report_flexpriority(void)
1029 {
1030         return flexpriority_enabled;
1031 }
1032
1033 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1034 {
1035         return vmcs12->cpu_based_vm_exec_control & bit;
1036 }
1037
1038 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1039 {
1040         return (vmcs12->cpu_based_vm_exec_control &
1041                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1042                 (vmcs12->secondary_vm_exec_control & bit);
1043 }
1044
1045 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1046 {
1047         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1048 }
1049
1050 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1051 {
1052         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1053 }
1054
1055 static inline bool is_exception(u32 intr_info)
1056 {
1057         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1058                 == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
1059 }
1060
1061 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
1062 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1063                         struct vmcs12 *vmcs12,
1064                         u32 reason, unsigned long qualification);
1065
1066 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1067 {
1068         int i;
1069
1070         for (i = 0; i < vmx->nmsrs; ++i)
1071                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1072                         return i;
1073         return -1;
1074 }
1075
1076 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1077 {
1078         struct {
1079                 u64 vpid : 16;
1080                 u64 rsvd : 48;
1081                 u64 gva;
1082         } operand = { vpid, 0, gva };
1083
1084         asm volatile (__ex(ASM_VMX_INVVPID)
1085                       /* CF==1 or ZF==1 --> rc = -1 */
1086                       "; ja 1f ; ud2 ; 1:"
1087                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1088 }
1089
1090 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1091 {
1092         struct {
1093                 u64 eptp, gpa;
1094         } operand = {eptp, gpa};
1095
1096         asm volatile (__ex(ASM_VMX_INVEPT)
1097                         /* CF==1 or ZF==1 --> rc = -1 */
1098                         "; ja 1f ; ud2 ; 1:\n"
1099                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1100 }
1101
1102 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1103 {
1104         int i;
1105
1106         i = __find_msr_index(vmx, msr);
1107         if (i >= 0)
1108                 return &vmx->guest_msrs[i];
1109         return NULL;
1110 }
1111
1112 static void vmcs_clear(struct vmcs *vmcs)
1113 {
1114         u64 phys_addr = __pa(vmcs);
1115         u8 error;
1116
1117         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1118                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1119                       : "cc", "memory");
1120         if (error)
1121                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1122                        vmcs, phys_addr);
1123 }
1124
1125 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1126 {
1127         vmcs_clear(loaded_vmcs->vmcs);
1128         loaded_vmcs->cpu = -1;
1129         loaded_vmcs->launched = 0;
1130 }
1131
1132 static void vmcs_load(struct vmcs *vmcs)
1133 {
1134         u64 phys_addr = __pa(vmcs);
1135         u8 error;
1136
1137         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1138                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1139                         : "cc", "memory");
1140         if (error)
1141                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1142                        vmcs, phys_addr);
1143 }
1144
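/*
 * Illustrative sketch (hedged, not this file's vcpu-load path) of how the
 * pieces above fit together: making a loaded_vmcs current on this CPU means
 * VMPTRLDing its vmcs, updating the per-cpu current_vmcs cache, and
 * recording which CPU it now lives on. The real code additionally links the
 * loaded_vmcs onto loaded_vmcss_on_cpu with interrupts disabled; that part
 * is omitted here, and the helper name is hypothetical.
 */
static void __maybe_unused loaded_vmcs_sketch_make_current(struct loaded_vmcs *lv)
{
	int cpu = raw_smp_processor_id();

	if (per_cpu(current_vmcs, cpu) != lv->vmcs) {
		per_cpu(current_vmcs, cpu) = lv->vmcs;
		vmcs_load(lv->vmcs);
	}
	lv->cpu = cpu;
}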
1145 #ifdef CONFIG_KEXEC
1146 /*
1147  * This bitmap indicates, for each cpu, whether the crash-time vmclear
1148  * operation is enabled on that cpu. It is disabled on all cpus by
1149  * default.
1150  */
1151 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1152
1153 static inline void crash_enable_local_vmclear(int cpu)
1154 {
1155         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1156 }
1157
1158 static inline void crash_disable_local_vmclear(int cpu)
1159 {
1160         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1161 }
1162
1163 static inline int crash_local_vmclear_enabled(int cpu)
1164 {
1165         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1166 }
1167
1168 static void crash_vmclear_local_loaded_vmcss(void)
1169 {
1170         int cpu = raw_smp_processor_id();
1171         struct loaded_vmcs *v;
1172
1173         if (!crash_local_vmclear_enabled(cpu))
1174                 return;
1175
1176         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1177                             loaded_vmcss_on_cpu_link)
1178                 vmcs_clear(v->vmcs);
1179 }
1180 #else
1181 static inline void crash_enable_local_vmclear(int cpu) { }
1182 static inline void crash_disable_local_vmclear(int cpu) { }
1183 #endif /* CONFIG_KEXEC */
1184
1185 static void __loaded_vmcs_clear(void *arg)
1186 {
1187         struct loaded_vmcs *loaded_vmcs = arg;
1188         int cpu = raw_smp_processor_id();
1189
1190         if (loaded_vmcs->cpu != cpu)
1191                 return; /* vcpu migration can race with cpu offline */
1192         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1193                 per_cpu(current_vmcs, cpu) = NULL;
1194         crash_disable_local_vmclear(cpu);
1195         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1196
1197         /*
1198          * We should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
1199          * happens before setting loaded_vmcs->cpu to -1, which is done in
1200          * loaded_vmcs_init. Otherwise, another cpu could see cpu == -1 first
1201          * and add the vmcs to the percpu list before it is deleted here.
1202          */
1203         smp_wmb();
1204
1205         loaded_vmcs_init(loaded_vmcs);
1206         crash_enable_local_vmclear(cpu);
1207 }
1208
1209 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1210 {
1211         int cpu = loaded_vmcs->cpu;
1212
1213         if (cpu != -1)
1214                 smp_call_function_single(cpu,
1215                          __loaded_vmcs_clear, loaded_vmcs, 1);
1216 }
1217
1218 static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
1219 {
1220         if (vmx->vpid == 0)
1221                 return;
1222
1223         if (cpu_has_vmx_invvpid_single())
1224                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
1225 }
1226
1227 static inline void vpid_sync_vcpu_global(void)
1228 {
1229         if (cpu_has_vmx_invvpid_global())
1230                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1231 }
1232
1233 static inline void vpid_sync_context(struct vcpu_vmx *vmx)
1234 {
1235         if (cpu_has_vmx_invvpid_single())
1236                 vpid_sync_vcpu_single(vmx);
1237         else
1238                 vpid_sync_vcpu_global();
1239 }
1240
1241 static inline void ept_sync_global(void)
1242 {
1243         if (cpu_has_vmx_invept_global())
1244                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1245 }
1246
1247 static inline void ept_sync_context(u64 eptp)
1248 {
1249         if (enable_ept) {
1250                 if (cpu_has_vmx_invept_context())
1251                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1252                 else
1253                         ept_sync_global();
1254         }
1255 }
1256
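/*
 * Illustrative sketch (hypothetical helper): after switching or modifying
 * an EPT root, the guest-physical mappings cached under that EPTP must be
 * flushed, which is exactly what ept_sync_context() above is for.
 */
static void __maybe_unused sketch_flush_ept_root(unsigned long root_hpa)
{
	/* ept_sync_context() is a no-op when EPT is disabled. */
	ept_sync_context(construct_eptp(root_hpa));
}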
1257 static __always_inline unsigned long vmcs_readl(unsigned long field)
1258 {
1259         unsigned long value;
1260
1261         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1262                       : "=a"(value) : "d"(field) : "cc");
1263         return value;
1264 }
1265
1266 static __always_inline u16 vmcs_read16(unsigned long field)
1267 {
1268         return vmcs_readl(field);
1269 }
1270
1271 static __always_inline u32 vmcs_read32(unsigned long field)
1272 {
1273         return vmcs_readl(field);
1274 }
1275
1276 static __always_inline u64 vmcs_read64(unsigned long field)
1277 {
1278 #ifdef CONFIG_X86_64
1279         return vmcs_readl(field);
1280 #else
1281         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
1282 #endif
1283 }
1284
1285 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1286 {
1287         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1288                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1289         dump_stack();
1290 }
1291
1292 static void vmcs_writel(unsigned long field, unsigned long value)
1293 {
1294         u8 error;
1295
1296         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1297                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1298         if (unlikely(error))
1299                 vmwrite_error(field, value);
1300 }
1301
1302 static void vmcs_write16(unsigned long field, u16 value)
1303 {
1304         vmcs_writel(field, value);
1305 }
1306
1307 static void vmcs_write32(unsigned long field, u32 value)
1308 {
1309         vmcs_writel(field, value);
1310 }
1311
1312 static void vmcs_write64(unsigned long field, u64 value)
1313 {
1314         vmcs_writel(field, value);
1315 #ifndef CONFIG_X86_64
1316         asm volatile ("");
1317         vmcs_writel(field+1, value >> 32);
1318 #endif
1319 }
1320
1321 static void vmcs_clear_bits(unsigned long field, u32 mask)
1322 {
1323         vmcs_writel(field, vmcs_readl(field) & ~mask);
1324 }
1325
1326 static void vmcs_set_bits(unsigned long field, u32 mask)
1327 {
1328         vmcs_writel(field, vmcs_readl(field) | mask);
1329 }
1330
1331 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1332 {
1333         vmcs_write32(VM_ENTRY_CONTROLS, val);
1334         vmx->vm_entry_controls_shadow = val;
1335 }
1336
1337 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1338 {
1339         if (vmx->vm_entry_controls_shadow != val)
1340                 vm_entry_controls_init(vmx, val);
1341 }
1342
1343 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1344 {
1345         return vmx->vm_entry_controls_shadow;
1346 }
1347
1348
1349 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1350 {
1351         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1352 }
1353
1354 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1355 {
1356         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1357 }
1358
1359 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1360 {
1361         vmcs_write32(VM_EXIT_CONTROLS, val);
1362         vmx->vm_exit_controls_shadow = val;
1363 }
1364
1365 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1366 {
1367         if (vmx->vm_exit_controls_shadow != val)
1368                 vm_exit_controls_init(vmx, val);
1369 }
1370
1371 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1372 {
1373         return vmx->vm_exit_controls_shadow;
1374 }
1375
1376
1377 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1378 {
1379         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1380 }
1381
1382 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1383 {
1384         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1385 }
1386
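/*
 * Illustrative usage of the shadow accessors above (the control bit chosen
 * here is only an example): toggling a VM-entry control through these
 * helpers costs a VMWRITE only when the value actually changes, because the
 * current value is kept in the shadow instead of being re-read with VMREAD.
 */
static void __maybe_unused vm_entry_controls_sketch_set_ia32e(struct vcpu_vmx *vmx,
							      bool ia32e)
{
	if (ia32e)
		vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
	else
		vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
}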
1387 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1388 {
1389         vmx->segment_cache.bitmask = 0;
1390 }
1391
1392 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1393                                        unsigned field)
1394 {
1395         bool ret;
1396         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1397
1398         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1399                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1400                 vmx->segment_cache.bitmask = 0;
1401         }
1402         ret = vmx->segment_cache.bitmask & mask;
1403         vmx->segment_cache.bitmask |= mask;
1404         return ret;
1405 }
1406
1407 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1408 {
1409         u16 *p = &vmx->segment_cache.seg[seg].selector;
1410
1411         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1412                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1413         return *p;
1414 }
1415
1416 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1417 {
1418         ulong *p = &vmx->segment_cache.seg[seg].base;
1419
1420         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1421                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1422         return *p;
1423 }
1424
1425 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1426 {
1427         u32 *p = &vmx->segment_cache.seg[seg].limit;
1428
1429         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1430                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1431         return *p;
1432 }
1433
1434 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1435 {
1436         u32 *p = &vmx->segment_cache.seg[seg].ar;
1437
1438         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1439                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1440         return *p;
1441 }
1442
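/*
 * Illustrative sketch of what the segment cache above buys us: the first
 * read of a field after vmx_segment_cache_clear() issues a VMREAD and
 * caches the result, and later reads of the same field are served from
 * segment_cache. The helper below is hypothetical; it derives the DPL from
 * SS's access rights (bits 5-6 of the AR byte).
 */
static int __maybe_unused vmx_sketch_guest_ss_dpl(struct vcpu_vmx *vmx)
{
	return (vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS) >> 5) & 3;
}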
1443 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1444 {
1445         u32 eb;
1446
1447         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1448              (1u << NM_VECTOR) | (1u << DB_VECTOR);
1449         if ((vcpu->guest_debug &
1450              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1451             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1452                 eb |= 1u << BP_VECTOR;
1453         if (to_vmx(vcpu)->rmode.vm86_active)
1454                 eb = ~0;
1455         if (enable_ept)
1456                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1457         if (vcpu->fpu_active)
1458                 eb &= ~(1u << NM_VECTOR);
1459
1460         /* When we are running a nested L2 guest and L1 specified for it a
1461          * certain exception bitmap, we must trap the same exceptions and pass
1462          * them to L1. When running L2, we will only handle the exceptions
1463          * specified above if L1 did not want them.
1464          */
1465         if (is_guest_mode(vcpu))
1466                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1467
1468         vmcs_write32(EXCEPTION_BITMAP, eb);
1469 }
1470
1471 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1472                 unsigned long entry, unsigned long exit)
1473 {
1474         vm_entry_controls_clearbit(vmx, entry);
1475         vm_exit_controls_clearbit(vmx, exit);
1476 }
1477
1478 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1479 {
1480         unsigned i;
1481         struct msr_autoload *m = &vmx->msr_autoload;
1482
1483         switch (msr) {
1484         case MSR_EFER:
1485                 if (cpu_has_load_ia32_efer) {
1486                         clear_atomic_switch_msr_special(vmx,
1487                                         VM_ENTRY_LOAD_IA32_EFER,
1488                                         VM_EXIT_LOAD_IA32_EFER);
1489                         return;
1490                 }
1491                 break;
1492         case MSR_CORE_PERF_GLOBAL_CTRL:
1493                 if (cpu_has_load_perf_global_ctrl) {
1494                         clear_atomic_switch_msr_special(vmx,
1495                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1496                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1497                         return;
1498                 }
1499                 break;
1500         }
1501
1502         for (i = 0; i < m->nr; ++i)
1503                 if (m->guest[i].index == msr)
1504                         break;
1505
1506         if (i == m->nr)
1507                 return;
1508         --m->nr;
1509         m->guest[i] = m->guest[m->nr];
1510         m->host[i] = m->host[m->nr];
1511         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1512         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1513 }
1514
1515 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1516                 unsigned long entry, unsigned long exit,
1517                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1518                 u64 guest_val, u64 host_val)
1519 {
1520         vmcs_write64(guest_val_vmcs, guest_val);
1521         vmcs_write64(host_val_vmcs, host_val);
1522         vm_entry_controls_setbit(vmx, entry);
1523         vm_exit_controls_setbit(vmx, exit);
1524 }
1525
1526 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1527                                   u64 guest_val, u64 host_val)
1528 {
1529         unsigned i;
1530         struct msr_autoload *m = &vmx->msr_autoload;
1531
1532         switch (msr) {
1533         case MSR_EFER:
1534                 if (cpu_has_load_ia32_efer) {
1535                         add_atomic_switch_msr_special(vmx,
1536                                         VM_ENTRY_LOAD_IA32_EFER,
1537                                         VM_EXIT_LOAD_IA32_EFER,
1538                                         GUEST_IA32_EFER,
1539                                         HOST_IA32_EFER,
1540                                         guest_val, host_val);
1541                         return;
1542                 }
1543                 break;
1544         case MSR_CORE_PERF_GLOBAL_CTRL:
1545                 if (cpu_has_load_perf_global_ctrl) {
1546                         add_atomic_switch_msr_special(vmx,
1547                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1548                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1549                                         GUEST_IA32_PERF_GLOBAL_CTRL,
1550                                         HOST_IA32_PERF_GLOBAL_CTRL,
1551                                         guest_val, host_val);
1552                         return;
1553                 }
1554                 break;
1555         }
1556
1557         for (i = 0; i < m->nr; ++i)
1558                 if (m->guest[i].index == msr)
1559                         break;
1560
1561         if (i == NR_AUTOLOAD_MSRS) {
1562                 printk_once(KERN_WARNING "Not enough msr switch entries. "
1563                                 "Can't add msr %x\n", msr);
1564                 return;
1565         } else if (i == m->nr) {
1566                 ++m->nr;
1567                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1568                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1569         }
1570
1571         m->guest[i].index = msr;
1572         m->guest[i].value = guest_val;
1573         m->host[i].index = msr;
1574         m->host[i].value = host_val;
1575 }
1576
1577 static void reload_tss(void)
1578 {
1579         /*
1580          * VT restores TR but not its size.  Useless.
1581          */
1582         struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1583         struct desc_struct *descs;
1584
1585         descs = (void *)gdt->address;
1586         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
1587         load_TR_desc();
1588 }
1589
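/*
 * Decide how EFER is switched for this guest: returns true if the lazily
 * restored shared-MSR mechanism (vmx->guest_msrs) can be used, or false if
 * EFER was placed on the atomic-switch lists instead, which is required when
 * EPT is enabled and the guest and host disagree on EFER.NX.
 */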
1590 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
1591 {
1592         u64 guest_efer;
1593         u64 ignore_bits;
1594
1595         guest_efer = vmx->vcpu.arch.efer;
1596
1597         /*
1598          * NX is emulated; LMA and LME handled by hardware; SCE meaningless
1599          * outside long mode
1600          */
1601         ignore_bits = EFER_NX | EFER_SCE;
1602 #ifdef CONFIG_X86_64
1603         ignore_bits |= EFER_LMA | EFER_LME;
1604         /* SCE is meaningful only in long mode on Intel */
1605         if (guest_efer & EFER_LMA)
1606                 ignore_bits &= ~(u64)EFER_SCE;
1607 #endif
1608         guest_efer &= ~ignore_bits;
1609         guest_efer |= host_efer & ignore_bits;
1610         vmx->guest_msrs[efer_offset].data = guest_efer;
1611         vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
1612
1613         clear_atomic_switch_msr(vmx, MSR_EFER);
1614         /* On ept, can't emulate nx, and must switch nx atomically */
1615         if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
1616                 guest_efer = vmx->vcpu.arch.efer;
1617                 if (!(guest_efer & EFER_LMA))
1618                         guest_efer &= ~EFER_LME;
1619                 add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
1620                 return false;
1621         }
1622
1623         return true;
1624 }
1625
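/*
 * Return the base address of the segment described by 'selector', reading
 * the descriptor from the GDT or, for TI=1 selectors, from the current LDT.
 * On 64-bit hosts the upper half of system descriptors (LDT/TSS) is folded in.
 */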
1626 static unsigned long segment_base(u16 selector)
1627 {
1628         struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1629         struct desc_struct *d;
1630         unsigned long table_base;
1631         unsigned long v;
1632
1633         if (!(selector & ~3))
1634                 return 0;
1635
1636         table_base = gdt->address;
1637
1638         if (selector & 4) {           /* from ldt */
1639                 u16 ldt_selector = kvm_read_ldt();
1640
1641                 if (!(ldt_selector & ~3))
1642                         return 0;
1643
1644                 table_base = segment_base(ldt_selector);
1645         }
1646         d = (struct desc_struct *)(table_base + (selector & ~7));
1647         v = get_desc_base(d);
1648 #ifdef CONFIG_X86_64
1649         if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
1650                 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
1651 #endif
1652         return v;
1653 }
1654
1655 static inline unsigned long kvm_read_tr_base(void)
1656 {
1657         u16 tr;
1658         asm("str %0" : "=g"(tr));
1659         return segment_base(tr);
1660 }
1661
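/*
 * Save the host state that hardware does not restore on VM exit (segment
 * selectors, FS/GS bases, the kernel GS base and the shared MSRs) before
 * entering the guest; __vmx_load_host_state() undoes this. The work is done
 * at most once per load/put window, guarded by host_state.loaded.
 */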
1662 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
1663 {
1664         struct vcpu_vmx *vmx = to_vmx(vcpu);
1665         int i;
1666
1667         if (vmx->host_state.loaded)
1668                 return;
1669
1670         vmx->host_state.loaded = 1;
1671         /*
1672          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1673          * allow segment selectors with cpl > 0 or ti == 1.
1674          */
1675         vmx->host_state.ldt_sel = kvm_read_ldt();
1676         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
1677         savesegment(fs, vmx->host_state.fs_sel);
1678         if (!(vmx->host_state.fs_sel & 7)) {
1679                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
1680                 vmx->host_state.fs_reload_needed = 0;
1681         } else {
1682                 vmcs_write16(HOST_FS_SELECTOR, 0);
1683                 vmx->host_state.fs_reload_needed = 1;
1684         }
1685         savesegment(gs, vmx->host_state.gs_sel);
1686         if (!(vmx->host_state.gs_sel & 7))
1687                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
1688         else {
1689                 vmcs_write16(HOST_GS_SELECTOR, 0);
1690                 vmx->host_state.gs_ldt_reload_needed = 1;
1691         }
1692
1693 #ifdef CONFIG_X86_64
1694         savesegment(ds, vmx->host_state.ds_sel);
1695         savesegment(es, vmx->host_state.es_sel);
1696 #endif
1697
1698 #ifdef CONFIG_X86_64
1699         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1700         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1701 #else
1702         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
1703         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
1704 #endif
1705
1706 #ifdef CONFIG_X86_64
1707         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1708         if (is_long_mode(&vmx->vcpu))
1709                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1710 #endif
1711         for (i = 0; i < vmx->save_nmsrs; ++i)
1712                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
1713                                    vmx->guest_msrs[i].data,
1714                                    vmx->guest_msrs[i].mask);
1715 }
1716
1717 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1718 {
1719         if (!vmx->host_state.loaded)
1720                 return;
1721
1722         ++vmx->vcpu.stat.host_state_reload;
1723         vmx->host_state.loaded = 0;
1724 #ifdef CONFIG_X86_64
1725         if (is_long_mode(&vmx->vcpu))
1726                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1727 #endif
1728         if (vmx->host_state.gs_ldt_reload_needed) {
1729                 kvm_load_ldt(vmx->host_state.ldt_sel);
1730 #ifdef CONFIG_X86_64
1731                 load_gs_index(vmx->host_state.gs_sel);
1732 #else
1733                 loadsegment(gs, vmx->host_state.gs_sel);
1734 #endif
1735         }
1736         if (vmx->host_state.fs_reload_needed)
1737                 loadsegment(fs, vmx->host_state.fs_sel);
1738 #ifdef CONFIG_X86_64
1739         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
1740                 loadsegment(ds, vmx->host_state.ds_sel);
1741                 loadsegment(es, vmx->host_state.es_sel);
1742         }
1743 #endif
1744         reload_tss();
1745 #ifdef CONFIG_X86_64
1746         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1747 #endif
1748         /*
1749          * If the FPU is not active (through the host task or
1750          * the guest vcpu), then restore the cr0.TS bit.
1751          */
1752         if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
1753                 stts();
1754         load_gdt(&__get_cpu_var(host_gdt));
1755 }
1756
1757 static void vmx_load_host_state(struct vcpu_vmx *vmx)
1758 {
1759         preempt_disable();
1760         __vmx_load_host_state(vmx);
1761         preempt_enable();
1762 }
1763
1764 /*
1765  * Switches to the specified vcpu, until a matching vcpu_put(), but assumes
1766  * the vcpu mutex is already taken.
1767  */
1768 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1769 {
1770         struct vcpu_vmx *vmx = to_vmx(vcpu);
1771         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1772
1773         if (!vmm_exclusive)
1774                 kvm_cpu_vmxon(phys_addr);
1775         else if (vmx->loaded_vmcs->cpu != cpu)
1776                 loaded_vmcs_clear(vmx->loaded_vmcs);
1777
1778         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
1779                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1780                 vmcs_load(vmx->loaded_vmcs->vmcs);
1781         }
1782
1783         if (vmx->loaded_vmcs->cpu != cpu) {
1784                 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1785                 unsigned long sysenter_esp;
1786
1787                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1788                 local_irq_disable();
1789                 crash_disable_local_vmclear(cpu);
1790
1791                 /*
1792                  * The read of loaded_vmcs->cpu must happen before fetching
1793                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
1794                  * See the comments in __loaded_vmcs_clear().
1795                  */
1796                 smp_rmb();
1797
1798                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1799                          &per_cpu(loaded_vmcss_on_cpu, cpu));
1800                 crash_enable_local_vmclear(cpu);
1801                 local_irq_enable();
1802
1803                 /*
1804                  * Linux uses per-cpu TSS and GDT, so set these when switching
1805                  * processors.
1806                  */
1807                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
1808                 vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
1809
1810                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
1811                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
1812                 vmx->loaded_vmcs->cpu = cpu;
1813         }
1814 }
1815
1816 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1817 {
1818         __vmx_load_host_state(to_vmx(vcpu));
1819         if (!vmm_exclusive) {
1820                 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
1821                 vcpu->cpu = -1;
1822                 kvm_cpu_vmxoff();
1823         }
1824 }
1825
1826 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
1827 {
1828         ulong cr0;
1829
1830         if (vcpu->fpu_active)
1831                 return;
1832         vcpu->fpu_active = 1;
1833         cr0 = vmcs_readl(GUEST_CR0);
1834         cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
1835         cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
1836         vmcs_writel(GUEST_CR0, cr0);
1837         update_exception_bitmap(vcpu);
1838         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
1839         if (is_guest_mode(vcpu))
1840                 vcpu->arch.cr0_guest_owned_bits &=
1841                         ~get_vmcs12(vcpu)->cr0_guest_host_mask;
1842         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1843 }
1844
1845 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
1846
1847 /*
1848  * Return the cr0 value that a nested guest would read. This is a combination
1849  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
1850  * its hypervisor (cr0_read_shadow).
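 * For example, if cr0_guest_host_mask contains only X86_CR0_TS, the nested
 * guest sees the TS bit from cr0_read_shadow and every other bit from
 * guest_cr0.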
1851  */
1852 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
1853 {
1854         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
1855                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
1856 }
1857 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
1858 {
1859         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
1860                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
1861 }
1862
1863 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1864 {
1865         /* Note that there is no vcpu->fpu_active = 0 here. The caller must
1866          * set this *before* calling this function.
1867          */
1868         vmx_decache_cr0_guest_bits(vcpu);
1869         vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
1870         update_exception_bitmap(vcpu);
1871         vcpu->arch.cr0_guest_owned_bits = 0;
1872         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1873         if (is_guest_mode(vcpu)) {
1874                 /*
1875                  * L1's specified read shadow might not contain the TS bit,
1876                  * so now that we turned on shadowing of this bit, we need to
1877                  * set this bit of the shadow. Like in nested_vmx_run we need
1878                  * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
1879                  * up-to-date here because we just decached cr0.TS (and we'll
1880                  * only update vmcs12->guest_cr0 on nested exit).
1881                  */
1882                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1883                 vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
1884                         (vcpu->arch.cr0 & X86_CR0_TS);
1885                 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
1886         } else
1887                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
1888 }
1889
1890 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1891 {
1892         unsigned long rflags, save_rflags;
1893
1894         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
1895                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1896                 rflags = vmcs_readl(GUEST_RFLAGS);
1897                 if (to_vmx(vcpu)->rmode.vm86_active) {
1898                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1899                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1900                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1901                 }
1902                 to_vmx(vcpu)->rflags = rflags;
1903         }
1904         return to_vmx(vcpu)->rflags;
1905 }
1906
1907 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1908 {
1909         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1910         to_vmx(vcpu)->rflags = rflags;
1911         if (to_vmx(vcpu)->rmode.vm86_active) {
1912                 to_vmx(vcpu)->rmode.save_rflags = rflags;
1913                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1914         }
1915         vmcs_writel(GUEST_RFLAGS, rflags);
1916 }
1917
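/*
 * Translate the STI/MOV-SS blocking bits of GUEST_INTERRUPTIBILITY_INFO
 * to and from KVM's generic KVM_X86_SHADOW_INT_* interrupt-shadow flags.
 */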
1918 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1919 {
1920         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1921         int ret = 0;
1922
1923         if (interruptibility & GUEST_INTR_STATE_STI)
1924                 ret |= KVM_X86_SHADOW_INT_STI;
1925         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1926                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1927
1928         return ret & mask;
1929 }
1930
1931 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1932 {
1933         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1934         u32 interruptibility = interruptibility_old;
1935
1936         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1937
1938         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1939                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1940         else if (mask & KVM_X86_SHADOW_INT_STI)
1941                 interruptibility |= GUEST_INTR_STATE_STI;
1942
1943         if (interruptibility != interruptibility_old)
1944                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1945 }
1946
1947 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
1948 {
1949         unsigned long rip;
1950
1951         rip = kvm_rip_read(vcpu);
1952         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1953         kvm_rip_write(vcpu, rip);
1954
1955         /* skipping an emulated instruction also counts */
1956         vmx_set_interrupt_shadow(vcpu, 0);
1957 }
1958
1959 /*
1960  * KVM wants to deliver to the guest the page faults it intercepted. This
1961  * function checks whether, for a nested guest, they should go to L1 or L2.
1962  */
1963 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
1964 {
1965         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1966
1967         if (!(vmcs12->exception_bitmap & (1u << nr)))
1968                 return 0;
1969
1970         nested_vmx_vmexit(vcpu);
1971         return 1;
1972 }
1973
1974 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1975                                 bool has_error_code, u32 error_code,
1976                                 bool reinject)
1977 {
1978         struct vcpu_vmx *vmx = to_vmx(vcpu);
1979         u32 intr_info = nr | INTR_INFO_VALID_MASK;
1980
1981         if (!reinject && is_guest_mode(vcpu) &&
1982             nested_vmx_check_exception(vcpu, nr))
1983                 return;
1984
1985         if (has_error_code) {
1986                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1987                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1988         }
1989
1990         if (vmx->rmode.vm86_active) {
1991                 int inc_eip = 0;
1992                 if (kvm_exception_is_soft(nr))
1993                         inc_eip = vcpu->arch.event_exit_inst_len;
1994                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
1995                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1996                 return;
1997         }
1998
1999         if (kvm_exception_is_soft(nr)) {
2000                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2001                              vmx->vcpu.arch.event_exit_inst_len);
2002                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2003         } else
2004                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2005
2006         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2007 }
2008
2009 static bool vmx_rdtscp_supported(void)
2010 {
2011         return cpu_has_vmx_rdtscp();
2012 }
2013
2014 static bool vmx_invpcid_supported(void)
2015 {
2016         return cpu_has_vmx_invpcid() && enable_ept;
2017 }
2018
2019 /*
2020  * Swap MSR entry in host/guest MSR entry array.
2021  */
2022 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2023 {
2024         struct shared_msr_entry tmp;
2025
2026         tmp = vmx->guest_msrs[to];
2027         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2028         vmx->guest_msrs[from] = tmp;
2029 }
2030
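/*
 * Point the VMCS at the MSR bitmap that matches the vcpu's current mode:
 * x2APIC vs. legacy APIC accesses and long mode vs. legacy mode each use a
 * separate pre-built bitmap.
 */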
2031 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2032 {
2033         unsigned long *msr_bitmap;
2034
2035         if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
2036                 if (is_long_mode(vcpu))
2037                         msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2038                 else
2039                         msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
2040         } else {
2041                 if (is_long_mode(vcpu))
2042                         msr_bitmap = vmx_msr_bitmap_longmode;
2043                 else
2044                         msr_bitmap = vmx_msr_bitmap_legacy;
2045         }
2046
2047         vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
2048 }
2049
2050 /*
2051  * Set up the vmcs to automatically save and restore system
2052  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2053  * mode, as fiddling with msrs is very expensive.
2054  */
2055 static void setup_msrs(struct vcpu_vmx *vmx)
2056 {
2057         int save_nmsrs, index;
2058
2059         save_nmsrs = 0;
2060 #ifdef CONFIG_X86_64
2061         if (is_long_mode(&vmx->vcpu)) {
2062                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2063                 if (index >= 0)
2064                         move_msr_up(vmx, index, save_nmsrs++);
2065                 index = __find_msr_index(vmx, MSR_LSTAR);
2066                 if (index >= 0)
2067                         move_msr_up(vmx, index, save_nmsrs++);
2068                 index = __find_msr_index(vmx, MSR_CSTAR);
2069                 if (index >= 0)
2070                         move_msr_up(vmx, index, save_nmsrs++);
2071                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2072                 if (index >= 0 && vmx->rdtscp_enabled)
2073                         move_msr_up(vmx, index, save_nmsrs++);
2074                 /*
2075                  * MSR_STAR is only needed on long mode guests, and only
2076                  * if efer.sce is enabled.
2077                  */
2078                 index = __find_msr_index(vmx, MSR_STAR);
2079                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2080                         move_msr_up(vmx, index, save_nmsrs++);
2081         }
2082 #endif
2083         index = __find_msr_index(vmx, MSR_EFER);
2084         if (index >= 0 && update_transition_efer(vmx, index))
2085                 move_msr_up(vmx, index, save_nmsrs++);
2086
2087         vmx->save_nmsrs = save_nmsrs;
2088
2089         if (cpu_has_vmx_msr_bitmap())
2090                 vmx_set_msr_bitmap(&vmx->vcpu);
2091 }
2092
2093 /*
2094  * reads and returns guest's timestamp counter "register"
2095  * guest_tsc = host_tsc + tsc_offset    -- 21.3
2096  */
2097 static u64 guest_read_tsc(void)
2098 {
2099         u64 host_tsc, tsc_offset;
2100
2101         rdtscll(host_tsc);
2102         tsc_offset = vmcs_read64(TSC_OFFSET);
2103         return host_tsc + tsc_offset;
2104 }
2105
2106 /*
2107  * Like guest_read_tsc, but always returns L1's notion of the timestamp
2108  * counter, even if a nested guest (L2) is currently running.
2109  */
2110 u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2111 {
2112         u64 tsc_offset;
2113
2114         tsc_offset = is_guest_mode(vcpu) ?
2115                 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
2116                 vmcs_read64(TSC_OFFSET);
2117         return host_tsc + tsc_offset;
2118 }
2119
2120 /*
2121  * Engage any workarounds for mis-matched TSC rates.  Currently limited to
2122  * software catchup for faster rates on slower CPUs.
2123  */
2124 static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2125 {
2126         if (!scale)
2127                 return;
2128
2129         if (user_tsc_khz > tsc_khz) {
2130                 vcpu->arch.tsc_catchup = 1;
2131                 vcpu->arch.tsc_always_catchup = 1;
2132         } else
2133                 WARN(1, "user requested TSC rate below hardware speed\n");
2134 }
2135
2136 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
2137 {
2138         return vmcs_read64(TSC_OFFSET);
2139 }
2140
2141 /*
2142  * writes 'offset' into guest's timestamp counter offset register
2143  */
2144 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2145 {
2146         if (is_guest_mode(vcpu)) {
2147                 /*
2148                  * We're here if L1 chose not to trap WRMSR to TSC. According
2149                  * to the spec, this should set L1's TSC; the offset that L1
2150                  * set for L2 remains unchanged, and still needs to be added
2151                  * to the newly set TSC to get L2's TSC.
2152                  */
2153                 struct vmcs12 *vmcs12;
2154                 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
2155                 /* recalculate vmcs02.TSC_OFFSET: */
2156                 vmcs12 = get_vmcs12(vcpu);
2157                 vmcs_write64(TSC_OFFSET, offset +
2158                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2159                          vmcs12->tsc_offset : 0));
2160         } else {
2161                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2162                                            vmcs_read64(TSC_OFFSET), offset);
2163                 vmcs_write64(TSC_OFFSET, offset);
2164         }
2165 }
2166
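/*
 * Apply a relative adjustment to the current TSC offset. When a nested (L2)
 * guest is running, the adjustment is also folded into the saved vmcs01
 * offset so that L1 observes it as well.
 */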
2167 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
2168 {
2169         u64 offset = vmcs_read64(TSC_OFFSET);
2170
2171         vmcs_write64(TSC_OFFSET, offset + adjustment);
2172         if (is_guest_mode(vcpu)) {
2173                 /* Even when running L2, the adjustment needs to apply to L1 */
2174                 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
2175         } else
2176                 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
2177                                            offset + adjustment);
2178 }
2179
2180 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2181 {
2182         return target_tsc - native_read_tsc();
2183 }
2184
2185 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2186 {
2187         struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
2188         return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
2189 }
2190
2191 /*
2192  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2193  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2194  * all guests if the "nested" module option is off, and can also be disabled
2195  * for a single guest by disabling its VMX cpuid bit.
2196  */
2197 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2198 {
2199         return nested && guest_cpuid_has_vmx(vcpu);
2200 }
2201
2202 /*
2203  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2204  * returned for the various VMX controls MSRs when nested VMX is enabled.
2205  * The same values should also be used to verify that vmcs12 control fields are
2206  * valid during nested entry from L1 to L2.
2207  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2208  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2209  * bit in the high half is on if the corresponding bit in the control field
2210  * may be on. See also vmx_control_verify().
2211  * TODO: allow these variables to be modified (downgraded) by module options
2212  * or other means.
2213  */
2214 static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
2215 static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
2216 static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2217 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2218 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2219 static u32 nested_vmx_misc_low, nested_vmx_misc_high;
2220 static u32 nested_vmx_ept_caps;
2221 static __init void nested_vmx_setup_ctls_msrs(void)
2222 {
2223         /*
2224          * Note that as a general rule, the high half of the MSRs (bits in
2225          * the control fields which may be 1) should be initialized by the
2226          * intersection of the underlying hardware's MSR (i.e., features which
2227          * can be supported) and the list of features we want to expose -
2228          * because they are known to be properly supported in our code.
2229          * Also, usually, the low half of the MSRs (bits which must be 1) can
2230          * be set to 0, meaning that L1 may turn off any of these bits. The
2231          * reason is that if one of these bits is necessary, it will appear
2232          * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
2233          * fields of vmcs01 and vmcs12, will keep it set in vmcs02 - and
2234          * nested_vmx_exit_handled() will not pass the related exits to L1.
2235          * These rules have exceptions below.
2236          */
2237
2238         /* pin-based controls */
2239         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2240               nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
2241         /*
2242          * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
2243          * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
2244          */
2245         nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2246         nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
2247                 PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS |
2248                 PIN_BASED_VMX_PREEMPTION_TIMER;
2249         nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2250
2251         /*
2252          * Exit controls
2253          * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
2254          * 17 must be 1.
2255          */
2256         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2257                 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
2258         nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2259         /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
2260         nested_vmx_exit_ctls_high &=
2261 #ifdef CONFIG_X86_64
2262                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2263 #endif
2264                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
2265                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
2266         if (!(nested_vmx_pinbased_ctls_high & PIN_BASED_VMX_PREEMPTION_TIMER) ||
2267             !(nested_vmx_exit_ctls_high & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)) {
2268                 nested_vmx_exit_ctls_high &= ~VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
2269                 nested_vmx_pinbased_ctls_high &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2270         }
2271         nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2272                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER);
2273
2274         /* entry controls */
2275         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2276                 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
2277         /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
2278         nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2279         nested_vmx_entry_ctls_high &=
2280 #ifdef CONFIG_X86_64
2281                 VM_ENTRY_IA32E_MODE |
2282 #endif
2283                 VM_ENTRY_LOAD_IA32_PAT;
2284         nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
2285                                        VM_ENTRY_LOAD_IA32_EFER);
2286
2287         /* cpu-based controls */
2288         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2289                 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
2290         nested_vmx_procbased_ctls_low = 0;
2291         nested_vmx_procbased_ctls_high &=
2292                 CPU_BASED_VIRTUAL_INTR_PENDING |
2293                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2294                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2295                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2296                 CPU_BASED_CR3_STORE_EXITING |
2297 #ifdef CONFIG_X86_64
2298                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2299 #endif
2300                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2301                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
2302                 CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
2303                 CPU_BASED_PAUSE_EXITING |
2304                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2305         /*
2306          * We can allow some features even when not supported by the
2307          * hardware. For example, L1 can specify an MSR bitmap - and we
2308          * can use it to avoid exits to L1 - even when L0 runs L2
2309          * without MSR bitmaps.
2310          */
2311         nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
2312
2313         /* secondary cpu-based controls */
2314         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2315                 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
2316         nested_vmx_secondary_ctls_low = 0;
2317         nested_vmx_secondary_ctls_high &=
2318                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2319                 SECONDARY_EXEC_UNRESTRICTED_GUEST |
2320                 SECONDARY_EXEC_WBINVD_EXITING;
2321
2322         if (enable_ept) {
2323                 /* nested EPT: emulate EPT also to L1 */
2324                 nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
2325                 nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2326                          VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
2327                          VMX_EPT_INVEPT_BIT;
2328                 nested_vmx_ept_caps &= vmx_capability.ept;
2329                 /*
2330                  * Since invept is completely emulated we support both global
2331                  * and context invalidation independent of what host cpu
2332                  * supports
2333                  */
2334                 nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2335                         VMX_EPT_EXTENT_CONTEXT_BIT;
2336         } else
2337                 nested_vmx_ept_caps = 0;
2338
2339         /* miscellaneous data */
2340         rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
2341         nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK |
2342                 VMX_MISC_SAVE_EFER_LMA;
2343         nested_vmx_misc_low |= VMX_MISC_ACTIVITY_HLT;
2344         nested_vmx_misc_high = 0;
2345 }
2346
2347 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2348 {
2349         /*
2350          * Bits clear in 'high' must be clear in 'control'; bits set in 'low' must be set.
2351          */
2352         return ((control & high) | low) == control;
2353 }
2354
2355 static inline u64 vmx_control_msr(u32 low, u32 high)
2356 {
2357         return low | ((u64)high << 32);
2358 }
2359
2360 /*
2361  * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
2362  * also let it use VMX-specific MSRs.
2363  * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
2364  * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
2365  * like all other MSRs).
2366  */
2367 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2368 {
2369         if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
2370                      msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
2371                 /*
2372                  * According to the spec, processors which do not support VMX
2373                  * should throw a #GP(0) when VMX capability MSRs are read.
2374                  */
2375                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
2376                 return 1;
2377         }
2378
2379         switch (msr_index) {
2380         case MSR_IA32_FEATURE_CONTROL:
2381                 if (nested_vmx_allowed(vcpu)) {
2382                         *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
2383                         break;
2384                 }
2385                 return 0;
2386         case MSR_IA32_VMX_BASIC:
2387                 /*
2388                  * This MSR reports some information about VMX support. We
2389                  * should return information about the VMX we emulate for the
2390                  * guest, and the VMCS structure we give it - not about the
2391                  * VMX support of the underlying hardware.
2392                  */
2393                 *pdata = VMCS12_REVISION |
2394                            ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2395                            (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2396                 break;
2397         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2398         case MSR_IA32_VMX_PINBASED_CTLS:
2399                 *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
2400                                         nested_vmx_pinbased_ctls_high);
2401                 break;
2402         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2403         case MSR_IA32_VMX_PROCBASED_CTLS:
2404                 *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
2405                                         nested_vmx_procbased_ctls_high);
2406                 break;
2407         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2408         case MSR_IA32_VMX_EXIT_CTLS:
2409                 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
2410                                         nested_vmx_exit_ctls_high);
2411                 break;
2412         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2413         case MSR_IA32_VMX_ENTRY_CTLS:
2414                 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
2415                                         nested_vmx_entry_ctls_high);
2416                 break;
2417         case MSR_IA32_VMX_MISC:
2418                 *pdata = vmx_control_msr(nested_vmx_misc_low,
2419                                          nested_vmx_misc_high);
2420                 break;
2421         /*
2422          * These MSRs specify bits which the guest must keep fixed (on or off)
2423          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2424          * We picked the standard core2 setting.
2425          */
2426 #define VMXON_CR0_ALWAYSON      (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2427 #define VMXON_CR4_ALWAYSON      X86_CR4_VMXE
2428         case MSR_IA32_VMX_CR0_FIXED0:
2429                 *pdata = VMXON_CR0_ALWAYSON;
2430                 break;
2431         case MSR_IA32_VMX_CR0_FIXED1:
2432                 *pdata = -1ULL;
2433                 break;
2434         case MSR_IA32_VMX_CR4_FIXED0:
2435                 *pdata = VMXON_CR4_ALWAYSON;
2436                 break;
2437         case MSR_IA32_VMX_CR4_FIXED1:
2438                 *pdata = -1ULL;
2439                 break;
2440         case MSR_IA32_VMX_VMCS_ENUM:
2441                 *pdata = 0x1f;
2442                 break;
2443         case MSR_IA32_VMX_PROCBASED_CTLS2:
2444                 *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
2445                                         nested_vmx_secondary_ctls_high);
2446                 break;
2447         case MSR_IA32_VMX_EPT_VPID_CAP:
2448                 /* Currently, no nested vpid support */
2449                 *pdata = nested_vmx_ept_caps;
2450                 break;
2451         default:
2452                 return 0;
2453         }
2454
2455         return 1;
2456 }
2457
2458 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2459 {
2460         u32 msr_index = msr_info->index;
2461         u64 data = msr_info->data;
2462         bool host_initialized = msr_info->host_initiated;
2463
2464         if (!nested_vmx_allowed(vcpu))
2465                 return 0;
2466
2467         if (msr_index == MSR_IA32_FEATURE_CONTROL) {
2468                 if (!host_initialized &&
2469                                 to_vmx(vcpu)->nested.msr_ia32_feature_control
2470                                 & FEATURE_CONTROL_LOCKED)
2471                         return 0;
2472                 to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
2473                 return 1;
2474         }
2475
2476         /*
2477          * No need to treat VMX capability MSRs specially: If we don't handle
2478          * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
2479          */
2480         return 0;
2481 }
2482
2483 /*
2484  * Reads an msr value (of 'msr_index') into 'pdata'.
2485  * Returns 0 on success, non-0 otherwise.
2486  * Assumes vcpu_load() was already called.
2487  */
2488 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2489 {
2490         u64 data;
2491         struct shared_msr_entry *msr;
2492
2493         if (!pdata) {
2494                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2495                 return -EINVAL;
2496         }
2497
2498         switch (msr_index) {
2499 #ifdef CONFIG_X86_64
2500         case MSR_FS_BASE:
2501                 data = vmcs_readl(GUEST_FS_BASE);
2502                 break;
2503         case MSR_GS_BASE:
2504                 data = vmcs_readl(GUEST_GS_BASE);
2505                 break;
2506         case MSR_KERNEL_GS_BASE:
2507                 vmx_load_host_state(to_vmx(vcpu));
2508                 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2509                 break;
2510 #endif
2511         case MSR_EFER:
2512                 return kvm_get_msr_common(vcpu, msr_index, pdata);
2513         case MSR_IA32_TSC:
2514                 data = guest_read_tsc();
2515                 break;
2516         case MSR_IA32_SYSENTER_CS:
2517                 data = vmcs_read32(GUEST_SYSENTER_CS);
2518                 break;
2519         case MSR_IA32_SYSENTER_EIP:
2520                 data = vmcs_readl(GUEST_SYSENTER_EIP);
2521                 break;
2522         case MSR_IA32_SYSENTER_ESP:
2523                 data = vmcs_readl(GUEST_SYSENTER_ESP);
2524                 break;
2525         case MSR_TSC_AUX:
2526                 if (!to_vmx(vcpu)->rdtscp_enabled)
2527                         return 1;
2528                 /* Otherwise falls through */
2529         default:
2530                 if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
2531                         return 0;
2532                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
2533                 if (msr) {
2534                         data = msr->data;
2535                         break;
2536                 }
2537                 return kvm_get_msr_common(vcpu, msr_index, pdata);
2538         }
2539
2540         *pdata = data;
2541         return 0;
2542 }
2543
2544 /*
2545  * Writes msr value into the appropriate "register".
2546  * Returns 0 on success, non-0 otherwise.
2547  * Assumes vcpu_load() was already called.
2548  */
2549 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2550 {
2551         struct vcpu_vmx *vmx = to_vmx(vcpu);
2552         struct shared_msr_entry *msr;
2553         int ret = 0;
2554         u32 msr_index = msr_info->index;
2555         u64 data = msr_info->data;
2556
2557         switch (msr_index) {
2558         case MSR_EFER:
2559                 ret = kvm_set_msr_common(vcpu, msr_info);
2560                 break;
2561 #ifdef CONFIG_X86_64
2562         case MSR_FS_BASE:
2563                 vmx_segment_cache_clear(vmx);
2564                 vmcs_writel(GUEST_FS_BASE, data);
2565                 break;
2566         case MSR_GS_BASE:
2567                 vmx_segment_cache_clear(vmx);
2568                 vmcs_writel(GUEST_GS_BASE, data);
2569                 break;
2570         case MSR_KERNEL_GS_BASE:
2571                 vmx_load_host_state(vmx);
2572                 vmx->msr_guest_kernel_gs_base = data;
2573                 break;
2574 #endif
2575         case MSR_IA32_SYSENTER_CS:
2576                 vmcs_write32(GUEST_SYSENTER_CS, data);
2577                 break;
2578         case MSR_IA32_SYSENTER_EIP:
2579                 vmcs_writel(GUEST_SYSENTER_EIP, data);
2580                 break;
2581         case MSR_IA32_SYSENTER_ESP:
2582                 vmcs_writel(GUEST_SYSENTER_ESP, data);
2583                 break;
2584         case MSR_IA32_TSC:
2585                 kvm_write_tsc(vcpu, msr_info);
2586                 break;
2587         case MSR_IA32_CR_PAT:
2588                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2589                         vmcs_write64(GUEST_IA32_PAT, data);
2590                         vcpu->arch.pat = data;
2591                         break;
2592                 }
2593                 ret = kvm_set_msr_common(vcpu, msr_info);
2594                 break;
2595         case MSR_IA32_TSC_ADJUST:
2596                 ret = kvm_set_msr_common(vcpu, msr_info);
2597                 break;
2598         case MSR_TSC_AUX:
2599                 if (!vmx->rdtscp_enabled)
2600                         return 1;
2601                 /* Check reserved bit, higher 32 bits should be zero */
2602                 if ((data >> 32) != 0)
2603                         return 1;
2604                 /* Otherwise falls through */
2605         default:
2606                 if (vmx_set_vmx_msr(vcpu, msr_info))
2607                         break;
2608                 msr = find_msr_entry(vmx, msr_index);
2609                 if (msr) {
2610                         msr->data = data;
2611                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
2612                                 preempt_disable();
2613                                 kvm_set_shared_msr(msr->index, msr->data,
2614                                                    msr->mask);
2615                                 preempt_enable();
2616                         }
2617                         break;
2618                 }
2619                 ret = kvm_set_msr_common(vcpu, msr_info);
2620         }
2621
2622         return ret;
2623 }
2624
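/*
 * Fetch a register that is cached lazily (RSP, RIP, or the PDPTRs) from the
 * VMCS into vcpu->arch and mark it as available.
 */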
2625 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2626 {
2627         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
2628         switch (reg) {
2629         case VCPU_REGS_RSP:
2630                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2631                 break;
2632         case VCPU_REGS_RIP:
2633                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2634                 break;
2635         case VCPU_EXREG_PDPTR:
2636                 if (enable_ept)
2637                         ept_save_pdptrs(vcpu);
2638                 break;
2639         default:
2640                 break;
2641         }
2642 }
2643
2644 static __init int cpu_has_kvm_support(void)
2645 {
2646         return cpu_has_vmx();
2647 }
2648
2649 static __init int vmx_disabled_by_bios(void)
2650 {
2651         u64 msr;
2652
2653         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
2654         if (msr & FEATURE_CONTROL_LOCKED) {
2655                 /* launched w/ TXT and VMX disabled */
2656                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2657                         && tboot_enabled())
2658                         return 1;
2659                 /* launched w/o TXT and VMX only enabled w/ TXT */
2660                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2661                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2662                         && !tboot_enabled()) {
2663                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
2664                                 "activate TXT before enabling KVM\n");
2665                         return 1;
2666                 }
2667                 /* launched w/o TXT and VMX disabled */
2668                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2669                         && !tboot_enabled())
2670                         return 1;
2671         }
2672
2673         return 0;
2674 }
2675
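/*
 * Enter VMX root operation; 'addr' is the physical address of this cpu's
 * VMXON region.
 */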
2676 static void kvm_cpu_vmxon(u64 addr)
2677 {
2678         asm volatile (ASM_VMX_VMXON_RAX
2679                         : : "a"(&addr), "m"(addr)
2680                         : "memory", "cc");
2681 }
2682
2683 static int hardware_enable(void *garbage)
2684 {
2685         int cpu = raw_smp_processor_id();
2686         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2687         u64 old, test_bits;
2688
2689         if (read_cr4() & X86_CR4_VMXE)
2690                 return -EBUSY;
2691
2692         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
2693
2694         /*
2695          * Now we can enable the vmclear operation in kdump
2696          * since the loaded_vmcss_on_cpu list on this cpu
2697          * has been initialized.
2698          *
2699          * Though the cpu is not in VMX operation now, there
2700          * is no problem in enabling the vmclear operation,
2701          * since the loaded_vmcss_on_cpu list is empty.
2702          */
2703         crash_enable_local_vmclear(cpu);
2704
2705         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
2706
2707         test_bits = FEATURE_CONTROL_LOCKED;
2708         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
2709         if (tboot_enabled())
2710                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
2711
2712         if ((old & test_bits) != test_bits) {
2713                 /* enable and lock */
2714                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
2715         }
2716         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
2717
2718         if (vmm_exclusive) {
2719                 kvm_cpu_vmxon(phys_addr);
2720                 ept_sync_global();
2721         }
2722
2723         native_store_gdt(&__get_cpu_var(host_gdt));
2724
2725         return 0;
2726 }
2727
2728 static void vmclear_local_loaded_vmcss(void)
2729 {
2730         int cpu = raw_smp_processor_id();
2731         struct loaded_vmcs *v, *n;
2732
2733         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2734                                  loaded_vmcss_on_cpu_link)
2735                 __loaded_vmcs_clear(v);
2736 }
2737
2738
2739 /*
2740  * Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() tricks.
2741  */
2742 static void kvm_cpu_vmxoff(void)
2743 {
2744         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
2745 }
2746
2747 static void hardware_disable(void *garbage)
2748 {
2749         if (vmm_exclusive) {
2750                 vmclear_local_loaded_vmcss();
2751                 kvm_cpu_vmxoff();
2752         }
2753         write_cr4(read_cr4() & ~X86_CR4_VMXE);
2754 }
2755
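/*
 * Reconcile the desired control bits with what the capability MSR allows:
 * bits that are 0 in the MSR's high word are cleared, bits that are 1 in the
 * low word are forced on, and -EIO is returned if a required (ctl_min) bit
 * cannot be set.
 */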
2756 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
2757                                       u32 msr, u32 *result)
2758 {
2759         u32 vmx_msr_low, vmx_msr_high;
2760         u32 ctl = ctl_min | ctl_opt;
2761
2762         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2763
2764         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2765         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2766
2767         /* Ensure minimum (required) set of control bits are supported. */
2768         if (ctl_min & ~ctl)
2769                 return -EIO;
2770
2771         *result = ctl;
2772         return 0;
2773 }
2774
2775 static __init bool allow_1_setting(u32 msr, u32 ctl)
2776 {
2777         u32 vmx_msr_low, vmx_msr_high;
2778
2779         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2780         return vmx_msr_high & ctl;
2781 }
2782
2783 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2784 {
2785         u32 vmx_msr_low, vmx_msr_high;
2786         u32 min, opt, min2, opt2;
2787         u32 _pin_based_exec_control = 0;
2788         u32 _cpu_based_exec_control = 0;
2789         u32 _cpu_based_2nd_exec_control = 0;
2790         u32 _vmexit_control = 0;
2791         u32 _vmentry_control = 0;
2792
2793         min = CPU_BASED_HLT_EXITING |
2794 #ifdef CONFIG_X86_64
2795               CPU_BASED_CR8_LOAD_EXITING |
2796               CPU_BASED_CR8_STORE_EXITING |
2797 #endif
2798               CPU_BASED_CR3_LOAD_EXITING |
2799               CPU_BASED_CR3_STORE_EXITING |
2800               CPU_BASED_USE_IO_BITMAPS |
2801               CPU_BASED_MOV_DR_EXITING |
2802               CPU_BASED_USE_TSC_OFFSETING |
2803               CPU_BASED_MWAIT_EXITING |
2804               CPU_BASED_MONITOR_EXITING |
2805               CPU_BASED_INVLPG_EXITING |
2806               CPU_BASED_RDPMC_EXITING;
2807
2808         opt = CPU_BASED_TPR_SHADOW |
2809               CPU_BASED_USE_MSR_BITMAPS |
2810               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2811         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2812                                 &_cpu_based_exec_control) < 0)
2813                 return -EIO;
2814 #ifdef CONFIG_X86_64
2815         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2816                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2817                                            ~CPU_BASED_CR8_STORE_EXITING;
2818 #endif
2819         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2820                 min2 = 0;
2821                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2822                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2823                         SECONDARY_EXEC_WBINVD_EXITING |
2824                         SECONDARY_EXEC_ENABLE_VPID |
2825                         SECONDARY_EXEC_ENABLE_EPT |
2826                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
2827                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
2828                         SECONDARY_EXEC_RDTSCP |
2829                         SECONDARY_EXEC_ENABLE_INVPCID |
2830                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
2831                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2832                         SECONDARY_EXEC_SHADOW_VMCS;
2833                 if (adjust_vmx_controls(min2, opt2,
2834                                         MSR_IA32_VMX_PROCBASED_CTLS2,
2835                                         &_cpu_based_2nd_exec_control) < 0)
2836                         return -EIO;
2837         }
2838 #ifndef CONFIG_X86_64
2839         if (!(_cpu_based_2nd_exec_control &
2840                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2841                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2842 #endif
2843
2844         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2845                 _cpu_based_2nd_exec_control &= ~(
2846                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2847                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2848                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2849
2850         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
2851                 /* CR3 accesses and invlpg don't need to cause VM Exits
2852                    when EPT is enabled */
2853                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2854                                              CPU_BASED_CR3_STORE_EXITING |
2855                                              CPU_BASED_INVLPG_EXITING);
2856                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
2857                       vmx_capability.ept, vmx_capability.vpid);
2858         }
2859
2860         min = 0;
2861 #ifdef CONFIG_X86_64
2862         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2863 #endif
2864         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
2865                 VM_EXIT_ACK_INTR_ON_EXIT;
2866         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2867                                 &_vmexit_control) < 0)
2868                 return -EIO;
2869
2870         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
2871         opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
2872         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2873                                 &_pin_based_exec_control) < 0)
2874                 return -EIO;
2875
2876         if (!(_cpu_based_2nd_exec_control &
2877                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
2878                 !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
2879                 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2880
2881         min = 0;
2882         opt = VM_ENTRY_LOAD_IA32_PAT;
2883         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2884                                 &_vmentry_control) < 0)
2885                 return -EIO;
2886
2887         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2888
2889         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2890         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2891                 return -EIO;
2892
2893 #ifdef CONFIG_X86_64
2894         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2895         if (vmx_msr_high & (1u<<16))
2896                 return -EIO;
2897 #endif
2898
2899         /* Require Write-Back (WB) memory type for VMCS accesses. */
2900         if (((vmx_msr_high >> 18) & 15) != 6)
2901                 return -EIO;
2902
2903         vmcs_conf->size = vmx_msr_high & 0x1fff;
2904         vmcs_conf->order = get_order(vmcs_config.size);
2905         vmcs_conf->revision_id = vmx_msr_low;
2906
2907         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2908         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2909         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2910         vmcs_conf->vmexit_ctrl         = _vmexit_control;
2911         vmcs_conf->vmentry_ctrl        = _vmentry_control;
2912
2913         cpu_has_load_ia32_efer =
2914                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2915                                 VM_ENTRY_LOAD_IA32_EFER)
2916                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2917                                    VM_EXIT_LOAD_IA32_EFER);
2918
2919         cpu_has_load_perf_global_ctrl =
2920                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2921                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2922                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2923                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2924
2925         /*
2926          * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
2927          * but due to the errata below it can't be used. The workaround is
2928          * to use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2929          *
2930          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
2931          *
2932          * AAK155             (model 26)
2933          * AAP115             (model 30)
2934          * AAT100             (model 37)
2935          * BC86,AAY89,BD102   (model 44)
2936          * BA97               (model 46)
2937          *
2938          */
2939         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2940                 switch (boot_cpu_data.x86_model) {
2941                 case 26:
2942                 case 30:
2943                 case 37:
2944                 case 44:
2945                 case 46:
2946                         cpu_has_load_perf_global_ctrl = false;
2947                         printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2948                                         "does not work properly. Using workaround\n");
2949                         break;
2950                 default:
2951                         break;
2952                 }
2953         }
2954
2955         return 0;
2956 }
2957
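/*
 * Allocate a VMCS-sized region from @cpu's NUMA node, zero it and stamp it
 * with the revision id read from MSR_IA32_VMX_BASIC, which the CPU requires
 * before the region may be loaded with VMPTRLD (or used for VMXON).
 */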
2958 static struct vmcs *alloc_vmcs_cpu(int cpu)
2959 {
2960         int node = cpu_to_node(cpu);
2961         struct page *pages;
2962         struct vmcs *vmcs;
2963
2964         pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
2965         if (!pages)
2966                 return NULL;
2967         vmcs = page_address(pages);
2968         memset(vmcs, 0, vmcs_config.size);
2969         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
2970         return vmcs;
2971 }
2972
2973 static struct vmcs *alloc_vmcs(void)
2974 {
2975         return alloc_vmcs_cpu(raw_smp_processor_id());
2976 }
2977
2978 static void free_vmcs(struct vmcs *vmcs)
2979 {
2980         free_pages((unsigned long)vmcs, vmcs_config.order);
2981 }
2982
2983 /*
2984  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2985  */
2986 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2987 {
2988         if (!loaded_vmcs->vmcs)
2989                 return;
2990         loaded_vmcs_clear(loaded_vmcs);
2991         free_vmcs(loaded_vmcs->vmcs);
2992         loaded_vmcs->vmcs = NULL;
2993 }
2994
2995 static void free_kvm_area(void)
2996 {
2997         int cpu;
2998
2999         for_each_possible_cpu(cpu) {
3000                 free_vmcs(per_cpu(vmxarea, cpu));
3001                 per_cpu(vmxarea, cpu) = NULL;
3002         }
3003 }
3004
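/*
 * Allocate one "vmxarea" VMCS per possible CPU; these per-cpu regions are
 * later used as the VMXON regions when VMX operation is enabled.
 */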
3005 static __init int alloc_kvm_area(void)
3006 {
3007         int cpu;
3008
3009         for_each_possible_cpu(cpu) {
3010                 struct vmcs *vmcs;
3011
3012                 vmcs = alloc_vmcs_cpu(cpu);
3013                 if (!vmcs) {
3014                         free_kvm_area();
3015                         return -ENOMEM;
3016                 }
3017
3018                 per_cpu(vmxarea, cpu) = vmcs;
3019         }
3020         return 0;
3021 }
3022
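/*
 * Module-load time setup: probe the VMX capability MSRs via
 * setup_vmcs_config(), turn off any optional features (vpid, ept,
 * unrestricted guest, flexpriority, ple, apicv, ...) the hardware does
 * not support, and allocate the per-cpu VMXON regions.
 */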
3023 static __init int hardware_setup(void)
3024 {
3025         if (setup_vmcs_config(&vmcs_config) < 0)
3026                 return -EIO;
3027
3028         if (boot_cpu_has(X86_FEATURE_NX))
3029                 kvm_enable_efer_bits(EFER_NX);
3030
3031         if (!cpu_has_vmx_vpid())
3032                 enable_vpid = 0;
3033         if (!cpu_has_vmx_shadow_vmcs())
3034                 enable_shadow_vmcs = 0;
3035
3036         if (!cpu_has_vmx_ept() ||
3037             !cpu_has_vmx_ept_4levels()) {
3038                 enable_ept = 0;
3039                 enable_unrestricted_guest = 0;
3040                 enable_ept_ad_bits = 0;
3041         }
3042
3043         if (!cpu_has_vmx_ept_ad_bits())
3044                 enable_ept_ad_bits = 0;
3045
3046         if (!cpu_has_vmx_unrestricted_guest())
3047                 enable_unrestricted_guest = 0;
3048
3049         if (!cpu_has_vmx_flexpriority())
3050                 flexpriority_enabled = 0;
3051
3052         if (!cpu_has_vmx_tpr_shadow())
3053                 kvm_x86_ops->update_cr8_intercept = NULL;
3054
3055         if (enable_ept && !cpu_has_vmx_ept_2m_page())
3056                 kvm_disable_largepages();
3057
3058         if (!cpu_has_vmx_ple())
3059                 ple_gap = 0;
3060
3061         if (!cpu_has_vmx_apicv())
3062                 enable_apicv = 0;
3063
3064         if (enable_apicv)
3065                 kvm_x86_ops->update_cr8_intercept = NULL;
3066         else {
3067                 kvm_x86_ops->hwapic_irr_update = NULL;
3068                 kvm_x86_ops->deliver_posted_interrupt = NULL;
3069                 kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
3070         }
3071
3072         if (nested)
3073                 nested_vmx_setup_ctls_msrs();
3074
3075         return alloc_kvm_area();
3076 }
3077
3078 static __exit void hardware_unsetup(void)
3079 {
3080         free_kvm_area();
3081 }
3082
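/*
 * Emulation is required when invalid-guest-state emulation is enabled and
 * the current guest state would not pass VM-entry checks.
 */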
3083 static bool emulation_required(struct kvm_vcpu *vcpu)
3084 {
3085         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3086 }
3087
3088 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3089                 struct kvm_segment *save)
3090 {
3091         if (!emulate_invalid_guest_state) {
3092                 /*
3093                  * CS and SS RPL should be equal during guest entry according
3094                  * to the VMX spec, but in reality this is not always so. Since
3095                  * the vcpu is in the middle of the transition from real mode
3096                  * to protected mode, it is safe to assume that RPL 0 is a good
3097                  * default value.
3098                  */
3099                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3100                         save->selector &= ~SELECTOR_RPL_MASK;
3101                 save->dpl = save->selector & SELECTOR_RPL_MASK;
3102                 save->s = 1;
3103         }
3104         vmx_set_segment(vcpu, save, seg);
3105 }
3106
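/*
 * Leave real-mode (vm86) emulation: drop the vm86 flags from RFLAGS and
 * CR4.VME, restore the segment registers saved by enter_rmode() and
 * recompute the exception bitmap for protected mode.
 */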
3107 static void enter_pmode(struct kvm_vcpu *vcpu)
3108 {
3109         unsigned long flags;
3110         struct vcpu_vmx *vmx = to_vmx(vcpu);
3111
3112         /*
3113          * Update the real mode segment cache. It may not be up to date if a
3114          * segment register was written while the vcpu was in guest mode.
3115          */
3116         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3117         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3118         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3119         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3120         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3121         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3122
3123         vmx->rmode.vm86_active = 0;
3124
3125         vmx_segment_cache_clear(vmx);
3126
3127         vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3128
3129         flags = vmcs_readl(GUEST_RFLAGS);
3130         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3131         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3132         vmcs_writel(GUEST_RFLAGS, flags);
3133
3134         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3135                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3136
3137         update_exception_bitmap(vcpu);
3138
3139         fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3140         fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3141         fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3142         fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3143         fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3144         fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3145
3146         /* CPL is always 0 when CPU enters protected mode */
3147         __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3148         vmx->cpl = 0;
3149 }
3150
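/*
 * Make a segment usable from virtual-8086 mode: force DPL 3 and, unless
 * invalid guest state is emulated, derive the selector from the base
 * (base >> 4) and use a 64K byte-granular limit.
 */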
3151 static void fix_rmode_seg(int seg, struct kvm_segment *save)
3152 {
3153         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3154         struct kvm_segment var = *save;
3155
3156         var.dpl = 0x3;
3157         if (seg == VCPU_SREG_CS)
3158                 var.type = 0x3;
3159
3160         if (!emulate_invalid_guest_state) {
3161                 var.selector = var.base >> 4;
3162                 var.base = var.base & 0xffff0;
3163                 var.limit = 0xffff;
3164                 var.g = 0;
3165                 var.db = 0;
3166                 var.present = 1;
3167                 var.s = 1;
3168                 var.l = 0;
3169                 var.unusable = 0;
3170                 var.type = 0x3;
3171                 var.avl = 0;
3172                 if (save->base & 0xf)
3173                         printk_once(KERN_WARNING "kvm: segment base is not "
3174                                         "paragraph aligned when entering "
3175                                         "real mode (seg=%d)", seg);
3176         }
3177
3178         vmcs_write16(sf->selector, var.selector);
3179         vmcs_write32(sf->base, var.base);
3180         vmcs_write32(sf->limit, var.limit);
3181         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3182 }
3183
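/*
 * Enter real-mode (vm86) emulation: save the guest segment registers and
 * RFLAGS, point TR at the real-mode TSS set up via KVM_SET_TSS_ADDR, and
 * set EFLAGS.VM/IOPL and CR4.VME so the guest executes in virtual-8086
 * mode.
 */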
3184 static void enter_rmode(struct kvm_vcpu *vcpu)
3185 {
3186         unsigned long flags;
3187         struct vcpu_vmx *vmx = to_vmx(vcpu);
3188
3189         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3190         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3191         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3192         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3193         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3194         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3195         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3196
3197         vmx->rmode.vm86_active = 1;
3198
3199         /*
3200          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
3201          * vcpu. Warn the user that an update is overdue.
3202          */
3203         if (!vcpu->kvm->arch.tss_addr)
3204                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
3205                              "called before entering vcpu\n");
3206
3207         vmx_segment_cache_clear(vmx);
3208
3209         vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
3210         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3211         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3212
3213         flags = vmcs_readl(GUEST_RFLAGS);
3214         vmx->rmode.save_rflags = flags;
3215
3216         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3217
3218         vmcs_writel(GUEST_RFLAGS, flags);
3219         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3220         update_exception_bitmap(vcpu);
3221
3222         fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3223         fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3224         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3225         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3226         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3227         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3228
3229         kvm_mmu_reset_context(vcpu);
3230 }
3231
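/*
 * Track the guest's EFER in the shared MSR area and keep the VM-entry
 * "IA-32e mode guest" control in sync with EFER.LMA; EFER.LME is masked
 * out of the saved value while the guest is not in long mode.
 */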
3232 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3233 {
3234         struct vcpu_vmx *vmx = to_vmx(vcpu);
3235         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
3236
3237         if (!msr)
3238                 return;
3239
3240         /*
3241          * Force kernel_gs_base reloading before EFER changes, as control
3242          * of this msr depends on is_long_mode().
3243          */
3244         vmx_load_host_state(to_vmx(vcpu));
3245         vcpu->arch.efer = efer;
3246         if (efer & EFER_LMA) {
3247                 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3248                 msr->data = efer;
3249         } else {
3250                 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3251
3252                 msr->data = efer & ~EFER_LME;
3253         }
3254         setup_msrs(vmx);
3255 }
3256
3257 #ifdef CONFIG_X86_64
3258
3259 static void enter_lmode(struct kvm_vcpu *vcpu)
3260 {
3261         u32 guest_tr_ar;
3262
3263         vmx_segment_cache_clear(to_vmx(vcpu));
3264
3265         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3266         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
3267                 pr_debug_ratelimited("%s: tss fixup for long mode\n",
3268                                      __func__);
3269                 vmcs_write32(GUEST_TR_AR_BYTES,
3270                              (guest_tr_ar & ~AR_TYPE_MASK)
3271                              | AR_TYPE_BUSY_64_TSS);
3272         }
3273         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3274 }
3275
3276 static void exit_lmode(struct kvm_vcpu *vcpu)
3277 {
3278         vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
3279         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3280 }
3281
3282 #endif
3283
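/*
 * Flush guest TLB entries: invalidate the VPID-tagged linear mappings and,
 * when EPT is in use, the combined mappings for the current EPT root too.
 */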
3284 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3285 {
3286         vpid_sync_context(to_vmx(vcpu));
3287         if (enable_ept) {
3288                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3289                         return;
3290                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
3291         }
3292 }
3293
3294 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3295 {
3296         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
3297
3298         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
3299         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
3300 }
3301
3302 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
3303 {
3304         if (enable_ept && is_paging(vcpu))
3305                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3306         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3307 }
3308
3309 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3310 {
3311         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
3312
3313         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
3314         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
3315 }
3316
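/*
 * With EPT and PAE paging the guest's four PDPTEs are shadowed in the VMCS;
 * copy the cached values into the GUEST_PDPTRn fields if they were dirtied.
 */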
3317 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3318 {
3319         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3320
3321         if (!test_bit(VCPU_EXREG_PDPTR,
3322                       (unsigned long *)&vcpu->arch.regs_dirty))
3323                 return;
3324
3325         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3326                 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3327                 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3328                 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3329                 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3330         }
3331 }
3332
3333 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3334 {
3335         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3336
3337         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3338                 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3339                 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3340                 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3341                 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3342         }
3343
3344         __set_bit(VCPU_EXREG_PDPTR,
3345                   (unsigned long *)&vcpu->arch.regs_avail);
3346         __set_bit(VCPU_EXREG_PDPTR,
3347                   (unsigned long *)&vcpu->arch.regs_dirty);
3348 }
3349
3350 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3351
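/*
 * With EPT enabled, CR3 load/store exiting is only needed while the guest
 * has paging disabled (the CR3 actually loaded then points at the identity
 * map, so guest CR3 accesses must be intercepted); toggle those exiting
 * controls on paging transitions and refresh CR4 to match.
 */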
3352 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3353                                         unsigned long cr0,
3354                                         struct kvm_vcpu *vcpu)
3355 {
3356         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3357                 vmx_decache_cr3(vcpu);
3358         if (!(cr0 & X86_CR0_PG)) {
3359                 /* From paging/starting to nonpaging */
3360                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3361                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
3362                              (CPU_BASED_CR3_LOAD_EXITING |
3363                               CPU_BASED_CR3_STORE_EXITING));
3364                 vcpu->arch.cr0 = cr0;
3365                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3366         } else if (!is_paging(vcpu)) {
3367                 /* From nonpaging to paging */
3368                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3369                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
3370                              ~(CPU_BASED_CR3_LOAD_EXITING |
3371                                CPU_BASED_CR3_STORE_EXITING));
3372                 vcpu->arch.cr0 = cr0;
3373                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3374         }
3375
3376         if (!(cr0 & X86_CR0_WP))
3377                 *hw_cr0 &= ~X86_CR0_WP;
3378 }
3379
3380 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3381 {
3382         struct vcpu_vmx *vmx = to_vmx(vcpu);
3383         unsigned long hw_cr0;
3384
3385         hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
3386         if (enable_unrestricted_guest)
3387                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3388         else {
3389                 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3390
3391                 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3392                         enter_pmode(vcpu);
3393
3394                 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3395                         enter_rmode(vcpu);
3396         }
3397
3398 #ifdef CONFIG_X86_64
3399         if (vcpu->arch.efer & EFER_LME) {
3400                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
3401                         enter_lmode(vcpu);
3402                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
3403                         exit_lmode(vcpu);
3404         }
3405 #endif
3406
3407         if (enable_ept)
3408                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3409
3410         if (!vcpu->fpu_active)
3411                 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
3412
3413         vmcs_writel(CR0_READ_SHADOW, cr0);
3414         vmcs_writel(GUEST_CR0, hw_cr0);
3415         vcpu->arch.cr0 = cr0;
3416
3417         /* depends on vcpu->arch.cr0 being set to the new value */
3418         vmx->emulation_required = emulation_required(vcpu);
3419 }
3420
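/*
 * Build an EPT pointer from the root table address: write-back memory
 * type, a 4-level page walk and, if enabled, accessed/dirty tracking.
 */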
3421 static u64 construct_eptp(unsigned long root_hpa)
3422 {
3423         u64 eptp;
3424
3425         /* TODO: write the value read from the MSR */
3426         eptp = VMX_EPT_DEFAULT_MT |
3427                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
3428         if (enable_ept_ad_bits)
3429                 eptp |= VMX_EPT_AD_ENABLE_BIT;
3430         eptp |= (root_hpa & PAGE_MASK);
3431
3432         return eptp;
3433 }
3434
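/*
 * Load the guest's CR3. With EPT the EPT pointer is rewritten as well, and
 * GUEST_CR3 is either the guest's own CR3 (when the guest is paging or in
 * guest mode) or the identity-mapped page table used while it runs unpaged.
 */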
3435 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3436 {
3437         unsigned long guest_cr3;
3438         u64 eptp;
3439
3440         guest_cr3 = cr3;
3441         if (enable_ept) {
3442                 eptp = construct_eptp(cr3);
3443                 vmcs_write64(EPT_POINTER, eptp);
3444                 if (is_paging(vcpu) || is_guest_mode(vcpu))
3445                         guest_cr3 = kvm_read_cr3(vcpu);
3446                 else
3447                         guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
3448                 ept_load_pdptrs(vcpu);
3449         }
3450
3451         vmx_flush_tlb(vcpu);
3452         vmcs_writel(GUEST_CR3, guest_cr3);
3453 }
3454
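/*
 * Set guest CR4. CR4.VMXE may only be set when nested VMX is allowed, and
 * while VMXON is active the bits in VMXON_CR4_ALWAYSON cannot be cleared.
 * The value programmed into hardware also carries the always-on bits for
 * the current (real or protected) mode.
 */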
3455 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3456 {
3457         unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
3458                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
3459
3460         if (cr4 & X86_CR4_VMXE) {
3461                 /*
3462                  * To use VMXON (and later other VMX instructions), a guest
3463                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
3464                  * So basically the check on whether to allow nested VMX
3465                  * is here.
3466                  */
3467                 if (!nested_vmx_allowed(vcpu))
3468                         return 1;
3469         }
3470         if (to_vmx(vcpu)->nested.vmxon &&
3471             ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
3472                 return 1;
3473
3474         vcpu->arch.cr4 = cr4;
3475         if (enable_ept) {
3476                 if (!is_paging(vcpu)) {
3477                         hw_cr4 &= ~X86_CR4_PAE;
3478                         hw_cr4 |= X86_CR4_PSE;
3479                         /*
3480                          * SMEP is disabled if the CPU is in non-paging mode in
3481                          * hardware. However, KVM always uses paging mode to
3482                          * emulate guest non-paging mode with TDP.
3483                          * To emulate this behavior, SMEP needs to be manually
3484                          * disabled when the guest switches to non-paging mode.
3485                          */
3486                         hw_cr4 &= ~X86_CR4_SMEP;
3487                 } else if (!(cr4 & X86_CR4_PAE)) {
3488                         hw_cr4 &= ~X86_CR4_PAE;
3489                 }
3490         }
3491
3492         vmcs_writel(CR4_READ_SHADOW, cr4);
3493         vmcs_writel(GUEST_CR4, hw_cr4);
3494         return 0;
3495 }
3496
3497 static void vmx_get_segment(struct kvm_vcpu *vcpu,
3498                             struct kvm_segment *var, int seg)
3499 {
3500         struct vcpu_vmx *vmx = to_vmx(vcpu);
3501         u32 ar;
3502
3503         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3504                 *var = vmx->rmode.segs[seg];
3505                 if (seg == VCPU_SREG_TR
3506                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3507                         return;
3508                 var->base = vmx_read_guest_seg_base(vmx, seg);
3509                 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3510                 return;
3511         }
3512         var->base = vmx_read_guest_seg_base(vmx, seg);
3513         var->limit = vmx_read_guest_seg_limit(vmx, seg);
3514         var->selector = vmx_read_guest_seg_selector(vmx, seg);
3515         ar = vmx_read_guest_seg_ar(vmx, seg);
3516         var->unusable = (ar >> 16) & 1;
3517         var->type = ar & 15;
3518         var->s = (ar >> 4) & 1;
3519         var->dpl = (ar >> 5) & 3;
3520         /*
3521          * Some userspaces do not preserve the unusable property. Since a
3522          * usable segment has to be present according to the VMX spec, we can
3523          * use the present property to work around the userspace bug by making
3524          * an unusable segment always non-present. vmx_segment_access_rights()
3525          * already marks a non-present segment as unusable.
3526          */
3527         var->present = !var->unusable;
3528         var->avl = (ar >> 12) & 1;
3529         var->l = (ar >> 13) & 1;
3530         var->db = (ar >> 14) & 1;
3531         var->g = (ar >> 15) & 1;
3532 }
3533
3534 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3535 {
3536         struct kvm_segment s;
3537
3538         if (to_vmx(vcpu)->rmode.vm86_active) {
3539                 vmx_get_segment(vcpu, &s, seg);
3540                 return s.base;
3541         }
3542         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3543 }
3544
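/*
 * Current privilege level: 0 outside protected mode, 3 in virtual-8086
 * mode, otherwise the RPL of the CS selector, cached in vmx->cpl until
 * the register cache is invalidated.
 */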
3545 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
3546 {
3547         struct vcpu_vmx *vmx = to_vmx(vcpu);
3548
3549         if (!is_protmode(vcpu))
3550                 return 0;
3551
3552         if (!is_long_mode(vcpu)
3553             && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
3554                 return 3;
3555
3556         if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
3557                 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3558                 vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
3559         }
3560
3561         return vmx->cpl;
3562 }
3563
3564
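/*
 * Pack a struct kvm_segment into the VMCS access-rights ("AR bytes")
 * encoding; unusable or non-present segments are encoded with only the
 * "unusable" bit set.
 */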
3565 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3566 {
3567         u32 ar;
3568
3569         if (var->unusable || !var->present)
3570                 ar = 1 << 16;
3571         else {
3572                 ar = var->type & 15;
3573                 ar |= (var->s & 1) << 4;
3574                 ar |= (var->dpl & 3) << 5;
3575                 ar |= (var->present & 1) << 7;
3576                 ar |= (var->avl & 1) << 12;
3577                 ar |= (var->l & 1) << 13;
3578                 ar |= (var->db & 1) << 14;
3579                 ar |= (var->g & 1) << 15;
3580         }
3581
3582         return ar;
3583 }
3584
3585 static void vmx_set_segment(struct kvm_vcpu *vcpu,
3586                             struct kvm_segment *var, int seg)
3587 {
3588         struct vcpu_vmx *vmx = to_vmx(vcpu);
3589         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3590
3591         vmx_segment_cache_clear(vmx);
3592         if (seg == VCPU_SREG_CS)
3593                 __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3594
3595         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3596                 vmx->rmode.segs[seg] = *var;
3597                 if (seg == VCPU_SREG_TR)
3598                         vmcs_write16(sf->selector, var->selector);
3599                 else if (var->s)
3600                         fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3601                 goto out;
3602         }
3603
3604         vmcs_writel(sf->base, var->base);
3605         vmcs_write32(sf->limit, var->limit);
3606         vmcs_write16(sf->selector, var->selector);
3607
3608         /*
3609          *   Fix the "Accessed" bit in the AR field of segment registers for
3610          * older qemu binaries.
3611          *   The IA-32 architecture specifies that at processor reset the
3612          * "Accessed" bit in the AR field of segment registers is 1, yet qemu
3613          * sets it to 0 in its userland code. This causes an invalid-guest-state
3614          * vmexit when "unrestricted guest" mode is turned on.
3615          *   A fix for this setup issue in cpu_reset has been pushed to the
3616          * qemu tree; newer qemu binaries with that fix will not need this
3617          * kvm hack.
3618          */
3619         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
3620                 var->type |= 0x1; /* Accessed */
3621
3622         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3623
3624 out:
3625         vmx->emulation_required |= emulation_required(vcpu);
3626 }
3627
3628 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3629 {
3630         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3631
3632         *db = (ar >> 14) & 1;
3633         *l = (ar >> 13) & 1;
3634 }
3635
3636 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3637 {
3638         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3639         dt->address = vmcs_readl(GUEST_IDTR_BASE);
3640 }
3641
3642 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3643 {
3644         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3645         vmcs_writel(GUEST_IDTR_BASE, dt->address);
3646 }
3647
3648 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3649 {
3650         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3651         dt->address = vmcs_readl(GUEST_GDTR_BASE);
3652 }
3653
3654 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3655 {
3656         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3657         vmcs_writel(GUEST_GDTR_BASE, dt->address);
3658 }