/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/desc.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

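/*
 * The I/O permission bitmap is 12K per the AMD APM and the MSR permission
 * map is 8K, so they are allocated as contiguous page blocks of order 2
 * (16K) and order 1 (8K) respectively.
 */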
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_NRIP (1 << 3)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)

#define NESTED_EXIT_HOST        0       /* Exit handled on host level */
#define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
        MSR_FS_BASE,
#endif
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

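/*
 * State kept while a nested (L2) guest runs: the host save area, the L1
 * guest's view of the VM_CR and VM_HSAVE_PA MSRs, the gpa of the vmcb
 * that L1 passed to VMRUN, and cached copies of the intercept settings
 * L1 programmed.
 */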
struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb;

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;

        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

        /* cache for intercepts of the guest */
        u16 intercept_cr_read;
        u16 intercept_cr_write;
        u16 intercept_dr_read;
        u16 intercept_dr_write;
        u32 intercept_exceptions;
        u64 intercept;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        unsigned long vmcb_pa;
        struct svm_cpu_data *svm_data;
        uint64_t asid_generation;
        uint64_t sysenter_esp;
        uint64_t sysenter_eip;

        u64 next_rip;

        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
        u64 host_gs_base;

        u32 *msrpm;

        struct nested_state nested;

        bool nmi_singlestep;

        unsigned int3_injected;
        unsigned long int3_rip;
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
        return svm->nested.vmcb;
}

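/*
 * The Global Interrupt Flag gates all interrupt delivery while in SVM
 * context. The guest's virtualized GIF is tracked in a hflags bit rather
 * than in hardware.
 */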
static inline void enable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
        int cpu;
        int r;
};

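/*
 * The MSR permission map consists of three 2K ranges, each covering 8192
 * MSRs at two bits per MSR: one read-intercept bit and one write-intercept
 * bit.
 */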
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}

static inline void clgi(void)
{
        asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
        asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

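/*
 * Bumping the vcpu out of the current ASID generation forces a fresh ASID
 * to be assigned before the next VMRUN, which effectively flushes the
 * guest's TLB entries.
 */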
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        vcpu->arch.efer = efer;
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

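/*
 * SVM has only a single interrupt-shadow bit, so it is reported for both
 * the STI and MOV SS shadow states that the generic x86 code
 * distinguishes.
 */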
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

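/*
 * next_rip is normally filled in by the exit handlers (or by hardware
 * where the NRIP feature is available); if it is unset the instruction
 * has to be re-decoded through the emulator to find its length.
 */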
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!svm->next_rip) {
                if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
                       __func__, kvm_rip_read(vcpu), svm->next_rip);

        kvm_rip_write(vcpu, svm->next_rip);
        svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void *garbage)
{
        cpu_svm_disable();
}

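/*
 * Per-cpu enable: set EFER.SVME and point MSR_VM_HSAVE_PA at this cpu's
 * host save area, where the hardware stores host state across VMRUN.
 */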
static int svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
                       me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);

        if (!sd) {
                printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
                       me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;

        kvm_get_gdt(&gdt_descr);
        gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int r;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!sd->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = sd;

        return 0;

err_1:
        kfree(sd);
        return r;
}

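/*
 * Each MSR occupies two adjacent bits in the permission map; a set bit
 * causes the corresponding read or write to exit, so passing read=1 or
 * write=1 here clears the bit and lets the access through.
 */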
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr >= msrpm_ranges[i] &&
                    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
                        u32 msr_offset = (i * MSRS_IN_RANGE + msr -
                                          msrpm_ranges[i]) * 2;

                        u32 *base = msrpm + (msr_offset / 32);
                        u32 msr_shift = msr_offset % 32;
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
                        return;
                }
        }
        BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
        set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
        set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 1;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 0;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

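/*
 * Module-wide setup: the IOPM is set to all ones so every I/O port access
 * exits, and per-cpu data is allocated for each possible cpu.
 */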
static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME);
        }

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        if (!svm_has(SVM_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt) {
                printk(KERN_INFO "kvm: Nested Paging disabled\n");
                npt_enabled = false;
        }

        if (npt_enabled) {
                printk(KERN_INFO "kvm: Nested Paging enabled\n");
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        return 0;

err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}

static __exit void svm_hardware_unsetup(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

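/*
 * Bring the VMCB to its power-on state: intercept all CR/DR accesses and
 * the unconditional instruction intercepts, then load the architectural
 * reset values (CS:IP = f000:fff0 etc.). With nested paging the
 * CR3-related intercepts can be dropped again.
 */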
static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.fpu_active = 1;

        control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK;

        control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK |
                                        INTERCEPT_CR8_MASK;

        control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR4_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR6_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR4_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR6_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_exceptions = (1 << PF_VECTOR) |
                                        (1 << UD_VECTOR) |
                                        (1 << MC_VECTOR);

        control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
                                (1ULL << INTERCEPT_SELECTIVE_CR0) |
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_INVD) |
                                (1ULL << INTERCEPT_HLT) |
                                (1ULL << INTERCEPT_INVLPG) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
                                (1ULL << INTERCEPT_TASK_SWITCH) |
                                (1ULL << INTERCEPT_SHUTDOWN) |
                                (1ULL << INTERCEPT_VMRUN) |
                                (1ULL << INTERCEPT_VMMCALL) |
                                (1ULL << INTERCEPT_VMLOAD) |
                                (1ULL << INTERCEPT_VMSAVE) |
                                (1ULL << INTERCEPT_STGI) |
                                (1ULL << INTERCEPT_CLGI) |
                                (1ULL << INTERCEPT_SKINIT) |
                                (1ULL << INTERCEPT_WBINVD) |
                                (1ULL << INTERCEPT_MONITOR) |
                                (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = EFER_SVME;
        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /*
         * This is the guest-visible cr0 value.
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         */
        svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
        kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);

        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl = 1;
                control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
                                        (1ULL << INTERCEPT_INVLPG));
                control->intercept_exceptions &= ~(1 << PF_VECTOR);
                control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
                control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
                save->g_pat = 0x0007040600070406ULL;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        force_new_asid(&svm->vcpu);

        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;

        if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
                control->pause_filter_count = 3000;
                control->intercept |= (1ULL << INTERCEPT_PAUSE);
        }

        enable_gif(svm);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        init_vmcb(svm);

        if (!kvm_vcpu_is_bsp(vcpu)) {
                kvm_rip_write(vcpu, 0);
                svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
        }
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;

        return 0;
}

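/*
 * A vcpu needs one page for its VMCB, an 8K MSR permission map, and, for
 * nested SVM, a host save area page plus a second msrpm into which the
 * host's and the L1 guest's permission maps are merged.
 */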
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        struct page *hsave_page;
        struct page *nested_msrpm_pages;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                goto uninit;

        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto free_page1;

        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
                goto free_page2;

        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
                goto free_page3;

        svm->nested.hsave = page_address(hsave_page);

        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        svm->nested.msrpm = page_address(nested_msrpm_pages);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        init_vmcb(svm);

        fx_init(&svm->vcpu);
        svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (kvm_vcpu_is_bsp(&svm->vcpu))
                svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

free_page3:
        __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
        __free_page(page);
uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}

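/*
 * When the vcpu migrates to another physical cpu, compensate an unstable
 * TSC via the vmcb tsc_offset and invalidate the cached ASID, since ASIDs
 * are per-cpu.
 */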
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 delta;

                if (check_tsc_unstable()) {
                        /*
                         * Make sure that the guest sees a monotonically
                         * increasing TSC.
                         */
                        delta = vcpu->arch.host_tsc - native_read_tsc();
                        svm->vmcb->control.tsc_offset += delta;
                        if (is_nested(svm))
                                svm->nested.hsave->control.tsc_offset += delta;
                }
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
                svm->asid_generation = 0;
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        ++vcpu->stat.host_state_reload;
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        vcpu->arch.host_tsc = native_read_tsc();
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
                load_pdptrs(vcpu, vcpu->arch.cr3);
                break;
        default:
                BUG();
        }
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by marking the segment as
         * "not present".
         */
        var->unusable = !var->present || (var->type == 0);

        switch (seg) {
        case VCPU_SREG_CS:
                /*
                 * SVM always stores 0 for the 'G' bit in the CS selector in
                 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
                 * Intel's VMENTRY has a check on the 'G' bit.
                 */
                var->g = s->limit > 0xfffff;
                break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
                 * isn't exposed
                 */
                var->type |= 0x2;
                break;
        case VCPU_SREG_DS:
        case VCPU_SREG_ES:
        case VCPU_SREG_FS:
        case VCPU_SREG_GS:
                /*
                 * The accessed bit must always be set in the segment
                 * descriptor cache: even if it is cleared in the descriptor
                 * itself, the cached bit remains at 1. Since Intel has a
                 * check on this, set it here to support cross-vendor
                 * migration.
                 */
                if (!var->unusable)
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
                /*
                 * On AMD CPUs sometimes the DB bit in the segment
                 * descriptor is left as 1, although the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross vendor migrating.
                 */
                if (var->unusable)
                        var->db = 0;
                break;
        }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.idtr.limit;
        dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.gdtr.limit;
        dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address;
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

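/*
 * CR0 reads and writes only need to be intercepted while the
 * guest-visible cr0 differs from the value the hardware uses, e.g.
 * because CR0.TS is forced on for a lazily switched FPU.
 */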
static void update_cr0_intercept(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb;
        ulong gcr0 = svm->vcpu.arch.cr0;
        u64 *hcr0 = &svm->vmcb->save.cr0;

        if (!svm->vcpu.fpu_active)
                *hcr0 |= SVM_CR0_SELECTIVE_MASK;
        else
                *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
                        | (gcr0 & SVM_CR0_SELECTIVE_MASK);

        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
                vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
                vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
                if (is_nested(svm)) {
                        struct vmcb *hsave = svm->nested.hsave;

                        hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
                        hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
                        vmcb->control.intercept_cr_read  |= svm->nested.intercept_cr_read;
                        vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
                }
        } else {
                svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
                if (is_nested(svm)) {
                        struct vmcb *hsave = svm->nested.hsave;

                        hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                        hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
                }
        }
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_nested(svm)) {
                /*
                 * We are here because we run in nested mode: the host kvm
                 * intercepts cr0 writes, but the L1 hypervisor may still
                 * intercept selective cr0 writes. This needs to be checked
                 * here.
                 */
                unsigned long old, new;

                /* Remove bits that would trigger a real cr0 write intercept */
                old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
                new = cr0 & SVM_CR0_SELECTIVE_MASK;

                if (old == new) {
                        /* cr0 write with ts and mp unchanged */
                        svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
                        if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
                                return;
                }
        }

#ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        vcpu->arch.cr0 = cr0;

        if (!npt_enabled)
                cr0 |= X86_CR0_PG | X86_CR0_WP;

        if (!vcpu->fpu_active)
                cr0 |= X86_CR0_TS;
        /*
         * Re-enable caching here because the QEMU BIOS does not do it;
         * otherwise reboot is noticeably delayed.
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        update_cr0_intercept(svm);
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
                force_new_asid(vcpu);

        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static void update_db_intercept(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.intercept_exceptions &=
                ~((1 << DB_VECTOR) | (1 << BP_VECTOR));

        if (svm->nmi_singlestep)
                svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);

        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug &
                    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
                        svm->vmcb->control.intercept_exceptions |=
                                1 << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        svm->vmcb->control.intercept_exceptions |=
                                1 << BP_VECTOR;
        } else
                vcpu->guest_debug = 0;
}

static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
        else
                svm->vmcb->save.dr7 = vcpu->arch.dr7;

        update_db_intercept(vcpu);
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

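/*
 * Hand out ASIDs in a simple linear fashion; once they are exhausted,
 * start a new generation and ask the hardware to flush all ASIDs on the
 * next VMRUN.
 */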
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;
                sd->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->asid_generation = sd->asid_generation;
        svm->vmcb->control.asid = sd->next_asid++;
}

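/*
 * DR4 and DR5 alias DR6 and DR7 unless CR4.DE is set, in which case
 * accessing them raises #UD; the emulator re-injects the #UD when
 * EMULATE_FAIL is returned.
 */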
static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (dr) {
        case 0 ... 3:
                *dest = vcpu->arch.db[dr];
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 6:
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                        *dest = vcpu->arch.dr6;
                else
                        *dest = svm->vmcb->save.dr6;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 7:
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                        *dest = vcpu->arch.dr7;
                else
                        *dest = svm->vmcb->save.dr7;
                break;
        }

        return EMULATE_DONE;
}

static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (dr) {
        case 0 ... 3:
                vcpu->arch.db[dr] = value;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                        vcpu->arch.eff_db[dr] = value;
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 6:
                vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 7:
                vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                        svm->vmcb->save.dr7 = vcpu->arch.dr7;
                        vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
                }
                break;
        }

        return EMULATE_DONE;
}

static int pf_interception(struct vcpu_svm *svm)
{
        u64 fault_address;
        u32 error_code;

        fault_address  = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;

        trace_kvm_page_fault(fault_address, error_code);
        if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
                kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
                !svm->nmi_singlestep) {
                kvm_queue_exception(&svm->vcpu, DB_VECTOR);
                return 1;
        }

        if (svm->nmi_singlestep) {
                svm->nmi_singlestep = false;
                if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
                        svm->vmcb->save.rflags &=
                                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                update_db_intercept(&svm->vcpu);
        }

        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
                kvm_run->debug.arch.exception = DB_VECTOR;
                return 0;
        }

        return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        kvm_run->exit_reason = KVM_EXIT_DEBUG;
        kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
        kvm_run->debug.arch.exception = BP_VECTOR;
        return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
        int er;

        er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}

static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 excp;

        if (is_nested(svm)) {
                u32 h_excp, n_excp;

                h_excp  = svm->nested.hsave->control.intercept_exceptions;
                n_excp  = svm->nested.intercept_exceptions;
                h_excp &= ~(1 << NM_VECTOR);
                excp    = h_excp | n_excp;
        } else {
                excp  = svm->vmcb->control.intercept_exceptions;
                excp &= ~(1 << NM_VECTOR);
        }

        svm->vmcb->control.intercept_exceptions = excp;

        svm->vcpu.fpu_active = 1;
        update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
        svm_fpu_activate(&svm->vcpu);
        return 1;
}

static int mc_interception(struct vcpu_svm *svm)
{
        /*
         * On an #MC intercept the MCE handler is not called automatically in
         * the host. So do it by hand here.
         */
        asm volatile (
                "int $0x12\n");
        /* not sure if we ever come back to this point */

        return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

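/*
 * exit_info_1 encodes the port, access size and direction of the I/O
 * instruction; exit_info_2 already holds the rip of the following
 * instruction, so no decoding is needed in the non-string case.
 */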
static int io_interception(struct vcpu_svm *svm)
{
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, in, string;
        unsigned port;

        ++svm->vcpu.stat.io_exits;

        svm->next_rip = svm->vmcb->control.exit_info_2;

        string = (io_info & SVM_IOIO_STR_MASK) != 0;

        if (string) {
                if (emulate_instruction(&svm->vcpu,
                                        0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_pio(&svm->vcpu, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
        ++svm->vcpu.stat.irq_exits;
        return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME)
            || !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
{
        int vmexit;

        if (!is_nested(svm))
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

        vmexit = nested_svm_intercept(svm);
        if (vmexit == NESTED_EXIT_DONE)
                svm->nested.exit_required = true;

        return vmexit;
}

/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
        if (!is_nested(svm))
                return true;

        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                return true;

        if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
                return false;

        svm->vmcb->control.exit_code = SVM_EXIT_INTR;

        if (svm->nested.intercept & 1ULL) {
                /*
                 * The #vmexit can't be emulated here directly because this
                 * code path runs with irqs and preemption disabled. A
                 * #vmexit emulation might sleep. Only signal the request
                 * for the #vmexit here.
                 */
                svm->nested.exit_required = true;
                trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
                return false;
        }

        return true;
}

/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
        if (!is_nested(svm))
                return true;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
                return true;

        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
        svm->nested.exit_required = true;

        return false;
}

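/*
 * Map a guest page (the L1 hypervisor's vmcb or msrpm) into the host.
 * This may sleep, so it must not be called from atomic context.
 */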
static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
        struct page *page;

        might_sleep();

        page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;

        *_page = page;

        return kmap(page);

error:
        kvm_release_page_clean(page);
        kvm_inject_gp(&svm->vcpu, 0);

        return NULL;
}

static void nested_svm_unmap(struct page *page)
{
        kunmap(page);
        kvm_release_page_dirty(page);
}

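/*
 * Check the L1 hypervisor's MSR permission map to decide whether an MSR
 * intercept has to be forwarded; exit_info_1 distinguishes reads (0) from
 * writes (1). The offset arithmetic mirrors set_msr_interception().
 */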
1553 static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1554 {
1555         u32 param = svm->vmcb->control.exit_info_1 & 1;
1556         u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1557         bool ret = false;
1558         u32 t0, t1;
1559         u8 val;
1560
1561         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1562                 return false;
1563
1564         switch (msr) {
1565         case 0 ... 0x1fff:
1566                 t0 = (msr * 2) % 8;
1567                 t1 = msr / 8;
1568                 break;
1569         case 0xc0000000 ... 0xc0001fff:
1570                 t0 = (8192 + msr - 0xc0000000) * 2;
1571                 t1 = (t0 / 8);
1572                 t0 %= 8;
1573                 break;
1574         case 0xc0010000 ... 0xc0011fff:
1575                 t0 = (16384 + msr - 0xc0010000) * 2;
1576                 t1 = (t0 / 8);
1577                 t0 %= 8;
1578                 break;
1579         default:
1580                 ret = true;
1581                 goto out;
1582         }
1583
1584         if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
1585                 ret = val & ((1 << param) << t0);
1586
1587 out:
1588         return ret;
1589 }
1590
1591 static int nested_svm_exit_special(struct vcpu_svm *svm)
1592 {
1593         u32 exit_code = svm->vmcb->control.exit_code;
1594
1595         switch (exit_code) {
1596         case SVM_EXIT_INTR:
1597         case SVM_EXIT_NMI:
1598                 return NESTED_EXIT_HOST;
1599         case SVM_EXIT_NPF:
1600                 /* For now we are always handling NPFs when using them */
1601                 if (npt_enabled)
1602                         return NESTED_EXIT_HOST;
1603                 break;
1604         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
1605                 /* When we're shadowing, trap PFs */
1606                 if (!npt_enabled)
1607                         return NESTED_EXIT_HOST;
1608                 break;
1609         case SVM_EXIT_EXCP_BASE + NM_VECTOR:
1610                 nm_interception(svm);
1611                 break;
1612         default:
1613                 break;
1614         }
1615
1616         return NESTED_EXIT_CONTINUE;
1617 }
1618
1619 /*
1620  * If this function returns true, this #vmexit was already handled
1621  */
1622 static int nested_svm_intercept(struct vcpu_svm *svm)
1623 {
1624         u32 exit_code = svm->vmcb->control.exit_code;
1625         int vmexit = NESTED_EXIT_HOST;
1626
1627         switch (exit_code) {
1628         case SVM_EXIT_MSR:
1629                 vmexit = nested_svm_exit_handled_msr(svm);
1630                 break;
1631         case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
1632                 u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
1633                 if (svm->nested.intercept_cr_read & cr_bits)
1634                         vmexit = NESTED_EXIT_DONE;
1635                 break;
1636         }
1637         case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
1638                 u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
1639                 if (svm->nested.intercept_cr_write & cr_bits)
1640                         vmexit = NESTED_EXIT_DONE;
1641                 break;
1642         }
1643         case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
1644                 u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
1645                 if (svm->nested.intercept_dr_read & dr_bits)
1646                         vmexit = NESTED_EXIT_DONE;
1647                 break;
1648         }
1649         case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
1650                 u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
1651                 if (svm->nested.intercept_dr_write & dr_bits)
1652                         vmexit = NESTED_EXIT_DONE;
1653                 break;
1654         }
1655         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1656                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1657                 if (svm->nested.intercept_exceptions & excp_bits)
1658                         vmexit = NESTED_EXIT_DONE;
1659                 break;
1660         }
1661         default: {
1662                 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
1663                 if (svm->nested.intercept & exit_bits)
1664                         vmexit = NESTED_EXIT_DONE;
1665         }
1666         }
1667
1668         return vmexit;
1669 }
1670
1671 static int nested_svm_exit_handled(struct vcpu_svm *svm)
1672 {
1673         int vmexit;
1674
1675         vmexit = nested_svm_intercept(svm);
1676
1677         if (vmexit == NESTED_EXIT_DONE)
1678                 nested_svm_vmexit(svm);
1679
1680         return vmexit;
1681 }
1682
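/*
 * Copy the complete control area from one vmcb to another.  Used to
 * stash the host state in hsave on VMRUN and to restore it on VMEXIT.
 */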
1683 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
1684 {
1685         struct vmcb_control_area *dst  = &dst_vmcb->control;
1686         struct vmcb_control_area *from = &from_vmcb->control;
1687
1688         dst->intercept_cr_read    = from->intercept_cr_read;
1689         dst->intercept_cr_write   = from->intercept_cr_write;
1690         dst->intercept_dr_read    = from->intercept_dr_read;
1691         dst->intercept_dr_write   = from->intercept_dr_write;
1692         dst->intercept_exceptions = from->intercept_exceptions;
1693         dst->intercept            = from->intercept;
1694         dst->iopm_base_pa         = from->iopm_base_pa;
1695         dst->msrpm_base_pa        = from->msrpm_base_pa;
1696         dst->tsc_offset           = from->tsc_offset;
1697         dst->asid                 = from->asid;
1698         dst->tlb_ctl              = from->tlb_ctl;
1699         dst->int_ctl              = from->int_ctl;
1700         dst->int_vector           = from->int_vector;
1701         dst->int_state            = from->int_state;
1702         dst->exit_code            = from->exit_code;
1703         dst->exit_code_hi         = from->exit_code_hi;
1704         dst->exit_info_1          = from->exit_info_1;
1705         dst->exit_info_2          = from->exit_info_2;
1706         dst->exit_int_info        = from->exit_int_info;
1707         dst->exit_int_info_err    = from->exit_int_info_err;
1708         dst->nested_ctl           = from->nested_ctl;
1709         dst->event_inj            = from->event_inj;
1710         dst->event_inj_err        = from->event_inj_err;
1711         dst->nested_cr3           = from->nested_cr3;
1712         dst->lbr_ctl              = from->lbr_ctl;
1713 }
1714
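/*
 * Emulate a #VMEXIT into the L1 guest: copy the current guest state
 * back into the nested vmcb, restore the host state saved in hsave
 * and leave nested mode with the GIF cleared.
 */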
1715 static int nested_svm_vmexit(struct vcpu_svm *svm)
1716 {
1717         struct vmcb *nested_vmcb;
1718         struct vmcb *hsave = svm->nested.hsave;
1719         struct vmcb *vmcb = svm->vmcb;
1720         struct page *page;
1721
1722         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
1723                                        vmcb->control.exit_info_1,
1724                                        vmcb->control.exit_info_2,
1725                                        vmcb->control.exit_int_info,
1726                                        vmcb->control.exit_int_info_err);
1727
1728         nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
1729         if (!nested_vmcb)
1730                 return 1;
1731
1732         /* Exit nested SVM mode */
1733         svm->nested.vmcb = 0;
1734
1735         /* Give the current vmcb to the guest */
1736         disable_gif(svm);
1737
1738         nested_vmcb->save.es     = vmcb->save.es;
1739         nested_vmcb->save.cs     = vmcb->save.cs;
1740         nested_vmcb->save.ss     = vmcb->save.ss;
1741         nested_vmcb->save.ds     = vmcb->save.ds;
1742         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
1743         nested_vmcb->save.idtr   = vmcb->save.idtr;
1744         nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
1745         if (npt_enabled)
1746                 nested_vmcb->save.cr3    = vmcb->save.cr3;
1747         else
1748                 nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
1749         nested_vmcb->save.cr2    = vmcb->save.cr2;
1750         nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
1751         nested_vmcb->save.rflags = vmcb->save.rflags;
1752         nested_vmcb->save.rip    = vmcb->save.rip;
1753         nested_vmcb->save.rsp    = vmcb->save.rsp;
1754         nested_vmcb->save.rax    = vmcb->save.rax;
1755         nested_vmcb->save.dr7    = vmcb->save.dr7;
1756         nested_vmcb->save.dr6    = vmcb->save.dr6;
1757         nested_vmcb->save.cpl    = vmcb->save.cpl;
1758
1759         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
1760         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
1761         nested_vmcb->control.int_state         = vmcb->control.int_state;
1762         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
1763         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
1764         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
1765         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
1766         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
1767         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
1768
1769         /*
1770          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
1771          * to make sure that we do not lose injected events. So check event_inj
1772          * here and copy it to exit_int_info if it is valid.
1773          * Exit_int_info and event_inj can't be both valid because the case
1774          * below only happens on a VMRUN instruction intercept which has
1775          * no valid exit_int_info set.
1776          */
1777         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
1778                 struct vmcb_control_area *nc = &nested_vmcb->control;
1779
1780                 nc->exit_int_info     = vmcb->control.event_inj;
1781                 nc->exit_int_info_err = vmcb->control.event_inj_err;
1782         }
1783
1784         nested_vmcb->control.tlb_ctl           = 0;
1785         nested_vmcb->control.event_inj         = 0;
1786         nested_vmcb->control.event_inj_err     = 0;
1787
1788         /* We always set V_INTR_MASKING and remember the old value in hflags */
1789         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1790                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1791
1792         /* Restore the original control entries */
1793         copy_vmcb_control_area(vmcb, hsave);
1794
1795         kvm_clear_exception_queue(&svm->vcpu);
1796         kvm_clear_interrupt_queue(&svm->vcpu);
1797
1798         /* Restore selected save entries */
1799         svm->vmcb->save.es = hsave->save.es;
1800         svm->vmcb->save.cs = hsave->save.cs;
1801         svm->vmcb->save.ss = hsave->save.ss;
1802         svm->vmcb->save.ds = hsave->save.ds;
1803         svm->vmcb->save.gdtr = hsave->save.gdtr;
1804         svm->vmcb->save.idtr = hsave->save.idtr;
1805         svm->vmcb->save.rflags = hsave->save.rflags;
1806         svm_set_efer(&svm->vcpu, hsave->save.efer);
1807         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
1808         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
1809         if (npt_enabled) {
1810                 svm->vmcb->save.cr3 = hsave->save.cr3;
1811                 svm->vcpu.arch.cr3 = hsave->save.cr3;
1812         } else {
1813                 kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
1814         }
1815         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
1816         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
1817         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
1818         svm->vmcb->save.dr7 = 0;
1819         svm->vmcb->save.cpl = 0;
1820         svm->vmcb->control.exit_int_info = 0;
1821
1822         nested_svm_unmap(page);
1823
1824         kvm_mmu_reset_context(&svm->vcpu);
1825         kvm_mmu_load(&svm->vcpu);
1826
1827         return 0;
1828 }
1829
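/*
 * Merge the host and the nested guest's MSR permission maps by ORing
 * them, so an MSR access intercepted by either map causes a #VMEXIT.
 */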
1830 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1831 {
1832         u32 *nested_msrpm;
1833         struct page *page;
1834         int i;
1835
1836         nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
1837         if (!nested_msrpm)
1838                 return false;
1839
1840         for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
1841                 svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
1842
1843         svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1844
1845         nested_svm_unmap(page);
1846
1847         return true;
1848 }
1849
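/*
 * Emulate VMRUN: save the host state into hsave, load the nested vmcb
 * (whose GPA the guest passed in RAX) as the new guest state and
 * enter nested mode with the GIF set.
 */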
1850 static bool nested_svm_vmrun(struct vcpu_svm *svm)
1851 {
1852         struct vmcb *nested_vmcb;
1853         struct vmcb *hsave = svm->nested.hsave;
1854         struct vmcb *vmcb = svm->vmcb;
1855         struct page *page;
1856         u64 vmcb_gpa;
1857
1858         vmcb_gpa = svm->vmcb->save.rax;
1859
1860         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1861         if (!nested_vmcb)
1862                 return false;
1863
1864         trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
1865                                nested_vmcb->save.rip,
1866                                nested_vmcb->control.int_ctl,
1867                                nested_vmcb->control.event_inj,
1868                                nested_vmcb->control.nested_ctl);
1869
1870         trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
1871                                     nested_vmcb->control.intercept_cr_write,
1872                                     nested_vmcb->control.intercept_exceptions,
1873                                     nested_vmcb->control.intercept);
1874
1875         /* Clear internal status */
1876         kvm_clear_exception_queue(&svm->vcpu);
1877         kvm_clear_interrupt_queue(&svm->vcpu);
1878
1879         /*
1880          * Save the current vmcb wholesale, so we don't have to pick single
1881          * fields but can restore everything when a VMEXIT occurs
1882          */
1883         hsave->save.es     = vmcb->save.es;
1884         hsave->save.cs     = vmcb->save.cs;
1885         hsave->save.ss     = vmcb->save.ss;
1886         hsave->save.ds     = vmcb->save.ds;
1887         hsave->save.gdtr   = vmcb->save.gdtr;
1888         hsave->save.idtr   = vmcb->save.idtr;
1889         hsave->save.efer   = svm->vcpu.arch.efer;
1890         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
1891         hsave->save.cr4    = svm->vcpu.arch.cr4;
1892         hsave->save.rflags = vmcb->save.rflags;
1893         hsave->save.rip    = svm->next_rip;
1894         hsave->save.rsp    = vmcb->save.rsp;
1895         hsave->save.rax    = vmcb->save.rax;
1896         if (npt_enabled)
1897                 hsave->save.cr3    = vmcb->save.cr3;
1898         else
1899                 hsave->save.cr3    = svm->vcpu.arch.cr3;
1900
1901         copy_vmcb_control_area(hsave, vmcb);
1902
1903         if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
1904                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
1905         else
1906                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
1907
1908         /* Load the nested guest state */
1909         svm->vmcb->save.es = nested_vmcb->save.es;
1910         svm->vmcb->save.cs = nested_vmcb->save.cs;
1911         svm->vmcb->save.ss = nested_vmcb->save.ss;
1912         svm->vmcb->save.ds = nested_vmcb->save.ds;
1913         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
1914         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
1915         svm->vmcb->save.rflags = nested_vmcb->save.rflags;
1916         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
1917         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
1918         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
1919         if (npt_enabled) {
1920                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
1921                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
1922         } else
1923                 kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
1924
1925         /* Guest paging mode is active - reset mmu */
1926         kvm_mmu_reset_context(&svm->vcpu);
1927
1928         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
1929         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
1930         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
1931         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
1932
1933         /* Copy these directly as well, in case we never reach vcpu_run and they are not synced from the register cache */
1934         svm->vmcb->save.rax = nested_vmcb->save.rax;
1935         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
1936         svm->vmcb->save.rip = nested_vmcb->save.rip;
1937         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
1938         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1939         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1940
1941         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1942
1943         /* cache intercepts */
1944         svm->nested.intercept_cr_read    = nested_vmcb->control.intercept_cr_read;
1945         svm->nested.intercept_cr_write   = nested_vmcb->control.intercept_cr_write;
1946         svm->nested.intercept_dr_read    = nested_vmcb->control.intercept_dr_read;
1947         svm->nested.intercept_dr_write   = nested_vmcb->control.intercept_dr_write;
1948         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
1949         svm->nested.intercept            = nested_vmcb->control.intercept;
1950
1951         force_new_asid(&svm->vcpu);
1952         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
1953         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
1954                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
1955         else
1956                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1957
1958         if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
1959                 /* We only want the cr8 intercept bits of the guest */
1960                 svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
1961                 svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1962         }
1963
1964         /*
1965          * We don't want a nested guest to be more powerful than the guest, so
1966          * all intercepts are ORed
1967          */
1968         svm->vmcb->control.intercept_cr_read |=
1969                 nested_vmcb->control.intercept_cr_read;
1970         svm->vmcb->control.intercept_cr_write |=
1971                 nested_vmcb->control.intercept_cr_write;
1972         svm->vmcb->control.intercept_dr_read |=
1973                 nested_vmcb->control.intercept_dr_read;
1974         svm->vmcb->control.intercept_dr_write |=
1975                 nested_vmcb->control.intercept_dr_write;
1976         svm->vmcb->control.intercept_exceptions |=
1977                 nested_vmcb->control.intercept_exceptions;
1978
1979         svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1980
1981         svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
1982         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1983         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1984         svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1985         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1986         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1987
1988         nested_svm_unmap(page);
1989
1990         /* nested_vmcb is our indicator if nested SVM is activated */
1991         svm->nested.vmcb = vmcb_gpa;
1992
1993         enable_gif(svm);
1994
1995         return true;
1996 }
1997
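/*
 * Copy the state handled by VMLOAD/VMSAVE: FS, GS, TR and LDTR plus
 * KernelGsBase, the STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.
 */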
1998 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1999 {
2000         to_vmcb->save.fs = from_vmcb->save.fs;
2001         to_vmcb->save.gs = from_vmcb->save.gs;
2002         to_vmcb->save.tr = from_vmcb->save.tr;
2003         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2004         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2005         to_vmcb->save.star = from_vmcb->save.star;
2006         to_vmcb->save.lstar = from_vmcb->save.lstar;
2007         to_vmcb->save.cstar = from_vmcb->save.cstar;
2008         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2009         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2010         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2011         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
2012 }
2013
2014 static int vmload_interception(struct vcpu_svm *svm)
2015 {
2016         struct vmcb *nested_vmcb;
2017         struct page *page;
2018
2019         if (nested_svm_check_permissions(svm))
2020                 return 1;
2021
2022         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2023         skip_emulated_instruction(&svm->vcpu);
2024
2025         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2026         if (!nested_vmcb)
2027                 return 1;
2028
2029         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2030         nested_svm_unmap(page);
2031
2032         return 1;
2033 }
2034
2035 static int vmsave_interception(struct vcpu_svm *svm)
2036 {
2037         struct vmcb *nested_vmcb;
2038         struct page *page;
2039
2040         if (nested_svm_check_permissions(svm))
2041                 return 1;
2042
2043         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2044         skip_emulated_instruction(&svm->vcpu);
2045
2046         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2047         if (!nested_vmcb)
2048                 return 1;
2049
2050         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2051         nested_svm_unmap(page);
2052
2053         return 1;
2054 }
2055
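/*
 * VMRUN emulation happens in two steps: nested_svm_vmrun() switches
 * to the nested guest state, and merging the MSR permission maps may
 * still fail afterwards, in which case we report SVM_EXIT_ERR back to
 * the guest through an emulated #VMEXIT.
 */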
2056 static int vmrun_interception(struct vcpu_svm *svm)
2057 {
2058         if (nested_svm_check_permissions(svm))
2059                 return 1;
2060
2061         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2062         skip_emulated_instruction(&svm->vcpu);
2063
2064         if (!nested_svm_vmrun(svm))
2065                 return 1;
2066
2067         if (!nested_svm_vmrun_msrpm(svm))
2068                 goto failed;
2069
2070         return 1;
2071
2072 failed:
2073
2074         svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
2075         svm->vmcb->control.exit_code_hi = 0;
2076         svm->vmcb->control.exit_info_1  = 0;
2077         svm->vmcb->control.exit_info_2  = 0;
2078
2079         nested_svm_vmexit(svm);
2080
2081         return 1;
2082 }
2083
2084 static int stgi_interception(struct vcpu_svm *svm)
2085 {
2086         if (nested_svm_check_permissions(svm))
2087                 return 1;
2088
2089         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2090         skip_emulated_instruction(&svm->vcpu);
2091
2092         enable_gif(svm);
2093
2094         return 1;
2095 }
2096
2097 static int clgi_interception(struct vcpu_svm *svm)
2098 {
2099         if (nested_svm_check_permissions(svm))
2100                 return 1;
2101
2102         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2103         skip_emulated_instruction(&svm->vcpu);
2104
2105         disable_gif(svm);
2106
2107         /* After a CLGI no interrupts should be delivered; drop any pending V_IRQ */
2108         svm_clear_vintr(svm);
2109         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2110
2111         return 1;
2112 }
2113
2114 static int invlpga_interception(struct vcpu_svm *svm)
2115 {
2116         struct kvm_vcpu *vcpu = &svm->vcpu;
2117
2118         trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
2119                           vcpu->arch.regs[VCPU_REGS_RAX]);
2120
2121         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2122         kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
2123
2124         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2125         skip_emulated_instruction(&svm->vcpu);
2126         return 1;
2127 }
2128
2129 static int skinit_interception(struct vcpu_svm *svm)
2130 {
2131         trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
2132
2133         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2134         return 1;
2135 }
2136
2137 static int invalid_op_interception(struct vcpu_svm *svm)
2138 {
2139         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2140         return 1;
2141 }
2142
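/*
 * Decode the cause of the task switch from exit_info_2 (IRET or far
 * JMP) or from the pending event in exit_int_info (gate) and hand the
 * switch itself to the common emulation code.
 */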
2143 static int task_switch_interception(struct vcpu_svm *svm)
2144 {
2145         u16 tss_selector;
2146         int reason;
2147         int int_type = svm->vmcb->control.exit_int_info &
2148                 SVM_EXITINTINFO_TYPE_MASK;
2149         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2152         uint32_t idt_v =
2153                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2154
2155         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2156
2157         if (svm->vmcb->control.exit_info_2 &
2158             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2159                 reason = TASK_SWITCH_IRET;
2160         else if (svm->vmcb->control.exit_info_2 &
2161                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2162                 reason = TASK_SWITCH_JMP;
2163         else if (idt_v)
2164                 reason = TASK_SWITCH_GATE;
2165         else
2166                 reason = TASK_SWITCH_CALL;
2167
2168         if (reason == TASK_SWITCH_GATE) {
2169                 switch (int_type) {
2170                 case SVM_EXITINTINFO_TYPE_NMI:
2171                         svm->vcpu.arch.nmi_injected = false;
2172                         break;
2173                 case SVM_EXITINTINFO_TYPE_EXEPT:
2174                         kvm_clear_exception_queue(&svm->vcpu);
2175                         break;
2176                 case SVM_EXITINTINFO_TYPE_INTR:
2177                         kvm_clear_interrupt_queue(&svm->vcpu);
2178                         break;
2179                 default:
2180                         break;
2181                 }
2182         }
2183
2184         if (reason != TASK_SWITCH_GATE ||
2185             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2186             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2187              (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2188                 skip_emulated_instruction(&svm->vcpu);
2189
2190         return kvm_task_switch(&svm->vcpu, tss_selector, reason);
2191 }
2192
2193 static int cpuid_interception(struct vcpu_svm *svm)
2194 {
2195         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2196         kvm_emulate_cpuid(&svm->vcpu);
2197         return 1;
2198 }
2199
2200 static int iret_interception(struct vcpu_svm *svm)
2201 {
2202         ++svm->vcpu.stat.nmi_window_exits;
2203         svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
2204         svm->vcpu.arch.hflags |= HF_IRET_MASK;
2205         return 1;
2206 }
2207
2208 static int invlpg_interception(struct vcpu_svm *svm)
2209 {
2210         if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
2211                 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
2212         return 1;
2213 }
2214
2215 static int emulate_on_interception(struct vcpu_svm *svm)
2216 {
2217         if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
2218                 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
2219         return 1;
2220 }
2221
2222 static int cr8_write_interception(struct vcpu_svm *svm)
2223 {
2224         struct kvm_run *kvm_run = svm->vcpu.run;
2225
2226         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2227         /* instruction emulation calls kvm_set_cr8() */
2228         emulate_instruction(&svm->vcpu, 0, 0, 0);
2229         if (irqchip_in_kernel(svm->vcpu.kvm)) {
2230                 svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
2231                 return 1;
2232         }
2233         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
2234                 return 1;
2235         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2236         return 0;
2237 }
2238
2239 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2240 {
2241         struct vcpu_svm *svm = to_svm(vcpu);
2242
2243         switch (ecx) {
2244         case MSR_IA32_TSC: {
2245                 u64 tsc_offset;
2246
2247                 if (is_nested(svm))
2248                         tsc_offset = svm->nested.hsave->control.tsc_offset;
2249                 else
2250                         tsc_offset = svm->vmcb->control.tsc_offset;
2251
2252                 *data = tsc_offset + native_read_tsc();
2253                 break;
2254         }
2255         case MSR_K6_STAR:
2256                 *data = svm->vmcb->save.star;
2257                 break;
2258 #ifdef CONFIG_X86_64
2259         case MSR_LSTAR:
2260                 *data = svm->vmcb->save.lstar;
2261                 break;
2262         case MSR_CSTAR:
2263                 *data = svm->vmcb->save.cstar;
2264                 break;
2265         case MSR_KERNEL_GS_BASE:
2266                 *data = svm->vmcb->save.kernel_gs_base;
2267                 break;
2268         case MSR_SYSCALL_MASK:
2269                 *data = svm->vmcb->save.sfmask;
2270                 break;
2271 #endif
2272         case MSR_IA32_SYSENTER_CS:
2273                 *data = svm->vmcb->save.sysenter_cs;
2274                 break;
2275         case MSR_IA32_SYSENTER_EIP:
2276                 *data = svm->sysenter_eip;
2277                 break;
2278         case MSR_IA32_SYSENTER_ESP:
2279                 *data = svm->sysenter_esp;
2280                 break;
2281         /*
2282          * Nobody will change the following 5 values in the VMCB so we can
2283          * safely return them on rdmsr. They will always be 0 until LBRV is
2284          * implemented.
2285          */
2286         case MSR_IA32_DEBUGCTLMSR:
2287                 *data = svm->vmcb->save.dbgctl;
2288                 break;
2289         case MSR_IA32_LASTBRANCHFROMIP:
2290                 *data = svm->vmcb->save.br_from;
2291                 break;
2292         case MSR_IA32_LASTBRANCHTOIP:
2293                 *data = svm->vmcb->save.br_to;
2294                 break;
2295         case MSR_IA32_LASTINTFROMIP:
2296                 *data = svm->vmcb->save.last_excp_from;
2297                 break;
2298         case MSR_IA32_LASTINTTOIP:
2299                 *data = svm->vmcb->save.last_excp_to;
2300                 break;
2301         case MSR_VM_HSAVE_PA:
2302                 *data = svm->nested.hsave_msr;
2303                 break;
2304         case MSR_VM_CR:
2305                 *data = svm->nested.vm_cr_msr;
2306                 break;
2307         case MSR_IA32_UCODE_REV:
2308                 *data = 0x01000065;
2309                 break;
2310         default:
2311                 return kvm_get_msr_common(vcpu, ecx, data);
2312         }
2313         return 0;
2314 }
2315
2316 static int rdmsr_interception(struct vcpu_svm *svm)
2317 {
2318         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2319         u64 data;
2320
2321         if (svm_get_msr(&svm->vcpu, ecx, &data)) {
2322                 trace_kvm_msr_read_ex(ecx);
2323                 kvm_inject_gp(&svm->vcpu, 0);
2324         } else {
2325                 trace_kvm_msr_read(ecx, data);
2326
2327                 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
2328                 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
2329                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2330                 skip_emulated_instruction(&svm->vcpu);
2331         }
2332         return 1;
2333 }
2334
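/*
 * Emulate writes to MSR_VM_CR: once SVMDIS is set, the LOCK and
 * SVMDIS bits can no longer be changed, and setting SVMDIS while
 * EFER.SVME is still on raises a #GP.
 */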
2335 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2336 {
2337         struct vcpu_svm *svm = to_svm(vcpu);
2338         int svm_dis, chg_mask;
2339
2340         if (data & ~SVM_VM_CR_VALID_MASK)
2341                 return 1;
2342
2343         chg_mask = SVM_VM_CR_VALID_MASK;
2344
2345         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2346                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2347
2348         svm->nested.vm_cr_msr &= ~chg_mask;
2349         svm->nested.vm_cr_msr |= (data & chg_mask);
2350
2351         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2352
2353         /* check for svm_disable while efer.svme is set */
2354         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2355                 return 1;
2356
2357         return 0;
2358 }
2359
2360 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2361 {
2362         struct vcpu_svm *svm = to_svm(vcpu);
2363
2364         switch (ecx) {
2365         case MSR_IA32_TSC: {
2366                 u64 tsc_offset = data - native_read_tsc();
2367                 u64 g_tsc_offset = 0;
2368
2369                 if (is_nested(svm)) {
2370                         g_tsc_offset = svm->vmcb->control.tsc_offset -
2371                                        svm->nested.hsave->control.tsc_offset;
2372                         svm->nested.hsave->control.tsc_offset = tsc_offset;
2373                 }
2374
2375                 svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
2376
2377                 break;
2378         }
2379         case MSR_K6_STAR:
2380                 svm->vmcb->save.star = data;
2381                 break;
2382 #ifdef CONFIG_X86_64
2383         case MSR_LSTAR:
2384                 svm->vmcb->save.lstar = data;
2385                 break;
2386         case MSR_CSTAR:
2387                 svm->vmcb->save.cstar = data;
2388                 break;
2389         case MSR_KERNEL_GS_BASE:
2390                 svm->vmcb->save.kernel_gs_base = data;
2391                 break;
2392         case MSR_SYSCALL_MASK:
2393                 svm->vmcb->save.sfmask = data;
2394                 break;
2395 #endif
2396         case MSR_IA32_SYSENTER_CS:
2397                 svm->vmcb->save.sysenter_cs = data;
2398                 break;
2399         case MSR_IA32_SYSENTER_EIP:
2400                 svm->sysenter_eip = data;
2401                 svm->vmcb->save.sysenter_eip = data;
2402                 break;
2403         case MSR_IA32_SYSENTER_ESP:
2404                 svm->sysenter_esp = data;
2405                 svm->vmcb->save.sysenter_esp = data;
2406                 break;
2407         case MSR_IA32_DEBUGCTLMSR:
2408                 if (!svm_has(SVM_FEATURE_LBRV)) {
2409                         pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2410                                         __func__, data);
2411                         break;
2412                 }
2413                 if (data & DEBUGCTL_RESERVED_BITS)
2414                         return 1;
2415
2416                 svm->vmcb->save.dbgctl = data;
2417                 if (data & (1ULL<<0))
2418                         svm_enable_lbrv(svm);
2419                 else
2420                         svm_disable_lbrv(svm);
2421                 break;
2422         case MSR_VM_HSAVE_PA:
2423                 svm->nested.hsave_msr = data;
2424                 break;
2425         case MSR_VM_CR:
2426                 return svm_set_vm_cr(vcpu, data);
2427         case MSR_VM_IGNNE:
2428                 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
2429                 break;
2430         default:
2431                 return kvm_set_msr_common(vcpu, ecx, data);
2432         }
2433         return 0;
2434 }
2435
2436 static int wrmsr_interception(struct vcpu_svm *svm)
2437 {
2438         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2439         u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
2440                 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2441
2443         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2444         if (svm_set_msr(&svm->vcpu, ecx, data)) {
2445                 trace_kvm_msr_write_ex(ecx, data);
2446                 kvm_inject_gp(&svm->vcpu, 0);
2447         } else {
2448                 trace_kvm_msr_write(ecx, data);
2449                 skip_emulated_instruction(&svm->vcpu);
2450         }
2451         return 1;
2452 }
2453
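/* exit_info_1 is 1 for WRMSR and 0 for RDMSR. */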
2454 static int msr_interception(struct vcpu_svm *svm)
2455 {
2456         if (svm->vmcb->control.exit_info_1)
2457                 return wrmsr_interception(svm);
2458         else
2459                 return rdmsr_interception(svm);
2460 }
2461
2462 static int interrupt_window_interception(struct vcpu_svm *svm)
2463 {
2464         struct kvm_run *kvm_run = svm->vcpu.run;
2465
2466         svm_clear_vintr(svm);
2467         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2468         /*
2469          * If user space is waiting to inject interrupts, exit to it as
2470          * soon as possible
2471          */
2472         if (!irqchip_in_kernel(svm->vcpu.kvm) &&
2473             kvm_run->request_interrupt_window &&
2474             !kvm_cpu_has_interrupt(&svm->vcpu)) {
2475                 ++svm->vcpu.stat.irq_window_exits;
2476                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2477                 return 0;
2478         }
2479
2480         return 1;
2481 }
2482
2483 static int pause_interception(struct vcpu_svm *svm)
2484 {
2485         kvm_vcpu_on_spin(&(svm->vcpu));
2486         return 1;
2487 }
2488
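/* #VMEXIT dispatch table, indexed by the SVM exit code. */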
2489 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
2490         [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
2491         [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
2492         [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
2493         [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
2494         [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
2495         [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
2496         [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
2497         [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
2498         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
2499         [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
2500         [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
2501         [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
2502         [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
2503         [SVM_EXIT_READ_DR4]                     = emulate_on_interception,
2504         [SVM_EXIT_READ_DR5]                     = emulate_on_interception,
2505         [SVM_EXIT_READ_DR6]                     = emulate_on_interception,
2506         [SVM_EXIT_READ_DR7]                     = emulate_on_interception,
2507         [SVM_EXIT_WRITE_DR0]                    = emulate_on_interception,
2508         [SVM_EXIT_WRITE_DR1]                    = emulate_on_interception,
2509         [SVM_EXIT_WRITE_DR2]                    = emulate_on_interception,
2510         [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
2511         [SVM_EXIT_WRITE_DR4]                    = emulate_on_interception,
2512         [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
2513         [SVM_EXIT_WRITE_DR6]                    = emulate_on_interception,
2514         [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
2515         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
2516         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
2517         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
2518         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
2519         [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
2520         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
2521         [SVM_EXIT_INTR]                         = intr_interception,
2522         [SVM_EXIT_NMI]                          = nmi_interception,
2523         [SVM_EXIT_SMI]                          = nop_on_interception,
2524         [SVM_EXIT_INIT]                         = nop_on_interception,
2525         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
2526         [SVM_EXIT_CPUID]                        = cpuid_interception,
2527         [SVM_EXIT_IRET]                         = iret_interception,
2528         [SVM_EXIT_INVD]                         = emulate_on_interception,
2529         [SVM_EXIT_PAUSE]                        = pause_interception,
2530         [SVM_EXIT_HLT]                          = halt_interception,
2531         [SVM_EXIT_INVLPG]                       = invlpg_interception,
2532         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
2533         [SVM_EXIT_IOIO]                         = io_interception,
2534         [SVM_EXIT_MSR]                          = msr_interception,
2535         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
2536         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
2537         [SVM_EXIT_VMRUN]                        = vmrun_interception,
2538         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
2539         [SVM_EXIT_VMLOAD]                       = vmload_interception,
2540         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
2541         [SVM_EXIT_STGI]                         = stgi_interception,
2542         [SVM_EXIT_CLGI]                         = clgi_interception,
2543         [SVM_EXIT_SKINIT]                       = skinit_interception,
2544         [SVM_EXIT_WBINVD]                       = emulate_on_interception,
2545         [SVM_EXIT_MONITOR]                      = invalid_op_interception,
2546         [SVM_EXIT_MWAIT]                        = invalid_op_interception,
2547         [SVM_EXIT_NPF]                          = pf_interception,
2548 };
2549
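/*
 * Top-level #VMEXIT handler: give a nested guest the chance to claim
 * the exit first, then dispatch to the handler for the exit code.
 * Returning 0 drops back to user space with kvm_run filled in.
 */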
2550 static int handle_exit(struct kvm_vcpu *vcpu)
2551 {
2552         struct vcpu_svm *svm = to_svm(vcpu);
2553         struct kvm_run *kvm_run = vcpu->run;
2554         u32 exit_code = svm->vmcb->control.exit_code;
2555
2556         trace_kvm_exit(exit_code, svm->vmcb->save.rip);
2557
2558         if (unlikely(svm->nested.exit_required)) {
2559                 nested_svm_vmexit(svm);
2560                 svm->nested.exit_required = false;
2561
2562                 return 1;
2563         }
2564
2565         if (is_nested(svm)) {
2566                 int vmexit;
2567
2568                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
2569                                         svm->vmcb->control.exit_info_1,
2570                                         svm->vmcb->control.exit_info_2,
2571                                         svm->vmcb->control.exit_int_info,
2572                                         svm->vmcb->control.exit_int_info_err);
2573
2574                 vmexit = nested_svm_exit_special(svm);
2575
2576                 if (vmexit == NESTED_EXIT_CONTINUE)
2577                         vmexit = nested_svm_exit_handled(svm);
2578
2579                 if (vmexit == NESTED_EXIT_DONE)
2580                         return 1;
2581         }
2582
2583         svm_complete_interrupts(svm);
2584
2585         if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
2586                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2587         if (npt_enabled)
2588                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
2589
2590         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
2591                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2592                 kvm_run->fail_entry.hardware_entry_failure_reason
2593                         = svm->vmcb->control.exit_code;
2594                 return 0;
2595         }
2596
2597         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
2598             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
2599             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
2600                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
2601                        "exit_code 0x%x\n",
2602                        __func__, svm->vmcb->control.exit_int_info,
2603                        exit_code);
2604
2605         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
2606             || !svm_exit_handlers[exit_code]) {
2607                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2608                 kvm_run->hw.hardware_exit_reason = exit_code;
2609                 return 0;
2610         }
2611
2612         return svm_exit_handlers[exit_code](svm);
2613 }
2614
2615 static void reload_tss(struct kvm_vcpu *vcpu)
2616 {
2617         int cpu = raw_smp_processor_id();
2618
2619         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2620         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
2621         load_TR_desc();
2622 }
2623
2624 static void pre_svm_run(struct vcpu_svm *svm)
2625 {
2626         int cpu = raw_smp_processor_id();
2627
2628         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2629
2630         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
2631         /* FIXME: handle wraparound of asid_generation */
2632         if (svm->asid_generation != sd->asid_generation)
2633                 new_asid(svm, sd);
2634 }
2635
2636 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
2637 {
2638         struct vcpu_svm *svm = to_svm(vcpu);
2639
2640         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
2641         vcpu->arch.hflags |= HF_NMI_MASK;
2642         svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
2643         ++vcpu->stat.nmi_injections;
2644 }
2645
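/*
 * Program a virtual interrupt through V_IRQ.  The priority is forced
 * to 0xf, the highest value, so delivery does not depend on the
 * current V_TPR.
 */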
2646 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
2647 {
2648         struct vmcb_control_area *control;
2649
2650         trace_kvm_inj_virq(irq);
2651
2652         ++svm->vcpu.stat.irq_injections;
2653         control = &svm->vmcb->control;
2654         control->int_vector = irq;
2655         control->int_ctl &= ~V_INTR_PRIO_MASK;
2656         control->int_ctl |= V_IRQ_MASK |
2657                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
2658 }
2659
2660 static void svm_set_irq(struct kvm_vcpu *vcpu)
2661 {
2662         struct vcpu_svm *svm = to_svm(vcpu);
2663
2664         BUG_ON(!(gif_set(svm)));
2665
2666         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
2667                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2668 }
2669
2670 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
2671 {
2672         struct vcpu_svm *svm = to_svm(vcpu);
2673
2674         if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
2675                 return;
2676
2677         if (irr == -1)
2678                 return;
2679
2680         if (tpr >= irr)
2681                 svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
2682 }
2683
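/*
 * An NMI can only be injected when the vcpu is not in an interrupt
 * shadow and NMIs are not currently masked (HF_NMI_MASK).
 */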
2684 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
2685 {
2686         struct vcpu_svm *svm = to_svm(vcpu);
2687         struct vmcb *vmcb = svm->vmcb;
2688         return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
2689                 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
2690 }
2691
2692 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
2693 {
2694         struct vcpu_svm *svm = to_svm(vcpu);
2695
2696         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
2697 }
2698
2699 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
2700 {
2701         struct vcpu_svm *svm = to_svm(vcpu);
2702
2703         if (masked) {
2704                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
2705                 svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
2706         } else {
2707                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
2708                 svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
2709         }
2710 }
2711
2712 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
2713 {
2714         struct vcpu_svm *svm = to_svm(vcpu);
2715         struct vmcb *vmcb = svm->vmcb;
2716         int ret;
2717
2718         if (!gif_set(svm) ||
2719              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
2720                 return 0;
2721
2722         ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
2723
2724         if (is_nested(svm))
2725                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
2726
2727         return ret;
2728 }
2729
2730 static void enable_irq_window(struct kvm_vcpu *vcpu)
2731 {
2732         struct vcpu_svm *svm = to_svm(vcpu);
2733
2734         /*
2735          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
2736          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
2737          * get that intercept, this function will be called again though and
2738          * we'll get the vintr intercept.
2739          */
2740         if (gif_set(svm) && nested_svm_intr(svm)) {
2741                 svm_set_vintr(svm);
2742                 svm_inject_irq(svm, 0x0);
2743         }
2744 }
2745
2746 static void enable_nmi_window(struct kvm_vcpu *vcpu)
2747 {
2748         struct vcpu_svm *svm = to_svm(vcpu);
2749
2750         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
2751             == HF_NMI_MASK)
2752                 return; /* IRET will cause a vm exit */
2753
2754         /*
2755          * Something prevents NMI from being injected. Single step over the
2756          * possible problem (IRET or exception injection or interrupt shadow)
2757          */
2758         if (gif_set(svm) && nested_svm_nmi(svm)) {
2759                 svm->nmi_singlestep = true;
2760                 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2761                 update_db_intercept(vcpu);
2762         }
2763 }
2764
2765 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
2766 {
2767         return 0;
2768 }
2769
2770 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
2771 {
2772         force_new_asid(vcpu);
2773 }
2774
2775 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
2776 {
2777 }
2778
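/*
 * The next two functions keep the local APIC's TPR and the V_TPR
 * field in the vmcb in sync; either side may have changed, depending
 * on whether CR8 accesses are intercepted.
 */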
2779 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
2780 {
2781         struct vcpu_svm *svm = to_svm(vcpu);
2782
2783         if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
2784                 return;
2785
2786         if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
2787                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
2788                 kvm_set_cr8(vcpu, cr8);
2789         }
2790 }
2791
2792 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
2793 {
2794         struct vcpu_svm *svm = to_svm(vcpu);
2795         u64 cr8;
2796
2797         if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
2798                 return;
2799
2800         cr8 = kvm_get_cr8(vcpu);
2801         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
2802         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
2803 }
2804
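/*
 * Requeue the event that was pending when the #VMEXIT happened, based
 * on exit_int_info, so it is reinjected on the next entry.  Software
 * exceptions are not reinjected; the instruction is re-executed
 * instead, with the RIP rewound first if we injected the INT3
 * ourselves.
 */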
2805 static void svm_complete_interrupts(struct vcpu_svm *svm)
2806 {
2807         u8 vector;
2808         int type;
2809         u32 exitintinfo = svm->vmcb->control.exit_int_info;
2810         unsigned int3_injected = svm->int3_injected;
2811
2812         svm->int3_injected = 0;
2813
2814         if (svm->vcpu.arch.hflags & HF_IRET_MASK)
2815                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
2816
2817         svm->vcpu.arch.nmi_injected = false;
2818         kvm_clear_exception_queue(&svm->vcpu);
2819         kvm_clear_interrupt_queue(&svm->vcpu);
2820
2821         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
2822                 return;
2823
2824         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
2825         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
2826
2827         switch (type) {
2828         case SVM_EXITINTINFO_TYPE_NMI:
2829                 svm->vcpu.arch.nmi_injected = true;
2830                 break;
2831         case SVM_EXITINTINFO_TYPE_EXEPT:
2832                 if (is_nested(svm))
2833                         break;
2834                 /*
2835                  * In case of software exceptions, do not reinject the vector,
2836                  * but re-execute the instruction instead. Rewind RIP first
2837                  * if we emulated INT3 before.
2838                  */
2839                 if (kvm_exception_is_soft(vector)) {
2840                         if (vector == BP_VECTOR && int3_injected &&
2841                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
2842                                 kvm_rip_write(&svm->vcpu,
2843                                               kvm_rip_read(&svm->vcpu) -
2844                                               int3_injected);
2845                         break;
2846                 }
2847                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
2848                         u32 err = svm->vmcb->control.exit_int_info_err;
2849                         kvm_queue_exception_e(&svm->vcpu, vector, err);
2850
2851                 } else
2852                         kvm_queue_exception(&svm->vcpu, vector);
2853                 break;
2854         case SVM_EXITINTINFO_TYPE_INTR:
2855                 kvm_queue_interrupt(&svm->vcpu, vector, false);
2856                 break;
2857         default:
2858                 break;
2859         }
2860 }
2861
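/*
 * Register prefix for the inline assembly below: "r" (rax, rbx, ...)
 * on 64-bit hosts, "e" (eax, ebx, ...) on 32-bit ones.
 */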
2862 #ifdef CONFIG_X86_64
2863 #define R "r"
2864 #else
2865 #define R "e"
2866 #endif
2867
2868 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2869 {
2870         struct vcpu_svm *svm = to_svm(vcpu);
2871         u16 fs_selector;
2872         u16 gs_selector;
2873         u16 ldt_selector;
2874
2875         /*
2876          * A vmexit emulation is required before the vcpu can be executed
2877          * again.
2878          */
2879         if (unlikely(svm->nested.exit_required))
2880                 return;
2881
2882         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
2883         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2884         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
2885
2886         pre_svm_run(svm);
2887
2888         sync_lapic_to_cr8(vcpu);
2889
2890         save_host_msrs(vcpu);
2891         fs_selector = kvm_read_fs();
2892         gs_selector = kvm_read_gs();
2893         ldt_selector = kvm_read_ldt();
2894         svm->vmcb->save.cr2 = vcpu->arch.cr2;
2895         /* required for live migration with NPT */
2896         if (npt_enabled)
2897                 svm->vmcb->save.cr3 = vcpu->arch.cr3;
2898
2899         clgi();
2900
2901         local_irq_enable();
2902
2903         asm volatile (
2904                 "push %%"R"bp; \n\t"
2905                 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
2906                 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
2907                 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
2908                 "mov %c[rsi](%[svm]), %%"R"si \n\t"
2909                 "mov %c[rdi](%[svm]), %%"R"di \n\t"
2910                 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
2911 #ifdef CONFIG_X86_64
2912                 "mov %c[r8](%[svm]),  %%r8  \n\t"
2913                 "mov %c[r9](%[svm]),  %%r9  \n\t"
2914                 "mov %c[r10](%[svm]), %%r10 \n\t"
2915                 "mov %c[r11](%[svm]), %%r11 \n\t"
2916                 "mov %c[r12](%[svm]), %%r12 \n\t"
2917                 "mov %c[r13](%[svm]), %%r13 \n\t"
2918                 "mov %c[r14](%[svm]), %%r14 \n\t"
2919                 "mov %c[r15](%[svm]), %%r15 \n\t"
2920 #endif
2921
2922                 /* Enter guest mode */
2923                 "push %%"R"ax \n\t"
2924                 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
2925                 __ex(SVM_VMLOAD) "\n\t"
2926                 __ex(SVM_VMRUN) "\n\t"
2927                 __ex(SVM_VMSAVE) "\n\t"
2928                 "pop %%"R"ax \n\t"
2929
2930                 /* Save guest registers, load host registers */
2931                 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
2932                 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
2933                 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
2934                 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
2935                 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
2936                 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
2937 #ifdef CONFIG_X86_64
2938                 "mov %%r8,  %c[r8](%[svm]) \n\t"
2939                 "mov %%r9,  %c[r9](%[svm]) \n\t"
2940                 "mov %%r10, %c[r10](%[svm]) \n\t"
2941                 "mov %%r11, %c[r11](%[svm]) \n\t"
2942                 "mov %%r12, %c[r12](%[svm]) \n\t"
2943                 "mov %%r13, %c[r13](%[svm]) \n\t"
2944                 "mov %%r14, %c[r14](%[svm]) \n\t"
2945                 "mov %%r15, %c[r15](%[svm]) \n\t"
2946 #endif
2947                 "pop %%"R"bp"
2948                 :
2949                 : [svm]"a"(svm),
2950                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
2951                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
2952                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
2953                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
2954                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
2955                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
2956                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
2957 #ifdef CONFIG_X86_64
2958                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
2959                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
2960                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
2961                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
2962                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
2963                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
2964                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
2965                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
2966 #endif
2967                 : "cc", "memory"
2968                 , R"bx", R"cx", R"dx", R"si", R"di"
2969 #ifdef CONFIG_X86_64
2970                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
2971 #endif
2972                 );
2973
2974         vcpu->arch.cr2 = svm->vmcb->save.cr2;
2975         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
2976         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
2977         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
2978
2979         kvm_load_fs(fs_selector);
2980         kvm_load_gs(gs_selector);
2981         kvm_load_ldt(ldt_selector);
2982         load_host_msrs(vcpu);
2983
2984         reload_tss(vcpu);
2985
2986         local_irq_disable();
2987
2988         stgi();
2989
2990         sync_cr8_to_lapic(vcpu);
2991
2992         svm->next_rip = 0;
2993
2994         if (npt_enabled) {
2995                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
2996                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
2997         }
2998 }
2999
3000 #undef R
3001
3002 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3003 {
3004         struct vcpu_svm *svm = to_svm(vcpu);
3005
3006         if (npt_enabled) {
3007                 svm->vmcb->control.nested_cr3 = root;
3008                 force_new_asid(vcpu);
3009                 return;
3010         }
3011
3012         svm->vmcb->save.cr3 = root;
3013         force_new_asid(vcpu);
3014 }
3015
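/* The BIOS can disable SVM via the SVMDIS bit in MSR_VM_CR. */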
3016 static int is_disabled(void)
3017 {
3018         u64 vm_cr;
3019
3020         rdmsrl(MSR_VM_CR, vm_cr);
3021         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3022                 return 1;
3023
3024         return 0;
3025 }
3026
3027 static void
3028 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3029 {
3030         /*
3031          * Patch in the VMMCALL instruction:
3032          */
3033         hypercall[0] = 0x0f;
3034         hypercall[1] = 0x01;
3035         hypercall[2] = 0xd9;
3036 }
3037
3038 static void svm_check_processor_compat(void *rtn)
3039 {
3040         *(int *)rtn = 0;
3041 }
3042
static bool svm_cpu_has_accelerated_tpr(void)
{
        return false;
}

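/*
 * Nested paging walks a full page table: four levels in long mode,
 * three (PAE) levels on 32-bit hosts.
 */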
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

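/*
 * Unlike EPT, NPT has no extra memory-type bits to fold into the PTEs:
 * the effective type comes from the guest's PAT, so no mask is needed.
 */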
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
        return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

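/*
 * Maps SVM exit codes to the strings printed by the kvm_exit
 * tracepoint; the table is terminated by the { -1, NULL } entry.
 */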
static const struct trace_print_flags svm_exit_reasons_str[] = {
        { SVM_EXIT_READ_CR0,                    "read_cr0" },
        { SVM_EXIT_READ_CR3,                    "read_cr3" },
        { SVM_EXIT_READ_CR4,                    "read_cr4" },
        { SVM_EXIT_READ_CR8,                    "read_cr8" },
        { SVM_EXIT_WRITE_CR0,                   "write_cr0" },
        { SVM_EXIT_WRITE_CR3,                   "write_cr3" },
        { SVM_EXIT_WRITE_CR4,                   "write_cr4" },
        { SVM_EXIT_WRITE_CR8,                   "write_cr8" },
        { SVM_EXIT_READ_DR0,                    "read_dr0" },
        { SVM_EXIT_READ_DR1,                    "read_dr1" },
        { SVM_EXIT_READ_DR2,                    "read_dr2" },
        { SVM_EXIT_READ_DR3,                    "read_dr3" },
        { SVM_EXIT_WRITE_DR0,                   "write_dr0" },
        { SVM_EXIT_WRITE_DR1,                   "write_dr1" },
        { SVM_EXIT_WRITE_DR2,                   "write_dr2" },
        { SVM_EXIT_WRITE_DR3,                   "write_dr3" },
        { SVM_EXIT_WRITE_DR5,                   "write_dr5" },
        { SVM_EXIT_WRITE_DR7,                   "write_dr7" },
        { SVM_EXIT_EXCP_BASE + DB_VECTOR,       "DB excp" },
        { SVM_EXIT_EXCP_BASE + BP_VECTOR,       "BP excp" },
        { SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" },
        { SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" },
        { SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" },
        { SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" },
        { SVM_EXIT_INTR,                        "interrupt" },
        { SVM_EXIT_NMI,                         "nmi" },
        { SVM_EXIT_SMI,                         "smi" },
        { SVM_EXIT_INIT,                        "init" },
        { SVM_EXIT_VINTR,                       "vintr" },
        { SVM_EXIT_CPUID,                       "cpuid" },
        { SVM_EXIT_INVD,                        "invd" },
        { SVM_EXIT_HLT,                         "hlt" },
        { SVM_EXIT_INVLPG,                      "invlpg" },
        { SVM_EXIT_INVLPGA,                     "invlpga" },
        { SVM_EXIT_IOIO,                        "io" },
        { SVM_EXIT_MSR,                         "msr" },
        { SVM_EXIT_TASK_SWITCH,                 "task_switch" },
        { SVM_EXIT_SHUTDOWN,                    "shutdown" },
        { SVM_EXIT_VMRUN,                       "vmrun" },
        { SVM_EXIT_VMMCALL,                     "hypercall" },
        { SVM_EXIT_VMLOAD,                      "vmload" },
        { SVM_EXIT_VMSAVE,                      "vmsave" },
        { SVM_EXIT_STGI,                        "stgi" },
        { SVM_EXIT_CLGI,                        "clgi" },
        { SVM_EXIT_SKINIT,                      "skinit" },
        { SVM_EXIT_WBINVD,                      "wbinvd" },
        { SVM_EXIT_MONITOR,                     "monitor" },
        { SVM_EXIT_MWAIT,                       "mwait" },
        { SVM_EXIT_NPF,                         "npf" },
        { -1, NULL }
};

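/*
 * With nested paging the MMU may map guest memory with pages up to the
 * PDPE (1GB) level.
 */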
static int svm_get_lpage_level(void)
{
        return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
        return false;
}

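/*
 * Lazy FPU switching: intercept #NM again so the guest's next FPU use
 * traps to the host and the FPU can be reloaded.  The saved L1
 * intercept mask in hsave is updated too, so the intercept survives a
 * nested #VMEXIT.
 */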
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
        if (is_nested(svm))
                svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
        update_cr0_intercept(svm);
}

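/*
 * The vendor callback table handed to the generic KVM code; every
 * arch-independent operation is dispatched through these pointers.
 */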
static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
        .vcpu_reset = svm_vcpu_reset,

        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .get_dr = svm_get_dr,
        .set_dr = svm_set_dr,
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
        .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,

        .tlb_flush = svm_flush_tlb,

        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .set_interrupt_shadow = svm_set_interrupt_shadow,
        .get_interrupt_shadow = svm_get_interrupt_shadow,
        .patch_hypercall = svm_patch_hypercall,
        .set_irq = svm_set_irq,
        .set_nmi = svm_inject_nmi,
        .queue_exception = svm_queue_exception,
        .interrupt_allowed = svm_interrupt_allowed,
        .nmi_allowed = svm_nmi_allowed,
        .get_nmi_mask = svm_get_nmi_mask,
        .set_nmi_mask = svm_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,

        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
        .get_mt_mask = svm_get_mt_mask,

        .exit_reasons_str = svm_exit_reasons_str,
        .get_lpage_level = svm_get_lpage_level,

        .cpuid_update = svm_cpuid_update,

        .rdtscp_supported = svm_rdtscp_supported,
};

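/*
 * Module entry point: registers the SVM callbacks and the vcpu size
 * with the generic KVM core.
 */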
static int __init svm_init(void)
{
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
                        THIS_MODULE);
}

static void __exit svm_exit(void)
{
        kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)