KVM: SVM: Don't use kmap_atomic in nested_svm_map
arch/x86/kvm/svm.c (pandora-kernel.git)
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/desc.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)

#define NESTED_EXIT_HOST        0       /* Exit handled on host level */
#define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
        MSR_FS_BASE,
#endif
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
        u64 vmcb;

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;

        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

        /* cache for intercepts of the guest */
        u16 intercept_cr_read;
        u16 intercept_cr_write;
        u16 intercept_dr_read;
        u16 intercept_dr_write;
        u32 intercept_exceptions;
        u64 intercept;

};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        unsigned long vmcb_pa;
        struct svm_cpu_data *svm_data;
        uint64_t asid_generation;
        uint64_t sysenter_esp;
        uint64_t sysenter_eip;

        u64 next_rip;

        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
        u64 host_gs_base;

        u32 *msrpm;

        struct nested_state nested;

        bool nmi_singlestep;
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
        return svm->nested.vmcb;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1 : 8, type : 5, dpl : 2, p : 1;
        unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
        int cpu;
        int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}

static inline void clgi(void)
{
        asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
        asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        vcpu->arch.efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /* If we are within a nested VM we'd better #VMEXIT and let the
           guest handle the exception */
        if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

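/*
 * A worked example of the encoding above: injecting #PF (vector 14) with an
 * error code sets event_inj to
 * 14 | SVM_EVTINJ_VALID | SVM_EVTINJ_VALID_ERR | SVM_EVTINJ_TYPE_EXEPT and
 * puts the page-fault error code in event_inj_err; the processor then
 * delivers the exception to the guest on the next VMRUN.
 */
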
static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
        return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

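/*
 * Note: SVM keeps only a single interrupt-shadow flag in int_state, so the
 * two helpers above cannot distinguish an STI shadow from a MOV SS shadow.
 * svm_get_interrupt_shadow() reports both X86_SHADOW_INT_STI and
 * X86_SHADOW_INT_MOV_SS whenever the flag is set, and
 * svm_set_interrupt_shadow() collapses any requested shadow into that one
 * flag.
 */
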
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!svm->next_rip) {
                if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
                       __func__, kvm_rip_read(vcpu), svm->next_rip);

        kvm_rip_write(vcpu, svm->next_rip);
        svm_set_interrupt_shadow(vcpu, 0);
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void *garbage)
{
        cpu_svm_disable();
}

static int svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
                       me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);

        if (!sd) {
                printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
                       me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;

        kvm_get_gdt(&gdt_descr);
        gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int r;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!sd->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = sd;

        return 0;

err_1:
        kfree(sd);
        return r;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr >= msrpm_ranges[i] &&
                    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
                        u32 msr_offset = (i * MSRS_IN_RANGE + msr -
                                          msrpm_ranges[i]) * 2;

                        u32 *base = msrpm + (msr_offset / 32);
                        u32 msr_shift = msr_offset % 32;
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
                        return;
                }
        }
        BUG();
}

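/*
 * The MSR permission map stores two intercept bits per MSR (bit 0: read,
 * bit 1: write) for three 2K MSR ranges. A worked example for MSR_LSTAR
 * (0xc0000082), which falls into msrpm_ranges[1] = 0xc0000000:
 *
 *   msr_offset = (1 * MSRS_IN_RANGE + 0x82) * 2 = (8192 + 0x82) * 2 = 16644
 *   base       = msrpm + 16644 / 32   = msrpm + 520
 *   msr_shift  = 16644 % 32           = 4
 *
 * so bit 4 (read intercept) and bit 5 (write intercept) of msrpm[520]
 * control MSR_LSTAR; a set bit means the access is intercepted.
 */
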
static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
        set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
        set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 1;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 0;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME);
        }

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        if (!svm_has(SVM_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt) {
                printk(KERN_INFO "kvm: Nested Paging disabled\n");
                npt_enabled = false;
        }

        if (npt_enabled) {
                printk(KERN_INFO "kvm: Nested Paging enabled\n");
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        return 0;

err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}

static __exit void svm_hardware_unsetup(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.fpu_active = 1;

        control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK;

        control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK |
                                        INTERCEPT_CR8_MASK;

        control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR4_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR6_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR4_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR6_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_exceptions = (1 << PF_VECTOR) |
                                        (1 << UD_VECTOR) |
                                        (1 << MC_VECTOR);

        control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
                                (1ULL << INTERCEPT_SELECTIVE_CR0) |
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_INVD) |
                                (1ULL << INTERCEPT_HLT) |
                                (1ULL << INTERCEPT_INVLPG) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
                                (1ULL << INTERCEPT_TASK_SWITCH) |
                                (1ULL << INTERCEPT_SHUTDOWN) |
                                (1ULL << INTERCEPT_VMRUN) |
                                (1ULL << INTERCEPT_VMMCALL) |
                                (1ULL << INTERCEPT_VMLOAD) |
                                (1ULL << INTERCEPT_VMSAVE) |
                                (1ULL << INTERCEPT_STGI) |
                                (1ULL << INTERCEPT_CLGI) |
                                (1ULL << INTERCEPT_SKINIT) |
                                (1ULL << INTERCEPT_WBINVD) |
                                (1ULL << INTERCEPT_MONITOR) |
                                (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = EFER_SVME;
        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /* This is the guest-visible cr0 value.
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         */
        svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
        kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);

        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl = 1;
                control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
                                        (1ULL << INTERCEPT_INVLPG));
                control->intercept_exceptions &= ~(1 << PF_VECTOR);
                control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
                control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
                save->g_pat = 0x0007040600070406ULL;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        force_new_asid(&svm->vcpu);

        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;

        if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
                control->pause_filter_count = 3000;
                control->intercept |= (1ULL << INTERCEPT_PAUSE);
        }

        enable_gif(svm);
}

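/*
 * init_vmcb() programs the architectural reset state: CS selector 0xf000
 * (base 0xf0000 for the VMX-compatibility reason noted above), RIP 0xfff0
 * and RFLAGS with only the reserved bit 1 set. Setting V_INTR_MASKING in
 * int_ctl decouples guest and host interrupt masking, so physical
 * interrupts reach the host regardless of the guest's EFLAGS.IF.
 */
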
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        init_vmcb(svm);

        if (!kvm_vcpu_is_bsp(vcpu)) {
                kvm_rip_write(vcpu, 0);
                svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
        }
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;

        return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        struct page *hsave_page;
        struct page *nested_msrpm_pages;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                goto uninit;

        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto free_page1;

        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
                goto free_page2;

        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
                goto free_page3;

        svm->nested.hsave = page_address(hsave_page);

        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        svm->nested.msrpm = page_address(nested_msrpm_pages);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        init_vmcb(svm);

        fx_init(&svm->vcpu);
        svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (kvm_vcpu_is_bsp(&svm->vcpu))
                svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

free_page3:
        __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
        __free_page(page);
uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 delta;

                if (check_tsc_unstable()) {
                        /*
                         * Make sure that the guest sees a monotonically
                         * increasing TSC.
                         */
                        delta = vcpu->arch.host_tsc - native_read_tsc();
                        svm->vmcb->control.tsc_offset += delta;
                        if (is_nested(svm))
                                svm->nested.hsave->control.tsc_offset += delta;
                }
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
                svm->asid_generation = 0;
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        ++vcpu->stat.host_state_reload;
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        vcpu->arch.host_tsc = native_read_tsc();
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
                load_pdptrs(vcpu, vcpu->arch.cr3);
                break;
        default:
                BUG();
        }
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

        /* AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
        var->unusable = !var->present || (var->type == 0);

        switch (seg) {
        case VCPU_SREG_CS:
                /*
                 * SVM always stores 0 for the 'G' bit in the CS selector in
                 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
                 * Intel's VMENTRY has a check on the 'G' bit.
                 */
                var->g = s->limit > 0xfffff;
                break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
                 * isn't exposed
                 */
                var->type |= 0x2;
                break;
        case VCPU_SREG_DS:
        case VCPU_SREG_ES:
        case VCPU_SREG_FS:
        case VCPU_SREG_GS:
                /*
                 * The accessed bit must always be set in the segment
                 * descriptor cache, although it can be cleared in the
                 * descriptor, the cached bit always remains at 1. Since
                 * Intel has a check on this, set it here to support
                 * cross-vendor migration.
                 */
                if (!var->unusable)
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
                /* On AMD CPUs sometimes the DB bit in the segment
                 * descriptor is left as 1, although the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross vendor migrating.
                 */
                if (var->unusable)
                        var->db = 0;
                break;
        }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.idtr.limit;
        dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.gdtr.limit;
        dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address;
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void update_cr0_intercept(struct vcpu_svm *svm)
{
        ulong gcr0 = svm->vcpu.arch.cr0;
        u64 *hcr0 = &svm->vmcb->save.cr0;

        if (!svm->vcpu.fpu_active)
                *hcr0 |= SVM_CR0_SELECTIVE_MASK;
        else
                *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
                        | (gcr0 & SVM_CR0_SELECTIVE_MASK);

        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
                svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
                svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
        } else {
                svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
        }
}

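/*
 * update_cr0_intercept() implements lazy CR0 handling: as long as the
 * guest-visible CR0 (gcr0) equals what the hardware uses and the FPU is
 * active, no CR0 access needs to trap. Only when the two views diverge,
 * e.g. because CR0.TS is forced on behind the guest's back for lazy FPU
 * switching, must every CR0 read and write be intercepted to hide the
 * difference.
 */
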
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        vcpu->arch.cr0 = cr0;

        if (!npt_enabled)
                cr0 |= X86_CR0_PG | X86_CR0_WP;

        if (!vcpu->fpu_active)
                cr0 |= X86_CR0_TS;
        /*
         * re-enable caching here because the QEMU bios
         * does not do it - this results in some delay at
         * reboot
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        update_cr0_intercept(svm);
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
                force_new_asid(vcpu);

        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static void update_db_intercept(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.intercept_exceptions &=
                ~((1 << DB_VECTOR) | (1 << BP_VECTOR));

        if (svm->nmi_singlestep)
                svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);

        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug &
                    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
                        svm->vmcb->control.intercept_exceptions |=
                                1 << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        svm->vmcb->control.intercept_exceptions |=
                                1 << BP_VECTOR;
        } else
                vcpu->guest_debug = 0;
}

static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
        else
                svm->vmcb->save.dr7 = vcpu->arch.dr7;

        update_db_intercept(vcpu);
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;
                sd->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->asid_generation = sd->asid_generation;
        svm->vmcb->control.asid = sd->next_asid++;
}

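/*
 * ASID handling in brief: each physical CPU hands out ASIDs from 1 to
 * max_asid. When the pool is exhausted, the per-CPU generation counter is
 * bumped, the pool restarts at 1 and a full TLB flush
 * (TLB_CONTROL_FLUSH_ALL_ASID) is requested so that recycled ASIDs cannot
 * match stale translations. force_new_asid() ages the vCPU's generation so
 * that it no longer matches the per-CPU one and a fresh ASID is assigned
 * before the next VMRUN.
 */
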
static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (dr) {
        case 0 ... 3:
                *dest = vcpu->arch.db[dr];
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 6:
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                        *dest = vcpu->arch.dr6;
                else
                        *dest = svm->vmcb->save.dr6;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 7:
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                        *dest = vcpu->arch.dr7;
                else
                        *dest = svm->vmcb->save.dr7;
                break;
        }

        return EMULATE_DONE;
}

static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (dr) {
        case 0 ... 3:
                vcpu->arch.db[dr] = value;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                        vcpu->arch.eff_db[dr] = value;
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 6:
                vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return EMULATE_FAIL; /* will re-inject UD */
                /* fall through */
        case 7:
                vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                        svm->vmcb->save.dr7 = vcpu->arch.dr7;
                        vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
                }
                break;
        }

        return EMULATE_DONE;
}

static int pf_interception(struct vcpu_svm *svm)
{
        u64 fault_address;
        u32 error_code;

        fault_address = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;

        trace_kvm_page_fault(fault_address, error_code);
        if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
                kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
                !svm->nmi_singlestep) {
                kvm_queue_exception(&svm->vcpu, DB_VECTOR);
                return 1;
        }

        if (svm->nmi_singlestep) {
                svm->nmi_singlestep = false;
                if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
                        svm->vmcb->save.rflags &=
                                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                update_db_intercept(&svm->vcpu);
        }

        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
                kvm_run->debug.arch.exception = DB_VECTOR;
                return 0;
        }

        return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        kvm_run->exit_reason = KVM_EXIT_DEBUG;
        kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
        kvm_run->debug.arch.exception = BP_VECTOR;
        return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
        int er;

        er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}

static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        svm->vcpu.fpu_active = 1;
        update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
        svm_fpu_activate(&svm->vcpu);
        return 1;
}

static int mc_interception(struct vcpu_svm *svm)
{
        /*
         * On an #MC intercept the MCE handler is not called automatically in
         * the host. So do it by hand here.
         */
        asm volatile (
                "int $0x12\n");
        /* not sure if we ever come back to this point */

        return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, in, string;
        unsigned port;

        ++svm->vcpu.stat.io_exits;

        svm->next_rip = svm->vmcb->control.exit_info_2;

        string = (io_info & SVM_IOIO_STR_MASK) != 0;

        if (string) {
                if (emulate_instruction(&svm->vcpu,
                                        0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_pio(&svm->vcpu, in, size, port);
}

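/*
 * For reference, exit_info_1 of an IOIO intercept encodes the whole access,
 * matching the masks used above: bit 0 is the direction (set for IN), bit 2
 * flags a string operation, bits 4-6 give the operand size and bits 16-31
 * the port number. exit_info_2 already contains the rip of the following
 * instruction, so no instruction decoding is required here.
 */
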
static int nmi_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
        ++svm->vcpu.stat.irq_exits;
        return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME)
            || !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
{
        if (!is_nested(svm))
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

        return nested_svm_exit_handled(svm);
}

static inline int nested_svm_intr(struct vcpu_svm *svm)
{
        if (!is_nested(svm))
                return 0;

        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                return 0;

        if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_INTR;

        if (svm->nested.intercept & 1ULL) {
                /*
                 * The #vmexit can't be emulated here directly because this
                 * code path runs with irqs and preemption disabled. A
                 * #vmexit emulation might sleep. Only signal request for
                 * the #vmexit here.
                 */
                svm->nested.exit_required = true;
                trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
                return 1;
        }

        return 0;
}

static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
        struct page *page;

        page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;

        *_page = page;

        return kmap(page);

error:
        kvm_release_page_clean(page);
        kvm_inject_gp(&svm->vcpu, 0);

        return NULL;
}

static void nested_svm_unmap(struct page *page)
{
        kunmap(page);
        kvm_release_page_dirty(page);
}

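/*
 * As the patch subject says, these helpers intentionally use kmap()/kunmap()
 * instead of kmap_atomic(): callers keep the mapping across operations that
 * may sleep, which is illegal inside an atomic kmap. The usage pattern, as
 * in nested_svm_vmexit() below, is:
 *
 *      nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
 *      if (!nested_vmcb)
 *              return 1;
 *      ... access nested_vmcb, possibly sleeping ...
 *      nested_svm_unmap(page);
 *
 * On failure nested_svm_map() releases the page and injects #GP itself, so
 * the caller must not unmap.
 */
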
static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 param = svm->vmcb->control.exit_info_1 & 1;
        u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        struct page *page;
        bool ret = false;
        u32 t0, t1;
        u8 *msrpm;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return false;

        msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
        if (!msrpm)
                return false; /* page was never mapped, must not unmap it */

        switch (msr) {
        case 0 ... 0x1fff:
                t0 = (msr * 2) % 8;
                t1 = msr / 8;
                break;
        case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + msr - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
        case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + msr - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
        default:
                ret = true;
                goto out;
        }

        ret = msrpm[t1] & ((1 << param) << t0);

out:
        nested_svm_unmap(page);

        return ret;
}

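/*
 * A worked example of the offset math above, for MSR_EFER (0xc0000080):
 *
 *   t0 = (8192 + 0xc0000080 - 0xc0000000) * 2 = 16640    bit offset
 *   t1 = 16640 / 8                            = 2080     byte in msrpm
 *   t0 = 16640 % 8                            = 0        bit within byte
 *
 * With param taken from bit 0 of exit_info_1 (0 = RDMSR, 1 = WRMSR), the
 * test msrpm[2080] & ((1 << param) << 0) checks the nested guest's read or
 * write intercept bit for EFER.
 */
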
static int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
                return NESTED_EXIT_HOST;
                /* For now we are always handling NPFs when using them */
        case SVM_EXIT_NPF:
                if (npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
        /* When we're shadowing, trap PFs */
        case SVM_EXIT_EXCP_BASE + PF_VECTOR:
                if (!npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
                u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
                if (svm->nested.intercept_cr_read & cr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
                u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
                if (svm->nested.intercept_cr_write & cr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
                u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
                if (svm->nested.intercept_dr_read & dr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
                u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
                if (svm->nested.intercept_dr_write & dr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
                if (svm->nested.intercept_exceptions & excp_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
                if (svm->nested.intercept & exit_bits)
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

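/*
 * The default case above exploits the 1:1 layout of the SVM intercept
 * field: exit codes from SVM_EXIT_INTR upwards correspond bit-for-bit to
 * the positions in the 64-bit intercept vector (INTERCEPT_INTR is bit 0,
 * INTERCEPT_NMI bit 1, and so on), so a guest intercept can be tested with
 * a single shift by (exit_code - SVM_EXIT_INTR).
 */
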
1577 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
1578 {
1579         struct vmcb_control_area *dst  = &dst_vmcb->control;
1580         struct vmcb_control_area *from = &from_vmcb->control;
1581
1582         dst->intercept_cr_read    = from->intercept_cr_read;
1583         dst->intercept_cr_write   = from->intercept_cr_write;
1584         dst->intercept_dr_read    = from->intercept_dr_read;
1585         dst->intercept_dr_write   = from->intercept_dr_write;
1586         dst->intercept_exceptions = from->intercept_exceptions;
1587         dst->intercept            = from->intercept;
1588         dst->iopm_base_pa         = from->iopm_base_pa;
1589         dst->msrpm_base_pa        = from->msrpm_base_pa;
1590         dst->tsc_offset           = from->tsc_offset;
1591         dst->asid                 = from->asid;
1592         dst->tlb_ctl              = from->tlb_ctl;
1593         dst->int_ctl              = from->int_ctl;
1594         dst->int_vector           = from->int_vector;
1595         dst->int_state            = from->int_state;
1596         dst->exit_code            = from->exit_code;
1597         dst->exit_code_hi         = from->exit_code_hi;
1598         dst->exit_info_1          = from->exit_info_1;
1599         dst->exit_info_2          = from->exit_info_2;
1600         dst->exit_int_info        = from->exit_int_info;
1601         dst->exit_int_info_err    = from->exit_int_info_err;
1602         dst->nested_ctl           = from->nested_ctl;
1603         dst->event_inj            = from->event_inj;
1604         dst->event_inj_err        = from->event_inj_err;
1605         dst->nested_cr3           = from->nested_cr3;
1606         dst->lbr_ctl              = from->lbr_ctl;
1607 }
1608
1609 static int nested_svm_vmexit(struct vcpu_svm *svm)
1610 {
1611         struct vmcb *nested_vmcb;
1612         struct vmcb *hsave = svm->nested.hsave;
1613         struct vmcb *vmcb = svm->vmcb;
1614         struct page *page;
1615
1616         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
1617                                        vmcb->control.exit_info_1,
1618                                        vmcb->control.exit_info_2,
1619                                        vmcb->control.exit_int_info,
1620                                        vmcb->control.exit_int_info_err);
1621
1622         nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
1623         if (!nested_vmcb)
1624                 return 1;
1625
1626         /* Give the current vmcb to the guest */
1627         disable_gif(svm);
1628
1629         nested_vmcb->save.es     = vmcb->save.es;
1630         nested_vmcb->save.cs     = vmcb->save.cs;
1631         nested_vmcb->save.ss     = vmcb->save.ss;
1632         nested_vmcb->save.ds     = vmcb->save.ds;
1633         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
1634         nested_vmcb->save.idtr   = vmcb->save.idtr;
1635         if (npt_enabled)
1636                 nested_vmcb->save.cr3    = vmcb->save.cr3;
1637         nested_vmcb->save.cr2    = vmcb->save.cr2;
1638         nested_vmcb->save.rflags = vmcb->save.rflags;
1639         nested_vmcb->save.rip    = vmcb->save.rip;
1640         nested_vmcb->save.rsp    = vmcb->save.rsp;
1641         nested_vmcb->save.rax    = vmcb->save.rax;
1642         nested_vmcb->save.dr7    = vmcb->save.dr7;
1643         nested_vmcb->save.dr6    = vmcb->save.dr6;
1644         nested_vmcb->save.cpl    = vmcb->save.cpl;
1645
1646         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
1647         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
1648         nested_vmcb->control.int_state         = vmcb->control.int_state;
1649         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
1650         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
1651         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
1652         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
1653         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
1654         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
1655
1656         /*
1657          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
1658          * to make sure that we do not lose injected events. So check event_inj
1659          * here and copy it to exit_int_info if it is valid.
1660          * exit_int_info and event_inj can't both be valid because the case
1661          * below only happens on a VMRUN instruction intercept which has
1662          * no valid exit_int_info set.
1663          */
1664         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
1665                 struct vmcb_control_area *nc = &nested_vmcb->control;
1666
1667                 nc->exit_int_info     = vmcb->control.event_inj;
1668                 nc->exit_int_info_err = vmcb->control.event_inj_err;
1669         }
1670
1671         nested_vmcb->control.tlb_ctl           = 0;
1672         nested_vmcb->control.event_inj         = 0;
1673         nested_vmcb->control.event_inj_err     = 0;
1674
1675         /* We always set V_INTR_MASKING and remember the old value in hflags */
1676         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1677                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1678
1679         /* Restore the original control entries */
1680         copy_vmcb_control_area(vmcb, hsave);
1681
1682         kvm_clear_exception_queue(&svm->vcpu);
1683         kvm_clear_interrupt_queue(&svm->vcpu);
1684
1685         /* Restore selected save entries */
1686         svm->vmcb->save.es = hsave->save.es;
1687         svm->vmcb->save.cs = hsave->save.cs;
1688         svm->vmcb->save.ss = hsave->save.ss;
1689         svm->vmcb->save.ds = hsave->save.ds;
1690         svm->vmcb->save.gdtr = hsave->save.gdtr;
1691         svm->vmcb->save.idtr = hsave->save.idtr;
1692         svm->vmcb->save.rflags = hsave->save.rflags;
1693         svm_set_efer(&svm->vcpu, hsave->save.efer);
1694         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
1695         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
1696         if (npt_enabled) {
1697                 svm->vmcb->save.cr3 = hsave->save.cr3;
1698                 svm->vcpu.arch.cr3 = hsave->save.cr3;
1699         } else {
1700                 kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
1701         }
1702         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
1703         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
1704         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
1705         svm->vmcb->save.dr7 = 0;
1706         svm->vmcb->save.cpl = 0;
1707         svm->vmcb->control.exit_int_info = 0;
1708
1709         /* Exit nested SVM mode */
1710         svm->nested.vmcb = 0;
1711
1712         nested_svm_unmap(page);
1713
1714         kvm_mmu_reset_context(&svm->vcpu);
1715         kvm_mmu_load(&svm->vcpu);
1716
1717         return 0;
1718 }
1719
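/*
 * Merge the guest's MSR permission map with our own: an MSR access is
 * intercepted if either bitmap intercepts it, so the nested guest can
 * never see an MSR the host wants to intercept.  The map spans
 * PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) bytes, i.e. (with 4K pages and
 * MSRPM_ALLOC_ORDER == 1) 8K, walked here as 2048 u32 words.
 */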
1720 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1721 {
1722         u32 *nested_msrpm;
1723         struct page *page;
1724         int i;
1725
1726         nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
1727         if (!nested_msrpm)
1728                 return false;
1729
1730         for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
1731                 svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
1732
1733         svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1734
1735         nested_svm_unmap(page);
1736
1737         return true;
1738 }
1739
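/*
 * Emulate VMRUN: save the host (L1) state into hsave so a later
 * #VMEXIT can restore it wholesale, load the nested guest state from
 * the VMCB at rax, OR the nested intercepts into our own, and set GIF.
 * Returns false if the nested VMCB could not be mapped.
 */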
1740 static bool nested_svm_vmrun(struct vcpu_svm *svm)
1741 {
1742         struct vmcb *nested_vmcb;
1743         struct vmcb *hsave = svm->nested.hsave;
1744         struct vmcb *vmcb = svm->vmcb;
1745         struct page *page;
1746
1747         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1748         if (!nested_vmcb)
1749                 return false;
1750
1751         /* nested_vmcb is our indicator whether nested SVM is activated */
1752         svm->nested.vmcb = svm->vmcb->save.rax;
1753
1754         trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb,
1755                                nested_vmcb->save.rip,
1756                                nested_vmcb->control.int_ctl,
1757                                nested_vmcb->control.event_inj,
1758                                nested_vmcb->control.nested_ctl);
1759
1760         /* Clear internal status */
1761         kvm_clear_exception_queue(&svm->vcpu);
1762         kvm_clear_interrupt_queue(&svm->vcpu);
1763
1764         /* Save the old vmcb, so we don't need to pick which fields to
1765            save, but can restore everything when a VMEXIT occurs */
1766         hsave->save.es     = vmcb->save.es;
1767         hsave->save.cs     = vmcb->save.cs;
1768         hsave->save.ss     = vmcb->save.ss;
1769         hsave->save.ds     = vmcb->save.ds;
1770         hsave->save.gdtr   = vmcb->save.gdtr;
1771         hsave->save.idtr   = vmcb->save.idtr;
1772         hsave->save.efer   = svm->vcpu.arch.efer;
1773         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
1774         hsave->save.cr4    = svm->vcpu.arch.cr4;
1775         hsave->save.rflags = vmcb->save.rflags;
1776         hsave->save.rip    = svm->next_rip;
1777         hsave->save.rsp    = vmcb->save.rsp;
1778         hsave->save.rax    = vmcb->save.rax;
1779         if (npt_enabled)
1780                 hsave->save.cr3    = vmcb->save.cr3;
1781         else
1782                 hsave->save.cr3    = svm->vcpu.arch.cr3;
1783
1784         copy_vmcb_control_area(hsave, vmcb);
1785
1786         if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
1787                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
1788         else
1789                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
1790
1791         /* Load the nested guest state */
1792         svm->vmcb->save.es = nested_vmcb->save.es;
1793         svm->vmcb->save.cs = nested_vmcb->save.cs;
1794         svm->vmcb->save.ss = nested_vmcb->save.ss;
1795         svm->vmcb->save.ds = nested_vmcb->save.ds;
1796         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
1797         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
1798         svm->vmcb->save.rflags = nested_vmcb->save.rflags;
1799         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
1800         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
1801         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
1802         if (npt_enabled) {
1803                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
1804                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
1805         } else {
1806                 kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
1807                 kvm_mmu_reset_context(&svm->vcpu);
1808         }
1809         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
1810         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
1811         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
1812         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
1813         /* In case we don't even reach vcpu_run, the fields are not updated */
1814         svm->vmcb->save.rax = nested_vmcb->save.rax;
1815         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
1816         svm->vmcb->save.rip = nested_vmcb->save.rip;
1817         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
1818         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1819         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1820
1821         /* We don't want the nested guest to be more powerful than the
1822            guest it runs in, so all intercepts are ORed */
1823         svm->vmcb->control.intercept_cr_read |=
1824                 nested_vmcb->control.intercept_cr_read;
1825         svm->vmcb->control.intercept_cr_write |=
1826                 nested_vmcb->control.intercept_cr_write;
1827         svm->vmcb->control.intercept_dr_read |=
1828                 nested_vmcb->control.intercept_dr_read;
1829         svm->vmcb->control.intercept_dr_write |=
1830                 nested_vmcb->control.intercept_dr_write;
1831         svm->vmcb->control.intercept_exceptions |=
1832                 nested_vmcb->control.intercept_exceptions;
1833
1834         svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1835
1836         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1837
1838         /* Cache the intercepts for the nested #VMEXIT decision logic */
1839         svm->nested.intercept_cr_read    = nested_vmcb->control.intercept_cr_read;
1840         svm->nested.intercept_cr_write   = nested_vmcb->control.intercept_cr_write;
1841         svm->nested.intercept_dr_read    = nested_vmcb->control.intercept_dr_read;
1842         svm->nested.intercept_dr_write   = nested_vmcb->control.intercept_dr_write;
1843         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
1844         svm->nested.intercept            = nested_vmcb->control.intercept;
1845
1846         force_new_asid(&svm->vcpu);
1847         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
1848         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
1849                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
1850         else
1851                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1852
1853         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1854         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1855         svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1856         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1857         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1858
1859         nested_svm_unmap(page);
1860
1861         enable_gif(svm);
1862
1863         return true;
1864 }
1865
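/*
 * Copy the state that VMLOAD/VMSAVE transfer: FS, GS, TR and LDTR plus
 * KERNEL_GS_BASE and the SYSCALL/SYSENTER MSR state.
 * vmload_interception() copies from the nested VMCB into ours,
 * vmsave_interception() the other way round.
 */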
1866 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1867 {
1868         to_vmcb->save.fs = from_vmcb->save.fs;
1869         to_vmcb->save.gs = from_vmcb->save.gs;
1870         to_vmcb->save.tr = from_vmcb->save.tr;
1871         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
1872         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
1873         to_vmcb->save.star = from_vmcb->save.star;
1874         to_vmcb->save.lstar = from_vmcb->save.lstar;
1875         to_vmcb->save.cstar = from_vmcb->save.cstar;
1876         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
1877         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1878         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1879         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1880 }
1881
1882 static int vmload_interception(struct vcpu_svm *svm)
1883 {
1884         struct vmcb *nested_vmcb;
1885         struct page *page;
1886
1887         if (nested_svm_check_permissions(svm))
1888                 return 1;
1889
1890         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1891         skip_emulated_instruction(&svm->vcpu);
1892
1893         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1894         if (!nested_vmcb)
1895                 return 1;
1896
1897         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1898         nested_svm_unmap(page);
1899
1900         return 1;
1901 }
1902
1903 static int vmsave_interception(struct vcpu_svm *svm)
1904 {
1905         struct vmcb *nested_vmcb;
1906         struct page *page;
1907
1908         if (nested_svm_check_permissions(svm))
1909                 return 1;
1910
1911         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1912         skip_emulated_instruction(&svm->vcpu);
1913
1914         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1915         if (!nested_vmcb)
1916                 return 1;
1917
1918         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1919         nested_svm_unmap(page);
1920
1921         return 1;
1922 }
1923
1924 static int vmrun_interception(struct vcpu_svm *svm)
1925 {
1926         if (nested_svm_check_permissions(svm))
1927                 return 1;
1928
1929         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1930         skip_emulated_instruction(&svm->vcpu);
1931
1932         if (!nested_svm_vmrun(svm))
1933                 return 1;
1934
1935         if (!nested_svm_vmrun_msrpm(svm))
1936                 goto failed;
1937
1938         return 1;
1939
1940 failed:
1941
1942         svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
1943         svm->vmcb->control.exit_code_hi = 0;
1944         svm->vmcb->control.exit_info_1  = 0;
1945         svm->vmcb->control.exit_info_2  = 0;
1946
1947         nested_svm_vmexit(svm);
1948
1949         return 1;
1950 }
1951
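/*
 * STGI/CLGI set and clear the guest's global interrupt flag; with GIF
 * clear no interrupts are delivered to the guest, so CLGI also tears
 * down any pending virtual interrupt request.
 */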
1952 static int stgi_interception(struct vcpu_svm *svm)
1953 {
1954         if (nested_svm_check_permissions(svm))
1955                 return 1;
1956
1957         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1958         skip_emulated_instruction(&svm->vcpu);
1959
1960         enable_gif(svm);
1961
1962         return 1;
1963 }
1964
1965 static int clgi_interception(struct vcpu_svm *svm)
1966 {
1967         if (nested_svm_check_permissions(svm))
1968                 return 1;
1969
1970         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1971         skip_emulated_instruction(&svm->vcpu);
1972
1973         disable_gif(svm);
1974
1975         /* After a CLGI no interrupts should come */
1976         svm_clear_vintr(svm);
1977         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
1978
1979         return 1;
1980 }
1981
1982 static int invlpga_interception(struct vcpu_svm *svm)
1983 {
1984         struct kvm_vcpu *vcpu = &svm->vcpu;
1985
1986         trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
1987                           vcpu->arch.regs[VCPU_REGS_RAX]);
1988
1989         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
1990         kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
1991
1992         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1993         skip_emulated_instruction(&svm->vcpu);
1994         return 1;
1995 }
1996
1997 static int skinit_interception(struct vcpu_svm *svm)
1998 {
1999         trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
2000
2001         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2002         return 1;
2003 }
2004
2005 static int invalid_op_interception(struct vcpu_svm *svm)
2006 {
2007         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2008         return 1;
2009 }
2010
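/*
 * Decode the task switch reason from exit_info_2 (IRET and JMP have
 * their own bits; otherwise it is a gate if exit_int_info is valid,
 * else a CALL), drop the event that was being delivered through the
 * task gate, and skip the instruction only if the switch was caused
 * by one (JMP, CALL, IRET, a software interrupt, or INT3/INTO).
 */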
2011 static int task_switch_interception(struct vcpu_svm *svm)
2012 {
2013         u16 tss_selector;
2014         int reason;
2015         int int_type = svm->vmcb->control.exit_int_info &
2016                 SVM_EXITINTINFO_TYPE_MASK;
2017         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2018         uint32_t type =
2019                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2020         uint32_t idt_v =
2021                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2022
2023         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2024
2025         if (svm->vmcb->control.exit_info_2 &
2026             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2027                 reason = TASK_SWITCH_IRET;
2028         else if (svm->vmcb->control.exit_info_2 &
2029                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2030                 reason = TASK_SWITCH_JMP;
2031         else if (idt_v)
2032                 reason = TASK_SWITCH_GATE;
2033         else
2034                 reason = TASK_SWITCH_CALL;
2035
2036         if (reason == TASK_SWITCH_GATE) {
2037                 switch (type) {
2038                 case SVM_EXITINTINFO_TYPE_NMI:
2039                         svm->vcpu.arch.nmi_injected = false;
2040                         break;
2041                 case SVM_EXITINTINFO_TYPE_EXEPT:
2042                         kvm_clear_exception_queue(&svm->vcpu);
2043                         break;
2044                 case SVM_EXITINTINFO_TYPE_INTR:
2045                         kvm_clear_interrupt_queue(&svm->vcpu);
2046                         break;
2047                 default:
2048                         break;
2049                 }
2050         }
2051
2052         if (reason != TASK_SWITCH_GATE ||
2053             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2054             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2055              (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2056                 skip_emulated_instruction(&svm->vcpu);
2057
2058         return kvm_task_switch(&svm->vcpu, tss_selector, reason);
2059 }
2060
2061 static int cpuid_interception(struct vcpu_svm *svm)
2062 {
2063         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2064         kvm_emulate_cpuid(&svm->vcpu);
2065         return 1;
2066 }
2067
2068 static int iret_interception(struct vcpu_svm *svm)
2069 {
2070         ++svm->vcpu.stat.nmi_window_exits;
2071         svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
2072         svm->vcpu.arch.hflags |= HF_IRET_MASK;
2073         return 1;
2074 }
2075
2076 static int invlpg_interception(struct vcpu_svm *svm)
2077 {
2078         if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
2079                 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
2080         return 1;
2081 }
2082
2083 static int emulate_on_interception(struct vcpu_svm *svm)
2084 {
2085         if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
2086                 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
2087         return 1;
2088 }
2089
2090 static int cr8_write_interception(struct vcpu_svm *svm)
2091 {
2092         struct kvm_run *kvm_run = svm->vcpu.run;
2093
2094         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2095         /* instruction emulation calls kvm_set_cr8() */
2096         emulate_instruction(&svm->vcpu, 0, 0, 0);
2097         if (irqchip_in_kernel(svm->vcpu.kvm)) {
2098                 svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
2099                 return 1;
2100         }
2101         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
2102                 return 1;
2103         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2104         return 0;
2105 }
2106
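/*
 * Read an MSR on behalf of the guest.  Most values come straight from
 * the VMCB save area; MSR_IA32_TSC is computed from the host TSC plus
 * the appropriate TSC offset (the L1 offset kept in hsave while a
 * nested guest is running).
 */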
2107 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2108 {
2109         struct vcpu_svm *svm = to_svm(vcpu);
2110
2111         switch (ecx) {
2112         case MSR_IA32_TSC: {
2113                 u64 tsc_offset;
2114
2115                 if (is_nested(svm))
2116                         tsc_offset = svm->nested.hsave->control.tsc_offset;
2117                 else
2118                         tsc_offset = svm->vmcb->control.tsc_offset;
2119
2120                 *data = tsc_offset + native_read_tsc();
2121                 break;
2122         }
2123         case MSR_K6_STAR:
2124                 *data = svm->vmcb->save.star;
2125                 break;
2126 #ifdef CONFIG_X86_64
2127         case MSR_LSTAR:
2128                 *data = svm->vmcb->save.lstar;
2129                 break;
2130         case MSR_CSTAR:
2131                 *data = svm->vmcb->save.cstar;
2132                 break;
2133         case MSR_KERNEL_GS_BASE:
2134                 *data = svm->vmcb->save.kernel_gs_base;
2135                 break;
2136         case MSR_SYSCALL_MASK:
2137                 *data = svm->vmcb->save.sfmask;
2138                 break;
2139 #endif
2140         case MSR_IA32_SYSENTER_CS:
2141                 *data = svm->vmcb->save.sysenter_cs;
2142                 break;
2143         case MSR_IA32_SYSENTER_EIP:
2144                 *data = svm->sysenter_eip;
2145                 break;
2146         case MSR_IA32_SYSENTER_ESP:
2147                 *data = svm->sysenter_esp;
2148                 break;
2149         /* Nobody will change the following 5 values in the VMCB so
2150            we can safely return them on rdmsr. They will always be 0
2151            until LBRV is implemented. */
2152         case MSR_IA32_DEBUGCTLMSR:
2153                 *data = svm->vmcb->save.dbgctl;
2154                 break;
2155         case MSR_IA32_LASTBRANCHFROMIP:
2156                 *data = svm->vmcb->save.br_from;
2157                 break;
2158         case MSR_IA32_LASTBRANCHTOIP:
2159                 *data = svm->vmcb->save.br_to;
2160                 break;
2161         case MSR_IA32_LASTINTFROMIP:
2162                 *data = svm->vmcb->save.last_excp_from;
2163                 break;
2164         case MSR_IA32_LASTINTTOIP:
2165                 *data = svm->vmcb->save.last_excp_to;
2166                 break;
2167         case MSR_VM_HSAVE_PA:
2168                 *data = svm->nested.hsave_msr;
2169                 break;
2170         case MSR_VM_CR:
2171                 *data = 0;
2172                 break;
2173         case MSR_IA32_UCODE_REV:
2174                 *data = 0x01000065;
2175                 break;
2176         default:
2177                 return kvm_get_msr_common(vcpu, ecx, data);
2178         }
2179         return 0;
2180 }
2181
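/*
 * RDMSR emulation: the MSR index arrives in ECX and the 64-bit result
 * is returned split across EAX (low half) and EDX (high half), just as
 * the real instruction would.
 */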
2182 static int rdmsr_interception(struct vcpu_svm *svm)
2183 {
2184         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2185         u64 data;
2186
2187         if (svm_get_msr(&svm->vcpu, ecx, &data)) {
2188                 trace_kvm_msr_read_ex(ecx);
2189                 kvm_inject_gp(&svm->vcpu, 0);
2190         } else {
2191                 trace_kvm_msr_read(ecx, data);
2192
2193                 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
2194                 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
2195                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2196                 skip_emulated_instruction(&svm->vcpu);
2197         }
2198         return 1;
2199 }
2200
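/*
 * Write an MSR on behalf of the guest.  For MSR_IA32_TSC the offset is
 * recomputed as tsc_offset = data - host_tsc; while a nested guest
 * runs, the delta between the L2 and L1 offsets (g_tsc_offset) is
 * preserved so the nested guest's TSC view is not disturbed.
 */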
2201 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2202 {
2203         struct vcpu_svm *svm = to_svm(vcpu);
2204
2205         switch (ecx) {
2206         case MSR_IA32_TSC: {
2207                 u64 tsc_offset = data - native_read_tsc();
2208                 u64 g_tsc_offset = 0;
2209
2210                 if (is_nested(svm)) {
2211                         g_tsc_offset = svm->vmcb->control.tsc_offset -
2212                                        svm->nested.hsave->control.tsc_offset;
2213                         svm->nested.hsave->control.tsc_offset = tsc_offset;
2214                 }
2215
2216                 svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
2217
2218                 break;
2219         }
2220         case MSR_K6_STAR:
2221                 svm->vmcb->save.star = data;
2222                 break;
2223 #ifdef CONFIG_X86_64
2224         case MSR_LSTAR:
2225                 svm->vmcb->save.lstar = data;
2226                 break;
2227         case MSR_CSTAR:
2228                 svm->vmcb->save.cstar = data;
2229                 break;
2230         case MSR_KERNEL_GS_BASE:
2231                 svm->vmcb->save.kernel_gs_base = data;
2232                 break;
2233         case MSR_SYSCALL_MASK:
2234                 svm->vmcb->save.sfmask = data;
2235                 break;
2236 #endif
2237         case MSR_IA32_SYSENTER_CS:
2238                 svm->vmcb->save.sysenter_cs = data;
2239                 break;
2240         case MSR_IA32_SYSENTER_EIP:
2241                 svm->sysenter_eip = data;
2242                 svm->vmcb->save.sysenter_eip = data;
2243                 break;
2244         case MSR_IA32_SYSENTER_ESP:
2245                 svm->sysenter_esp = data;
2246                 svm->vmcb->save.sysenter_esp = data;
2247                 break;
2248         case MSR_IA32_DEBUGCTLMSR:
2249                 if (!svm_has(SVM_FEATURE_LBRV)) {
2250                         pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2251                                         __func__, data);
2252                         break;
2253                 }
2254                 if (data & DEBUGCTL_RESERVED_BITS)
2255                         return 1;
2256
2257                 svm->vmcb->save.dbgctl = data;
2258                 if (data & (1ULL << 0))
2259                         svm_enable_lbrv(svm);
2260                 else
2261                         svm_disable_lbrv(svm);
2262                 break;
2263         case MSR_VM_HSAVE_PA:
2264                 svm->nested.hsave_msr = data;
2265                 break;
2266         case MSR_VM_CR:
2267         case MSR_VM_IGNNE:
2268                 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
2269                 break;
2270         default:
2271                 return kvm_set_msr_common(vcpu, ecx, data);
2272         }
2273         return 0;
2274 }
2275
2276 static int wrmsr_interception(struct vcpu_svm *svm)
2277 {
2278         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2279         u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
2280                 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2281
2282
2283         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2284         if (svm_set_msr(&svm->vcpu, ecx, data)) {
2285                 trace_kvm_msr_write_ex(ecx, data);
2286                 kvm_inject_gp(&svm->vcpu, 0);
2287         } else {
2288                 trace_kvm_msr_write(ecx, data);
2289                 skip_emulated_instruction(&svm->vcpu);
2290         }
2291         return 1;
2292 }
2293
2294 static int msr_interception(struct vcpu_svm *svm)
2295 {
2296         if (svm->vmcb->control.exit_info_1)
2297                 return wrmsr_interception(svm);
2298         else
2299                 return rdmsr_interception(svm);
2300 }
2301
2302 static int interrupt_window_interception(struct vcpu_svm *svm)
2303 {
2304         struct kvm_run *kvm_run = svm->vcpu.run;
2305
2306         svm_clear_vintr(svm);
2307         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2308         /*
2309          * If user space is waiting to inject interrupts, exit as soon as
2310          * possible
2311          */
2312         if (!irqchip_in_kernel(svm->vcpu.kvm) &&
2313             kvm_run->request_interrupt_window &&
2314             !kvm_cpu_has_interrupt(&svm->vcpu)) {
2315                 ++svm->vcpu.stat.irq_window_exits;
2316                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2317                 return 0;
2318         }
2319
2320         return 1;
2321 }
2322
2323 static int pause_interception(struct vcpu_svm *svm)
2324 {
2325         kvm_vcpu_on_spin(&(svm->vcpu));
2326         return 1;
2327 }
2328
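/*
 * Exit handler dispatch table, indexed by SVM exit code.  Each handler
 * returns 1 to resume the guest or 0 to drop back to user space with
 * kvm_run->exit_reason filled in.
 */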
2329 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
2330         [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
2331         [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
2332         [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
2333         [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
2334         [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
2335         [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
2336         [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
2337         [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
2338         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
2339         [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
2340         [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
2341         [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
2342         [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
2343         [SVM_EXIT_READ_DR4]                     = emulate_on_interception,
2344         [SVM_EXIT_READ_DR5]                     = emulate_on_interception,
2345         [SVM_EXIT_READ_DR6]                     = emulate_on_interception,
2346         [SVM_EXIT_READ_DR7]                     = emulate_on_interception,
2347         [SVM_EXIT_WRITE_DR0]                    = emulate_on_interception,
2348         [SVM_EXIT_WRITE_DR1]                    = emulate_on_interception,
2349         [SVM_EXIT_WRITE_DR2]                    = emulate_on_interception,
2350         [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
2351         [SVM_EXIT_WRITE_DR4]                    = emulate_on_interception,
2352         [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
2353         [SVM_EXIT_WRITE_DR6]                    = emulate_on_interception,
2354         [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
2355         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
2356         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
2357         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
2358         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
2359         [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
2360         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
2361         [SVM_EXIT_INTR]                         = intr_interception,
2362         [SVM_EXIT_NMI]                          = nmi_interception,
2363         [SVM_EXIT_SMI]                          = nop_on_interception,
2364         [SVM_EXIT_INIT]                         = nop_on_interception,
2365         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
2366         /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
2367         [SVM_EXIT_CPUID]                        = cpuid_interception,
2368         [SVM_EXIT_IRET]                         = iret_interception,
2369         [SVM_EXIT_INVD]                         = emulate_on_interception,
2370         [SVM_EXIT_PAUSE]                        = pause_interception,
2371         [SVM_EXIT_HLT]                          = halt_interception,
2372         [SVM_EXIT_INVLPG]                       = invlpg_interception,
2373         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
2374         [SVM_EXIT_IOIO]                         = io_interception,
2375         [SVM_EXIT_MSR]                          = msr_interception,
2376         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
2377         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
2378         [SVM_EXIT_VMRUN]                        = vmrun_interception,
2379         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
2380         [SVM_EXIT_VMLOAD]                       = vmload_interception,
2381         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
2382         [SVM_EXIT_STGI]                         = stgi_interception,
2383         [SVM_EXIT_CLGI]                         = clgi_interception,
2384         [SVM_EXIT_SKINIT]                       = skinit_interception,
2385         [SVM_EXIT_WBINVD]                       = emulate_on_interception,
2386         [SVM_EXIT_MONITOR]                      = invalid_op_interception,
2387         [SVM_EXIT_MWAIT]                        = invalid_op_interception,
2388         [SVM_EXIT_NPF]                          = pf_interception,
2389 };
2390
2391 static int handle_exit(struct kvm_vcpu *vcpu)
2392 {
2393         struct vcpu_svm *svm = to_svm(vcpu);
2394         struct kvm_run *kvm_run = vcpu->run;
2395         u32 exit_code = svm->vmcb->control.exit_code;
2396
2397         trace_kvm_exit(exit_code, svm->vmcb->save.rip);
2398
2399         if (unlikely(svm->nested.exit_required)) {
2400                 nested_svm_vmexit(svm);
2401                 svm->nested.exit_required = false;
2402
2403                 return 1;
2404         }
2405
2406         if (is_nested(svm)) {
2407                 int vmexit;
2408
2409                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
2410                                         svm->vmcb->control.exit_info_1,
2411                                         svm->vmcb->control.exit_info_2,
2412                                         svm->vmcb->control.exit_int_info,
2413                                         svm->vmcb->control.exit_int_info_err);
2414
2415                 vmexit = nested_svm_exit_special(svm);
2416
2417                 if (vmexit == NESTED_EXIT_CONTINUE)
2418                         vmexit = nested_svm_exit_handled(svm);
2419
2420                 if (vmexit == NESTED_EXIT_DONE)
2421                         return 1;
2422         }
2423
2424         svm_complete_interrupts(svm);
2425
2426         if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
2427                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2428         if (npt_enabled)
2429                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
2430
2431         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
2432                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2433                 kvm_run->fail_entry.hardware_entry_failure_reason
2434                         = svm->vmcb->control.exit_code;
2435                 return 0;
2436         }
2437
2438         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
2439             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
2440             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
2441                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
2442                        "exit_code 0x%x\n",
2443                        __func__, svm->vmcb->control.exit_int_info,
2444                        exit_code);
2445
2446         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
2447             || !svm_exit_handlers[exit_code]) {
2448                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2449                 kvm_run->hw.hardware_exit_reason = exit_code;
2450                 return 0;
2451         }
2452
2453         return svm_exit_handlers[exit_code](svm);
2454 }
2455
2456 static void reload_tss(struct kvm_vcpu *vcpu)
2457 {
2458         int cpu = raw_smp_processor_id();
2459
2460         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2461         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
2462         load_TR_desc();
2463 }
2464
2465 static void pre_svm_run(struct vcpu_svm *svm)
2466 {
2467         int cpu = raw_smp_processor_id();
2468
2469         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2470
2471         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
2472         /* FIXME: handle wraparound of asid_generation */
2473         if (svm->asid_generation != sd->asid_generation)
2474                 new_asid(svm, sd);
2475 }
2476
2477 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
2478 {
2479         struct vcpu_svm *svm = to_svm(vcpu);
2480
2481         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
2482         vcpu->arch.hflags |= HF_NMI_MASK;
2483         svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
2484         ++vcpu->stat.nmi_injections;
2485 }
2486
2487 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
2488 {
2489         struct vmcb_control_area *control;
2490
2491         trace_kvm_inj_virq(irq);
2492
2493         ++svm->vcpu.stat.irq_injections;
2494         control = &svm->vmcb->control;
2495         control->int_vector = irq;
2496         control->int_ctl &= ~V_INTR_PRIO_MASK;
2497         control->int_ctl |= V_IRQ_MASK |
2498                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
2499 }
2500
2501 static void svm_set_irq(struct kvm_vcpu *vcpu)
2502 {
2503         struct vcpu_svm *svm = to_svm(vcpu);
2504
2505         BUG_ON(!(gif_set(svm)));
2506
2507         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
2508                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2509 }
2510
2511 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
2512 {
2513         struct vcpu_svm *svm = to_svm(vcpu);
2514
2515         if (irr == -1)
2516                 return;
2517
2518         if (tpr >= irr)
2519                 svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
2520 }
2521
2522 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
2523 {
2524         struct vcpu_svm *svm = to_svm(vcpu);
2525         struct vmcb *vmcb = svm->vmcb;
2526         return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
2527                 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
2528 }
2529
2530 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
2531 {
2532         struct vcpu_svm *svm = to_svm(vcpu);
2533
2534         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
2535 }
2536
2537 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
2538 {
2539         struct vcpu_svm *svm = to_svm(vcpu);
2540
2541         if (masked) {
2542                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
2543                 svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
2544         } else {
2545                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
2546                 svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
2547         }
2548 }
2549
2550 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
2551 {
2552         struct vcpu_svm *svm = to_svm(vcpu);
2553         struct vmcb *vmcb = svm->vmcb;
2554         int ret;
2555
2556         if (!gif_set(svm) ||
2557              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
2558                 return 0;
2559
2560         ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
2561
2562         if (is_nested(svm))
2563                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
2564
2565         return ret;
2566 }
2567
2568 static void enable_irq_window(struct kvm_vcpu *vcpu)
2569 {
2570         struct vcpu_svm *svm = to_svm(vcpu);
2571
2572         nested_svm_intr(svm);
2573
2574         /* In case GIF=0 we can't rely on the CPU to tell us when GIF
2575          * becomes 1, because that's a separate STGI/VMRUN intercept.
2576          * The next time we get that intercept, this function will be
2577          * called again and we'll get the vintr intercept then. */
2578         if (gif_set(svm)) {
2579                 svm_set_vintr(svm);
2580                 svm_inject_irq(svm, 0x0);
2581         }
2582 }
2583
2584 static void enable_nmi_window(struct kvm_vcpu *vcpu)
2585 {
2586         struct vcpu_svm *svm = to_svm(vcpu);
2587
2588         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
2589             == HF_NMI_MASK)
2590                 return; /* IRET will cause a vm exit */
2591
2592         /* Something prevents NMI from being injected. Single step over
2593            the possible problem (IRET or exception injection or interrupt
2594            shadow) */
2595         svm->nmi_singlestep = true;
2596         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2597         update_db_intercept(vcpu);
2598 }
2599
2600 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
2601 {
2602         return 0;
2603 }
2604
2605 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
2606 {
2607         force_new_asid(vcpu);
2608 }
2609
2610 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
2611 {
2612 }
2613
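/*
 * Keep CR8/TPR and the VMCB's V_TPR field in sync: after the guest has
 * run, V_TPR is copied back into CR8 (unless CR8 writes are being
 * intercepted anyway), and before entry the current CR8 value is
 * loaded into V_TPR.
 */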
2614 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
2615 {
2616         struct vcpu_svm *svm = to_svm(vcpu);
2617
2618         if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
2619                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
2620                 kvm_set_cr8(vcpu, cr8);
2621         }
2622 }
2623
2624 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
2625 {
2626         struct vcpu_svm *svm = to_svm(vcpu);
2627         u64 cr8;
2628
2629         cr8 = kvm_get_cr8(vcpu);
2630         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
2631         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
2632 }
2633
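/*
 * If the vmexit interrupted the delivery of an event, exit_int_info
 * tells us which one; re-queue it so it is re-injected on the next
 * entry.  Software exceptions are not re-queued but re-executed, and
 * NMI completion is tracked via HF_IRET_MASK.
 */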
2634 static void svm_complete_interrupts(struct vcpu_svm *svm)
2635 {
2636         u8 vector;
2637         int type;
2638         u32 exitintinfo = svm->vmcb->control.exit_int_info;
2639
2640         if (svm->vcpu.arch.hflags & HF_IRET_MASK)
2641                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
2642
2643         svm->vcpu.arch.nmi_injected = false;
2644         kvm_clear_exception_queue(&svm->vcpu);
2645         kvm_clear_interrupt_queue(&svm->vcpu);
2646
2647         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
2648                 return;
2649
2650         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
2651         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
2652
2653         switch (type) {
2654         case SVM_EXITINTINFO_TYPE_NMI:
2655                 svm->vcpu.arch.nmi_injected = true;
2656                 break;
2657         case SVM_EXITINTINFO_TYPE_EXEPT:
2658                 /* In case of a software exception do not reinject the
2659                    exception vector, but re-execute the instruction instead */
2660                 if (is_nested(svm))
2661                         break;
2662                 if (kvm_exception_is_soft(vector))
2663                         break;
2664                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
2665                         u32 err = svm->vmcb->control.exit_int_info_err;
2666                         kvm_queue_exception_e(&svm->vcpu, vector, err);
2667
2668                 } else
2669                         kvm_queue_exception(&svm->vcpu, vector);
2670                 break;
2671         case SVM_EXITINTINFO_TYPE_INTR:
2672                 kvm_queue_interrupt(&svm->vcpu, vector, false);
2673                 break;
2674         default:
2675                 break;
2676         }
2677 }
2678
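/*
 * R expands to the register prefix so the asm below works for both
 * 64-bit (%rax, %rbp, ...) and 32-bit (%eax, %ebp, ...) builds.
 */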
2679 #ifdef CONFIG_X86_64
2680 #define R "r"
2681 #else
2682 #define R "e"
2683 #endif
2684
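/*
 * The world switch: GIF is cleared with CLGI, which holds physical
 * interrupts pending, so it is safe to run the VMLOAD/VMRUN/VMSAVE
 * sequence with local interrupts enabled; any interrupt that arrived
 * in between is taken right after STGI.  The inline asm loads the
 * guest GPRs that the hardware does not switch itself (everything but
 * rax/rsp/rip, which live in the VMCB).
 */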
2685 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2686 {
2687         struct vcpu_svm *svm = to_svm(vcpu);
2688         u16 fs_selector;
2689         u16 gs_selector;
2690         u16 ldt_selector;
2691
2692         /*
2693          * A vmexit emulation is required before the vcpu can be executed
2694          * again.
2695          */
2696         if (unlikely(svm->nested.exit_required))
2697                 return;
2698
2699         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
2700         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2701         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
2702
2703         pre_svm_run(svm);
2704
2705         sync_lapic_to_cr8(vcpu);
2706
2707         save_host_msrs(vcpu);
2708         fs_selector = kvm_read_fs();
2709         gs_selector = kvm_read_gs();
2710         ldt_selector = kvm_read_ldt();
2711         svm->vmcb->save.cr2 = vcpu->arch.cr2;
2712         /* required for live migration with NPT */
2713         if (npt_enabled)
2714                 svm->vmcb->save.cr3 = vcpu->arch.cr3;
2715
2716         clgi();
2717
2718         local_irq_enable();
2719
2720         asm volatile (
2721                 "push %%"R"bp; \n\t"
2722                 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
2723                 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
2724                 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
2725                 "mov %c[rsi](%[svm]), %%"R"si \n\t"
2726                 "mov %c[rdi](%[svm]), %%"R"di \n\t"
2727                 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
2728 #ifdef CONFIG_X86_64
2729                 "mov %c[r8](%[svm]),  %%r8  \n\t"
2730                 "mov %c[r9](%[svm]),  %%r9  \n\t"
2731                 "mov %c[r10](%[svm]), %%r10 \n\t"
2732                 "mov %c[r11](%[svm]), %%r11 \n\t"
2733                 "mov %c[r12](%[svm]), %%r12 \n\t"
2734                 "mov %c[r13](%[svm]), %%r13 \n\t"
2735                 "mov %c[r14](%[svm]), %%r14 \n\t"
2736                 "mov %c[r15](%[svm]), %%r15 \n\t"
2737 #endif
2738
2739                 /* Enter guest mode */
2740                 "push %%"R"ax \n\t"
2741                 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
2742                 __ex(SVM_VMLOAD) "\n\t"
2743                 __ex(SVM_VMRUN) "\n\t"
2744                 __ex(SVM_VMSAVE) "\n\t"
2745                 "pop %%"R"ax \n\t"
2746
2747                 /* Save guest registers, load host registers */
2748                 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
2749                 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
2750                 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
2751                 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
2752                 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
2753                 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
2754 #ifdef CONFIG_X86_64
2755                 "mov %%r8,  %c[r8](%[svm]) \n\t"
2756                 "mov %%r9,  %c[r9](%[svm]) \n\t"
2757                 "mov %%r10, %c[r10](%[svm]) \n\t"
2758                 "mov %%r11, %c[r11](%[svm]) \n\t"
2759                 "mov %%r12, %c[r12](%[svm]) \n\t"
2760                 "mov %%r13, %c[r13](%[svm]) \n\t"
2761                 "mov %%r14, %c[r14](%[svm]) \n\t"
2762                 "mov %%r15, %c[r15](%[svm]) \n\t"
2763 #endif
2764                 "pop %%"R"bp"
2765                 :
2766                 : [svm]"a"(svm),
2767                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
2768                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
2769                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
2770                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
2771                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
2772                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
2773                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
2774 #ifdef CONFIG_X86_64
2775                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
2776                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
2777                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
2778                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
2779                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
2780                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
2781                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
2782                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
2783 #endif
2784                 : "cc", "memory"
2785                 , R"bx", R"cx", R"dx", R"si", R"di"
2786 #ifdef CONFIG_X86_64
2787                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
2788 #endif
2789                 );
2790
2791         vcpu->arch.cr2 = svm->vmcb->save.cr2;
2792         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
2793         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
2794         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
2795
2796         kvm_load_fs(fs_selector);
2797         kvm_load_gs(gs_selector);
2798         kvm_load_ldt(ldt_selector);
2799         load_host_msrs(vcpu);
2800
2801         reload_tss(vcpu);
2802
2803         local_irq_disable();
2804
2805         stgi();
2806
2807         sync_cr8_to_lapic(vcpu);
2808
2809         svm->next_rip = 0;
2810
2811         if (npt_enabled) {
2812                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
2813                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
2814         }
2815 }
2816
2817 #undef R
2818
2819 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
2820 {
2821         struct vcpu_svm *svm = to_svm(vcpu);
2822
2823         if (npt_enabled) {
2824                 svm->vmcb->control.nested_cr3 = root;
2825                 force_new_asid(vcpu);
2826                 return;
2827         }
2828
2829         svm->vmcb->save.cr3 = root;
2830         force_new_asid(vcpu);
2831 }
2832
2833 static int is_disabled(void)
2834 {
2835         u64 vm_cr;
2836
2837         rdmsrl(MSR_VM_CR, vm_cr);
2838         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
2839                 return 1;
2840
2841         return 0;
2842 }
2843
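/* 0f 01 d9 is the instruction encoding of VMMCALL. */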
2844 static void
2845 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2846 {
2847         /*
2848          * Patch in the VMMCALL instruction:
2849          */
2850         hypercall[0] = 0x0f;
2851         hypercall[1] = 0x01;
2852         hypercall[2] = 0xd9;
2853 }
2854
2855 static void svm_check_processor_compat(void *rtn)
2856 {
2857         *(int *)rtn = 0;
2858 }
2859
2860 static bool svm_cpu_has_accelerated_tpr(void)
2861 {
2862         return false;
2863 }
2864
2865 static int get_npt_level(void)
2866 {
2867 #ifdef CONFIG_X86_64
2868         return PT64_ROOT_LEVEL;
2869 #else
2870         return PT32E_ROOT_LEVEL;
2871 #endif
2872 }
2873
2874 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
2875 {
2876         return 0;
2877 }
2878
2879 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
2880 {
2881 }
2882
2883 static const struct trace_print_flags svm_exit_reasons_str[] = {
2884         { SVM_EXIT_READ_CR0,                    "read_cr0" },
2885         { SVM_EXIT_READ_CR3,                    "read_cr3" },
2886         { SVM_EXIT_READ_CR4,                    "read_cr4" },
2887         { SVM_EXIT_READ_CR8,                    "read_cr8" },
2888         { SVM_EXIT_WRITE_CR0,                   "write_cr0" },
2889         { SVM_EXIT_WRITE_CR3,                   "write_cr3" },
2890         { SVM_EXIT_WRITE_CR4,                   "write_cr4" },
2891         { SVM_EXIT_WRITE_CR8,                   "write_cr8" },
2892         { SVM_EXIT_READ_DR0,                    "read_dr0" },
2893         { SVM_EXIT_READ_DR1,                    "read_dr1" },
2894         { SVM_EXIT_READ_DR2,                    "read_dr2" },
2895         { SVM_EXIT_READ_DR3,                    "read_dr3" },
2896         { SVM_EXIT_WRITE_DR0,                   "write_dr0" },
2897         { SVM_EXIT_WRITE_DR1,                   "write_dr1" },
2898         { SVM_EXIT_WRITE_DR2,                   "write_dr2" },
2899         { SVM_EXIT_WRITE_DR3,                   "write_dr3" },
2900         { SVM_EXIT_WRITE_DR5,                   "write_dr5" },
2901         { SVM_EXIT_WRITE_DR7,                   "write_dr7" },
2902         { SVM_EXIT_EXCP_BASE + DB_VECTOR,       "DB excp" },
2903         { SVM_EXIT_EXCP_BASE + BP_VECTOR,       "BP excp" },
2904         { SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" },
2905         { SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" },
2906         { SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" },
2907         { SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" },
2908         { SVM_EXIT_INTR,                        "interrupt" },
2909         { SVM_EXIT_NMI,                         "nmi" },
2910         { SVM_EXIT_SMI,                         "smi" },
2911         { SVM_EXIT_INIT,                        "init" },
2912         { SVM_EXIT_VINTR,                       "vintr" },
2913         { SVM_EXIT_CPUID,                       "cpuid" },
2914         { SVM_EXIT_INVD,                        "invd" },
2915         { SVM_EXIT_HLT,                         "hlt" },
2916         { SVM_EXIT_INVLPG,                      "invlpg" },
2917         { SVM_EXIT_INVLPGA,                     "invlpga" },
2918         { SVM_EXIT_IOIO,                        "io" },
2919         { SVM_EXIT_MSR,                         "msr" },
2920         { SVM_EXIT_TASK_SWITCH,                 "task_switch" },
2921         { SVM_EXIT_SHUTDOWN,                    "shutdown" },
2922         { SVM_EXIT_VMRUN,                       "vmrun" },
2923         { SVM_EXIT_VMMCALL,                     "hypercall" },
2924         { SVM_EXIT_VMLOAD,                      "vmload" },
2925         { SVM_EXIT_VMSAVE,                      "vmsave" },
2926         { SVM_EXIT_STGI,                        "stgi" },
2927         { SVM_EXIT_CLGI,                        "clgi" },
2928         { SVM_EXIT_SKINIT,                      "skinit" },
2929         { SVM_EXIT_WBINVD,                      "wbinvd" },
2930         { SVM_EXIT_MONITOR,                     "monitor" },
2931         { SVM_EXIT_MWAIT,                       "mwait" },
2932         { SVM_EXIT_NPF,                         "npf" },
2933         { -1, NULL }
2934 };
2935
2936 static int svm_get_lpage_level(void)
2937 {
2938         return PT_PDPE_LEVEL;
2939 }
2940
2941 static bool svm_rdtscp_supported(void)
2942 {
2943         return false;
2944 }
2945
2946 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
2947 {
2948         struct vcpu_svm *svm = to_svm(vcpu);
2949
2950         update_cr0_intercept(svm);
2951         svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
2952 }
2953
2954 static struct kvm_x86_ops svm_x86_ops = {
2955         .cpu_has_kvm_support = has_svm,
2956         .disabled_by_bios = is_disabled,
2957         .hardware_setup = svm_hardware_setup,
2958         .hardware_unsetup = svm_hardware_unsetup,
2959         .check_processor_compatibility = svm_check_processor_compat,
2960         .hardware_enable = svm_hardware_enable,
2961         .hardware_disable = svm_hardware_disable,
2962         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
2963
2964         .vcpu_create = svm_create_vcpu,
2965         .vcpu_free = svm_free_vcpu,
2966         .vcpu_reset = svm_vcpu_reset,
2967
2968         .prepare_guest_switch = svm_prepare_guest_switch,
2969         .vcpu_load = svm_vcpu_load,
2970         .vcpu_put = svm_vcpu_put,
2971
2972         .set_guest_debug = svm_guest_debug,
2973         .get_msr = svm_get_msr,
2974         .set_msr = svm_set_msr,
2975         .get_segment_base = svm_get_segment_base,
2976         .get_segment = svm_get_segment,
2977         .set_segment = svm_set_segment,
2978         .get_cpl = svm_get_cpl,
2979         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
2980         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
2981         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
2982         .set_cr0 = svm_set_cr0,
2983         .set_cr3 = svm_set_cr3,
2984         .set_cr4 = svm_set_cr4,
2985         .set_efer = svm_set_efer,
2986         .get_idt = svm_get_idt,
2987         .set_idt = svm_set_idt,
2988         .get_gdt = svm_get_gdt,
2989         .set_gdt = svm_set_gdt,
2990         .get_dr = svm_get_dr,
2991         .set_dr = svm_set_dr,
2992         .cache_reg = svm_cache_reg,
2993         .get_rflags = svm_get_rflags,
2994         .set_rflags = svm_set_rflags,
2995         .fpu_activate = svm_fpu_activate,
2996         .fpu_deactivate = svm_fpu_deactivate,
2997
2998         .tlb_flush = svm_flush_tlb,
2999
3000         .run = svm_vcpu_run,
3001         .handle_exit = handle_exit,
3002         .skip_emulated_instruction = skip_emulated_instruction,
3003         .set_interrupt_shadow = svm_set_interrupt_shadow,
3004         .get_interrupt_shadow = svm_get_interrupt_shadow,
3005         .patch_hypercall = svm_patch_hypercall,
3006         .set_irq = svm_set_irq,
3007         .set_nmi = svm_inject_nmi,
3008         .queue_exception = svm_queue_exception,
3009         .interrupt_allowed = svm_interrupt_allowed,
3010         .nmi_allowed = svm_nmi_allowed,
3011         .get_nmi_mask = svm_get_nmi_mask,
3012         .set_nmi_mask = svm_set_nmi_mask,
3013         .enable_nmi_window = enable_nmi_window,
3014         .enable_irq_window = enable_irq_window,
3015         .update_cr8_intercept = update_cr8_intercept,
3016
3017         .set_tss_addr = svm_set_tss_addr,
3018         .get_tdp_level = get_npt_level,
3019         .get_mt_mask = svm_get_mt_mask,
3020
3021         .exit_reasons_str = svm_exit_reasons_str,
3022         .get_lpage_level = svm_get_lpage_level,
3023
3024         .cpuid_update = svm_cpuid_update,
3025
3026         .rdtscp_supported = svm_rdtscp_supported,
3027 };
3028
3029 static int __init svm_init(void)
3030 {
3031         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
3032                               THIS_MODULE);
3033 }
3034
3035 static void __exit svm_exit(void)
3036 {
3037         kvm_exit();
3038 }
3039
3040 module_init(svm_init)
3041 module_exit(svm_exit)