KVM: Check for pending events before attempting injection
[pandora-kernel.git] arch/x86/kvm/x86.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29
30 #include <linux/clocksource.h>
31 #include <linux/interrupt.h>
32 #include <linux/kvm.h>
33 #include <linux/fs.h>
34 #include <linux/vmalloc.h>
35 #include <linux/module.h>
36 #include <linux/mman.h>
37 #include <linux/highmem.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/cpufreq.h>
41 #include <linux/user-return-notifier.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/perf_event.h>
45 #include <linux/uaccess.h>
46 #include <trace/events/kvm.h>
47
48 #define CREATE_TRACE_POINTS
49 #include "trace.h"
50
51 #include <asm/debugreg.h>
52 #include <asm/msr.h>
53 #include <asm/desc.h>
54 #include <asm/mtrr.h>
55 #include <asm/mce.h>
56 #include <asm/i387.h>
57 #include <asm/xcr.h>
58 #include <asm/pvclock.h>
59 #include <asm/div64.h>
60
61 #define MAX_IO_MSRS 256
62 #define CR0_RESERVED_BITS                                               \
63         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
64                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
65                           | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
66 #define CR4_RESERVED_BITS                                               \
67         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
68                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
69                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
70                           | X86_CR4_OSXSAVE \
71                           | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
72
73 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
74
75 #define KVM_MAX_MCE_BANKS 32
76 #define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
77
78 /* EFER defaults:
79  * - enable syscall by default because it's emulated by KVM
80  * - enable LME and LMA by default on 64-bit KVM
81  */
82 #ifdef CONFIG_X86_64
83 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
84 #else
85 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
86 #endif
87
88 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
89 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
90
91 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
92 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
93                                     struct kvm_cpuid_entry2 __user *entries);
94
95 struct kvm_x86_ops *kvm_x86_ops;
96 EXPORT_SYMBOL_GPL(kvm_x86_ops);
97
98 int ignore_msrs = 0;
99 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
100
101 #define KVM_NR_SHARED_MSRS 16
102
103 struct kvm_shared_msrs_global {
104         int nr;
105         u32 msrs[KVM_NR_SHARED_MSRS];
106 };
107
108 struct kvm_shared_msrs {
109         struct user_return_notifier urn;
110         bool registered;
111         struct kvm_shared_msr_values {
112                 u64 host;
113                 u64 curr;
114         } values[KVM_NR_SHARED_MSRS];
115 };
116
117 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
118 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
119
120 struct kvm_stats_debugfs_item debugfs_entries[] = {
121         { "pf_fixed", VCPU_STAT(pf_fixed) },
122         { "pf_guest", VCPU_STAT(pf_guest) },
123         { "tlb_flush", VCPU_STAT(tlb_flush) },
124         { "invlpg", VCPU_STAT(invlpg) },
125         { "exits", VCPU_STAT(exits) },
126         { "io_exits", VCPU_STAT(io_exits) },
127         { "mmio_exits", VCPU_STAT(mmio_exits) },
128         { "signal_exits", VCPU_STAT(signal_exits) },
129         { "irq_window", VCPU_STAT(irq_window_exits) },
130         { "nmi_window", VCPU_STAT(nmi_window_exits) },
131         { "halt_exits", VCPU_STAT(halt_exits) },
132         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
133         { "hypercalls", VCPU_STAT(hypercalls) },
134         { "request_irq", VCPU_STAT(request_irq_exits) },
135         { "irq_exits", VCPU_STAT(irq_exits) },
136         { "host_state_reload", VCPU_STAT(host_state_reload) },
137         { "efer_reload", VCPU_STAT(efer_reload) },
138         { "fpu_reload", VCPU_STAT(fpu_reload) },
139         { "insn_emulation", VCPU_STAT(insn_emulation) },
140         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
141         { "irq_injections", VCPU_STAT(irq_injections) },
142         { "nmi_injections", VCPU_STAT(nmi_injections) },
143         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
144         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
145         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
146         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
147         { "mmu_flooded", VM_STAT(mmu_flooded) },
148         { "mmu_recycled", VM_STAT(mmu_recycled) },
149         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
150         { "mmu_unsync", VM_STAT(mmu_unsync) },
151         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
152         { "largepages", VM_STAT(lpages) },
153         { NULL }
154 };
155
156 u64 __read_mostly host_xcr0;
157
158 static inline u32 bit(int bitno)
159 {
160         return 1 << (bitno & 31);
161 }
162
163 static void kvm_on_user_return(struct user_return_notifier *urn)
164 {
165         unsigned slot;
166         struct kvm_shared_msrs *locals
167                 = container_of(urn, struct kvm_shared_msrs, urn);
168         struct kvm_shared_msr_values *values;
169
170         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
171                 values = &locals->values[slot];
172                 if (values->host != values->curr) {
173                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
174                         values->curr = values->host;
175                 }
176         }
177         locals->registered = false;
178         user_return_notifier_unregister(urn);
179 }
180
181 static void shared_msr_update(unsigned slot, u32 msr)
182 {
183         struct kvm_shared_msrs *smsr;
184         u64 value;
185
186         smsr = &__get_cpu_var(shared_msrs);
187         /* this is only a read, and nobody should be modifying it at this
188          * time, so no locking is needed */
189         if (slot >= shared_msrs_global.nr) {
190                 printk(KERN_ERR "kvm: invalid MSR slot!");
191                 return;
192         }
193         rdmsrl_safe(msr, &value);
194         smsr->values[slot].host = value;
195         smsr->values[slot].curr = value;
196 }
197
198 void kvm_define_shared_msr(unsigned slot, u32 msr)
199 {
200         if (slot >= shared_msrs_global.nr)
201                 shared_msrs_global.nr = slot + 1;
202         shared_msrs_global.msrs[slot] = msr;
203         /* make sure shared_msrs_global has been updated before it is used */
204         smp_wmb();
205 }
206 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
207
208 static void kvm_shared_msr_cpu_online(void)
209 {
210         unsigned i;
211
212         for (i = 0; i < shared_msrs_global.nr; ++i)
213                 shared_msr_update(i, shared_msrs_global.msrs[i]);
214 }
215
216 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
217 {
218         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
219
220         if (((value ^ smsr->values[slot].curr) & mask) == 0)
221                 return;
222         smsr->values[slot].curr = value;
223         wrmsrl(shared_msrs_global.msrs[slot], value);
224         if (!smsr->registered) {
225                 smsr->urn.on_user_return = kvm_on_user_return;
226                 user_return_notifier_register(&smsr->urn);
227                 smsr->registered = true;
228         }
229 }
230 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
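/*
 * Editor's note: an illustrative sketch, not part of the original x86.c.
 * It shows roughly how a vendor module (e.g. vmx.c) is expected to use the
 * shared-MSR helpers above; the function names, the slot number and the
 * choice of MSR_STAR are hypothetical examples.
 */
static inline void example_define_shared_msrs(void)
{
        /* at module init: declare which MSRs are switched, one slot each */
        kvm_define_shared_msr(0, MSR_STAR);
}

static inline void example_prepare_guest_msrs(u64 guest_star)
{
        /*
         * before entering the guest: install the guest value; the host value
         * is restored lazily by kvm_on_user_return() when this thread next
         * returns to userspace
         */
        kvm_set_shared_msr(0, guest_star, ~0ULL);
}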
231
232 static void drop_user_return_notifiers(void *ignore)
233 {
234         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
235
236         if (smsr->registered)
237                 kvm_on_user_return(&smsr->urn);
238 }
239
240 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
241 {
242         if (irqchip_in_kernel(vcpu->kvm))
243                 return vcpu->arch.apic_base;
244         else
245                 return vcpu->arch.apic_base;
246 }
247 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
248
249 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
250 {
251         /* TODO: reserve bits check */
252         if (irqchip_in_kernel(vcpu->kvm))
253                 kvm_lapic_set_base(vcpu, data);
254         else
255                 vcpu->arch.apic_base = data;
256 }
257 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
258
259 #define EXCPT_BENIGN            0
260 #define EXCPT_CONTRIBUTORY      1
261 #define EXCPT_PF                2
262
263 static int exception_class(int vector)
264 {
265         switch (vector) {
266         case PF_VECTOR:
267                 return EXCPT_PF;
268         case DE_VECTOR:
269         case TS_VECTOR:
270         case NP_VECTOR:
271         case SS_VECTOR:
272         case GP_VECTOR:
273                 return EXCPT_CONTRIBUTORY;
274         default:
275                 break;
276         }
277         return EXCPT_BENIGN;
278 }
279
280 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
281                 unsigned nr, bool has_error, u32 error_code,
282                 bool reinject)
283 {
284         u32 prev_nr;
285         int class1, class2;
286
287         kvm_make_request(KVM_REQ_EVENT, vcpu);
288
289         if (!vcpu->arch.exception.pending) {
290         queue:
291                 vcpu->arch.exception.pending = true;
292                 vcpu->arch.exception.has_error_code = has_error;
293                 vcpu->arch.exception.nr = nr;
294                 vcpu->arch.exception.error_code = error_code;
295                 vcpu->arch.exception.reinject = reinject;
296                 return;
297         }
298
299         /* an exception is already pending: check whether it escalates */
300         prev_nr = vcpu->arch.exception.nr;
301         if (prev_nr == DF_VECTOR) {
302                 /* triple fault -> shutdown */
303                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
304                 return;
305         }
306         class1 = exception_class(prev_nr);
307         class2 = exception_class(nr);
308         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
309                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
310                 /* generate double fault per SDM Table 5-5 */
311                 vcpu->arch.exception.pending = true;
312                 vcpu->arch.exception.has_error_code = true;
313                 vcpu->arch.exception.nr = DF_VECTOR;
314                 vcpu->arch.exception.error_code = 0;
315         } else
316                 /* replace the previous exception with the new one in the
317                    hope that instruction re-execution will regenerate the
318                    lost exception */
319                 goto queue;
320 }
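/*
 * Editor's note (not in the original file): the escalation above implements
 * SDM Table 5-5, "Conditions for Generating a Double Fault":
 *
 *   pending exception \ new one:   benign      contributory   page fault
 *   benign                         deliver     deliver        deliver
 *   contributory                   deliver     #DF            deliver
 *   page fault                     deliver     #DF            #DF
 *
 * "deliver" here means the new exception simply replaces the pending one
 * (the queue: path), and a fault raised while a #DF is already pending
 * becomes a triple fault, i.e. KVM_REQ_TRIPLE_FAULT / VM shutdown.
 */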
321
322 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
323 {
324         kvm_multiple_exception(vcpu, nr, false, 0, false);
325 }
326 EXPORT_SYMBOL_GPL(kvm_queue_exception);
327
328 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
329 {
330         kvm_multiple_exception(vcpu, nr, false, 0, true);
331 }
332 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
333
334 void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
335 {
336         unsigned error_code = vcpu->arch.fault.error_code;
337
338         ++vcpu->stat.pf_guest;
339         vcpu->arch.cr2 = vcpu->arch.fault.address;
340         kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
341 }
342
343 void kvm_propagate_fault(struct kvm_vcpu *vcpu)
344 {
345         u32 nested, error;
346
347         error   = vcpu->arch.fault.error_code;
348         nested  = error &  PFERR_NESTED_MASK;
349         error   = error & ~PFERR_NESTED_MASK;
350
351         vcpu->arch.fault.error_code = error;
352
353         if (mmu_is_nested(vcpu) && !nested)
354                 vcpu->arch.nested_mmu.inject_page_fault(vcpu);
355         else
356                 vcpu->arch.mmu.inject_page_fault(vcpu);
357 }
358
359 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
360 {
361         kvm_make_request(KVM_REQ_EVENT, vcpu);
362         vcpu->arch.nmi_pending = 1;
363 }
364 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
365
366 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
367 {
368         kvm_multiple_exception(vcpu, nr, true, error_code, false);
369 }
370 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
371
372 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
373 {
374         kvm_multiple_exception(vcpu, nr, true, error_code, true);
375 }
376 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
377
378 /*
379  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
380  * a #GP and return false.
381  */
382 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
383 {
384         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
385                 return true;
386         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
387         return false;
388 }
389 EXPORT_SYMBOL_GPL(kvm_require_cpl);
390
391 /*
392  * This function is used to read from the physical memory of the currently
393  * running guest. It differs from kvm_read_guest_page in that it can read
394  * from guest-physical memory or from the guest's guest-physical (nested) memory.
395  */
396 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
397                             gfn_t ngfn, void *data, int offset, int len,
398                             u32 access)
399 {
400         gfn_t real_gfn;
401         gpa_t ngpa;
402
403         ngpa     = gfn_to_gpa(ngfn);
404         real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
405         if (real_gfn == UNMAPPED_GVA)
406                 return -EFAULT;
407
408         real_gfn = gpa_to_gfn(real_gfn);
409
410         return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
411 }
412 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
413
414 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
415                                void *data, int offset, int len, u32 access)
416 {
417         return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
418                                        data, offset, len, access);
419 }
420
421 /*
422  * Load the PAE PDPTRs.  Return true if they are all valid.
423  */
424 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
425 {
426         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
427         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
428         int i;
429         int ret;
430         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
431
432         ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
433                                       offset * sizeof(u64), sizeof(pdpte),
434                                       PFERR_USER_MASK|PFERR_WRITE_MASK);
435         if (ret < 0) {
436                 ret = 0;
437                 goto out;
438         }
439         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
440                 if (is_present_gpte(pdpte[i]) &&
441                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
442                         ret = 0;
443                         goto out;
444                 }
445         }
446         ret = 1;
447
448         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
449         __set_bit(VCPU_EXREG_PDPTR,
450                   (unsigned long *)&vcpu->arch.regs_avail);
451         __set_bit(VCPU_EXREG_PDPTR,
452                   (unsigned long *)&vcpu->arch.regs_dirty);
453 out:
454
455         return ret;
456 }
457 EXPORT_SYMBOL_GPL(load_pdptrs);
458
459 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
460 {
461         u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
462         bool changed = true;
463         int offset;
464         gfn_t gfn;
465         int r;
466
467         if (is_long_mode(vcpu) || !is_pae(vcpu))
468                 return false;
469
470         if (!test_bit(VCPU_EXREG_PDPTR,
471                       (unsigned long *)&vcpu->arch.regs_avail))
472                 return true;
473
474         gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
475         offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
476         r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
477                                        PFERR_USER_MASK | PFERR_WRITE_MASK);
478         if (r < 0)
479                 goto out;
480         changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
481 out:
482
483         return changed;
484 }
485
486 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
487 {
488         unsigned long old_cr0 = kvm_read_cr0(vcpu);
489         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
490                                     X86_CR0_CD | X86_CR0_NW;
491
492         cr0 |= X86_CR0_ET;
493
494 #ifdef CONFIG_X86_64
495         if (cr0 & 0xffffffff00000000UL)
496                 return 1;
497 #endif
498
499         cr0 &= ~CR0_RESERVED_BITS;
500
501         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
502                 return 1;
503
504         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
505                 return 1;
506
507         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
508 #ifdef CONFIG_X86_64
509                 if ((vcpu->arch.efer & EFER_LME)) {
510                         int cs_db, cs_l;
511
512                         if (!is_pae(vcpu))
513                                 return 1;
514                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
515                         if (cs_l)
516                                 return 1;
517                 } else
518 #endif
519                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
520                                                  vcpu->arch.cr3))
521                         return 1;
522         }
523
524         kvm_x86_ops->set_cr0(vcpu, cr0);
525
526         if ((cr0 ^ old_cr0) & update_bits)
527                 kvm_mmu_reset_context(vcpu);
528         return 0;
529 }
530 EXPORT_SYMBOL_GPL(kvm_set_cr0);
531
532 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
533 {
534         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
535 }
536 EXPORT_SYMBOL_GPL(kvm_lmsw);
537
538 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
539 {
540         u64 xcr0;
541
542         /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
543         if (index != XCR_XFEATURE_ENABLED_MASK)
544                 return 1;
545         xcr0 = xcr;
546         if (kvm_x86_ops->get_cpl(vcpu) != 0)
547                 return 1;
548         if (!(xcr0 & XSTATE_FP))
549                 return 1;
550         if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
551                 return 1;
552         if (xcr0 & ~host_xcr0)
553                 return 1;
554         vcpu->arch.xcr0 = xcr0;
555         vcpu->guest_xcr0_loaded = 0;
556         return 0;
557 }
558
559 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
560 {
561         if (__kvm_set_xcr(vcpu, index, xcr)) {
562                 kvm_inject_gp(vcpu, 0);
563                 return 1;
564         }
565         return 0;
566 }
567 EXPORT_SYMBOL_GPL(kvm_set_xcr);
568
569 static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
570 {
571         struct kvm_cpuid_entry2 *best;
572
573         best = kvm_find_cpuid_entry(vcpu, 1, 0);
574         return best && (best->ecx & bit(X86_FEATURE_XSAVE));
575 }
576
577 static void update_cpuid(struct kvm_vcpu *vcpu)
578 {
579         struct kvm_cpuid_entry2 *best;
580
581         best = kvm_find_cpuid_entry(vcpu, 1, 0);
582         if (!best)
583                 return;
584
585         /* Update OSXSAVE bit */
586         if (cpu_has_xsave && best->function == 0x1) {
587                 best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
588                 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
589                         best->ecx |= bit(X86_FEATURE_OSXSAVE);
590         }
591 }
592
593 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
594 {
595         unsigned long old_cr4 = kvm_read_cr4(vcpu);
596         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
597
598         if (cr4 & CR4_RESERVED_BITS)
599                 return 1;
600
601         if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
602                 return 1;
603
604         if (is_long_mode(vcpu)) {
605                 if (!(cr4 & X86_CR4_PAE))
606                         return 1;
607         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
608                    && ((cr4 ^ old_cr4) & pdptr_bits)
609                    && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
610                 return 1;
611
612         if (cr4 & X86_CR4_VMXE)
613                 return 1;
614
615         kvm_x86_ops->set_cr4(vcpu, cr4);
616
617         if ((cr4 ^ old_cr4) & pdptr_bits)
618                 kvm_mmu_reset_context(vcpu);
619
620         if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
621                 update_cpuid(vcpu);
622
623         return 0;
624 }
625 EXPORT_SYMBOL_GPL(kvm_set_cr4);
626
627 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
628 {
629         if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
630                 kvm_mmu_sync_roots(vcpu);
631                 kvm_mmu_flush_tlb(vcpu);
632                 return 0;
633         }
634
635         if (is_long_mode(vcpu)) {
636                 if (cr3 & CR3_L_MODE_RESERVED_BITS)
637                         return 1;
638         } else {
639                 if (is_pae(vcpu)) {
640                         if (cr3 & CR3_PAE_RESERVED_BITS)
641                                 return 1;
642                         if (is_paging(vcpu) &&
643                             !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
644                                 return 1;
645                 }
646                 /*
647                  * We don't check reserved bits in nonpae mode, because
648                  * this isn't enforced, and VMware depends on this.
649                  */
650         }
651
652         /*
653          * Does the new cr3 value map to physical memory? (Note, we
654          * catch an invalid cr3 even in real-mode, because it would
655          * cause trouble later on when we turn on paging anyway.)
656          *
657          * A real CPU would silently accept an invalid cr3 and would
658          * attempt to use it - with largely undefined (and often hard
659          * to debug) behavior on the guest side.
660          */
661         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
662                 return 1;
663         vcpu->arch.cr3 = cr3;
664         vcpu->arch.mmu.new_cr3(vcpu);
665         return 0;
666 }
667 EXPORT_SYMBOL_GPL(kvm_set_cr3);
668
669 int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
670 {
671         if (cr8 & CR8_RESERVED_BITS)
672                 return 1;
673         if (irqchip_in_kernel(vcpu->kvm))
674                 kvm_lapic_set_tpr(vcpu, cr8);
675         else
676                 vcpu->arch.cr8 = cr8;
677         return 0;
678 }
679
680 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
681 {
682         if (__kvm_set_cr8(vcpu, cr8))
683                 kvm_inject_gp(vcpu, 0);
684 }
685 EXPORT_SYMBOL_GPL(kvm_set_cr8);
686
687 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
688 {
689         if (irqchip_in_kernel(vcpu->kvm))
690                 return kvm_lapic_get_cr8(vcpu);
691         else
692                 return vcpu->arch.cr8;
693 }
694 EXPORT_SYMBOL_GPL(kvm_get_cr8);
695
696 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
697 {
698         switch (dr) {
699         case 0 ... 3:
700                 vcpu->arch.db[dr] = val;
701                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
702                         vcpu->arch.eff_db[dr] = val;
703                 break;
704         case 4:
705                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
706                         return 1; /* #UD */
707                 /* fall through */
708         case 6:
709                 if (val & 0xffffffff00000000ULL)
710                         return -1; /* #GP */
711                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
712                 break;
713         case 5:
714                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
715                         return 1; /* #UD */
716                 /* fall through */
717         default: /* 7 */
718                 if (val & 0xffffffff00000000ULL)
719                         return -1; /* #GP */
720                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
721                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
722                         kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
723                         vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
724                 }
725                 break;
726         }
727
728         return 0;
729 }
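/*
 * Editor's note (not in the original file): dr4 and dr5 are architectural
 * aliases of dr6 and dr7 when CR4.DE is clear, which is why cases 4 and 5
 * fall through above; with CR4.DE set they raise #UD instead, signalled
 * here by returning 1 (kvm_set_dr() below maps 1 to #UD and -1 to #GP).
 */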
730
731 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
732 {
733         int res;
734
735         res = __kvm_set_dr(vcpu, dr, val);
736         if (res > 0)
737                 kvm_queue_exception(vcpu, UD_VECTOR);
738         else if (res < 0)
739                 kvm_inject_gp(vcpu, 0);
740
741         return res;
742 }
743 EXPORT_SYMBOL_GPL(kvm_set_dr);
744
745 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
746 {
747         switch (dr) {
748         case 0 ... 3:
749                 *val = vcpu->arch.db[dr];
750                 break;
751         case 4:
752                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
753                         return 1;
754                 /* fall through */
755         case 6:
756                 *val = vcpu->arch.dr6;
757                 break;
758         case 5:
759                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
760                         return 1;
761                 /* fall through */
762         default: /* 7 */
763                 *val = vcpu->arch.dr7;
764                 break;
765         }
766
767         return 0;
768 }
769
770 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
771 {
772         if (_kvm_get_dr(vcpu, dr, val)) {
773                 kvm_queue_exception(vcpu, UD_VECTOR);
774                 return 1;
775         }
776         return 0;
777 }
778 EXPORT_SYMBOL_GPL(kvm_get_dr);
779
780 /*
781  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
782  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
783  *
784  * This list is modified at module load time to reflect the
785  * capabilities of the host CPU. The capability test skips MSRs that are
786  * kvm-specific; those are placed at the beginning of the list.
787  */
788
789 #define KVM_SAVE_MSRS_BEGIN     7
790 static u32 msrs_to_save[] = {
791         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
792         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
793         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
794         HV_X64_MSR_APIC_ASSIST_PAGE,
795         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
796         MSR_STAR,
797 #ifdef CONFIG_X86_64
798         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
799 #endif
800         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
801 };
802
803 static unsigned num_msrs_to_save;
804
805 static u32 emulated_msrs[] = {
806         MSR_IA32_MISC_ENABLE,
807         MSR_IA32_MCG_STATUS,
808         MSR_IA32_MCG_CTL,
809 };
810
811 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
812 {
813         u64 old_efer = vcpu->arch.efer;
814
815         if (efer & efer_reserved_bits)
816                 return 1;
817
818         if (is_paging(vcpu)
819             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
820                 return 1;
821
822         if (efer & EFER_FFXSR) {
823                 struct kvm_cpuid_entry2 *feat;
824
825                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
826                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
827                         return 1;
828         }
829
830         if (efer & EFER_SVME) {
831                 struct kvm_cpuid_entry2 *feat;
832
833                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
834                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
835                         return 1;
836         }
837
838         efer &= ~EFER_LMA;
839         efer |= vcpu->arch.efer & EFER_LMA;
840
841         kvm_x86_ops->set_efer(vcpu, efer);
842
843         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
844         kvm_mmu_reset_context(vcpu);
845
846         /* Update reserved bits */
847         if ((efer ^ old_efer) & EFER_NX)
848                 kvm_mmu_reset_context(vcpu);
849
850         return 0;
851 }
852
853 void kvm_enable_efer_bits(u64 mask)
854 {
855        efer_reserved_bits &= ~mask;
856 }
857 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
858
859
860 /*
861  * Writes msr value into the appropriate "register".
862  * Returns 0 on success, non-0 otherwise.
863  * Assumes vcpu_load() was already called.
864  */
865 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
866 {
867         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
868 }
869
870 /*
871  * Adapt set_msr() to msr_io()'s calling convention
872  */
873 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
874 {
875         return kvm_set_msr(vcpu, index, *data);
876 }
877
878 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
879 {
880         int version;
881         int r;
882         struct pvclock_wall_clock wc;
883         struct timespec boot;
884
885         if (!wall_clock)
886                 return;
887
888         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
889         if (r)
890                 return;
891
892         if (version & 1)
893                 ++version;  /* first time write, random junk */
894
895         ++version;
896
897         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
898
899         /*
900          * The guest calculates current wall clock time by adding
901          * system time (updated by kvm_write_guest_time below) to the
902          * wall clock specified here.  Guest system time equals host
903          * system time for us, thus we must fill in host boot time here.
904          */
905         getboottime(&boot);
906
907         wc.sec = boot.tv_sec;
908         wc.nsec = boot.tv_nsec;
909         wc.version = version;
910
911         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
912
913         version++;
914         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
915 }
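/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The guest reads the wall clock published above with a seqcount-style
 * retry on the version field (an odd version means an update is in
 * progress); pvclock_read_wallclock() on the guest side follows the same
 * pattern.  The function name here is a hypothetical example.
 */
static inline void example_read_wall_clock(struct pvclock_wall_clock *wc,
                                           u32 *sec, u32 *nsec)
{
        u32 version;

        do {
                version = wc->version;
                rmb();          /* read sec/nsec only after the version */
                *sec = wc->sec;
                *nsec = wc->nsec;
                rmb();          /* re-read the version after the payload */
        } while ((wc->version & 1) || (wc->version != version));
}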
916
917 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
918 {
919         uint32_t quotient, remainder;
920
921         /* Don't try to replace this with do_div(); it calculates
922          * "(dividend << 32) / divisor" */
923         __asm__ ( "divl %4"
924                   : "=a" (quotient), "=d" (remainder)
925                   : "0" (0), "1" (dividend), "r" (divisor) );
926         return quotient;
927 }
928
929 static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
930 {
931         uint64_t nsecs = 1000000000LL;
932         int32_t  shift = 0;
933         uint64_t tps64;
934         uint32_t tps32;
935
936         tps64 = tsc_khz * 1000LL;
937         while (tps64 > nsecs*2) {
938                 tps64 >>= 1;
939                 shift--;
940         }
941
942         tps32 = (uint32_t)tps64;
943         while (tps32 <= (uint32_t)nsecs) {
944                 tps32 <<= 1;
945                 shift++;
946         }
947
948         hv_clock->tsc_shift = shift;
949         hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
950
951         pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
952                  __func__, tsc_khz, hv_clock->tsc_shift,
953                  hv_clock->tsc_to_system_mul);
954 }
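/*
 * Editor's note: a worked example, not in the original file.  For a 2 GHz
 * TSC, tsc_khz = 2000000, so tps64 = 2e9, which is not above nsecs * 2,
 * and tps32 = 2e9 is already above nsecs, so tsc_shift stays 0 and
 * tsc_to_system_mul = (1e9 << 32) / 2e9 = 0x80000000.  The guest then
 * computes ns = (tsc_delta * 0x80000000) >> 32 = tsc_delta / 2, i.e. one
 * nanosecond for every two TSC cycles, as expected for a 2 GHz clock.
 */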
955
956 static inline u64 get_kernel_ns(void)
957 {
958         struct timespec ts;
959
960         WARN_ON(preemptible());
961         ktime_get_ts(&ts);
962         monotonic_to_bootbased(&ts);
963         return timespec_to_ns(&ts);
964 }
965
966 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
967
968 static inline int kvm_tsc_changes_freq(void)
969 {
970         int cpu = get_cpu();
971         int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
972                   cpufreq_quick_get(cpu) != 0;
973         put_cpu();
974         return ret;
975 }
976
977 static inline u64 nsec_to_cycles(u64 nsec)
978 {
979         u64 ret;
980
981         WARN_ON(preemptible());
982         if (kvm_tsc_changes_freq())
983                 printk_once(KERN_WARNING
984                  "kvm: unreliable cycle conversion on adjustable rate TSC\n");
985         ret = nsec * __get_cpu_var(cpu_tsc_khz);
986         do_div(ret, USEC_PER_SEC);
987         return ret;
988 }
989
990 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
991 {
992         struct kvm *kvm = vcpu->kvm;
993         u64 offset, ns, elapsed;
994         unsigned long flags;
995         s64 sdiff;
996
997         spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
998         offset = data - native_read_tsc();
999         ns = get_kernel_ns();
1000         elapsed = ns - kvm->arch.last_tsc_nsec;
1001         sdiff = data - kvm->arch.last_tsc_write;
1002         if (sdiff < 0)
1003                 sdiff = -sdiff;
1004
1005         /*
1006          * Special case: a TSC write that lands within 5 seconds of a
1007          * write on another CPU is interpreted as an attempt to synchronize.
1008          * The 5 seconds accommodate host load / swapping as
1009          * well as any reset of the TSC during the boot process.
1010          *
1011          * In that case, for a reliable TSC, we can match TSC offsets,
1012          * or make a best guess using the elapsed value.
1013          */
1014         if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
1015             elapsed < 5ULL * NSEC_PER_SEC) {
1016                 if (!check_tsc_unstable()) {
1017                         offset = kvm->arch.last_tsc_offset;
1018                         pr_debug("kvm: matched tsc offset for %llu\n", data);
1019                 } else {
1020                         u64 delta = nsec_to_cycles(elapsed);
1021                         offset += delta;
1022                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1023                 }
1024                 ns = kvm->arch.last_tsc_nsec;
1025         }
1026         kvm->arch.last_tsc_nsec = ns;
1027         kvm->arch.last_tsc_write = data;
1028         kvm->arch.last_tsc_offset = offset;
1029         kvm_x86_ops->write_tsc_offset(vcpu, offset);
1030         spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1031
1032         /* Reset of TSC must disable overshoot protection below */
1033         vcpu->arch.hv_clock.tsc_timestamp = 0;
1034 }
1035 EXPORT_SYMBOL_GPL(kvm_write_tsc);
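/*
 * Editor's note (not in the original file): with nsec_to_cycles() above,
 * the 5 second window corresponds to 5 * NSEC_PER_SEC * tsc_khz / USEC_PER_SEC
 * cycles -- for example, roughly 10^10 TSC cycles on a 2 GHz host.
 */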
1036
1037 static int kvm_write_guest_time(struct kvm_vcpu *v)
1038 {
1039         unsigned long flags;
1040         struct kvm_vcpu_arch *vcpu = &v->arch;
1041         void *shared_kaddr;
1042         unsigned long this_tsc_khz;
1043         s64 kernel_ns, max_kernel_ns;
1044         u64 tsc_timestamp;
1045
1046         if ((!vcpu->time_page))
1047                 return 0;
1048
1049         /* Keep irq disabled to prevent changes to the clock */
1050         local_irq_save(flags);
1051         kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
1052         kernel_ns = get_kernel_ns();
1053         this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1054         local_irq_restore(flags);
1055
1056         if (unlikely(this_tsc_khz == 0)) {
1057                 kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
1058                 return 1;
1059         }
1060
1061         /*
1062          * Time as measured by the TSC may go backwards when resetting the base
1063          * tsc_timestamp.  The reason for this is that the TSC resolution is
1064          * higher than the resolution of the other clock scales.  Thus, many
1065          * possible measurements of the TSC correspond to one measurement of any
1066          * other clock, and so a spread of values is possible.  This is not a
1067          * problem for the computation of the nanosecond clock; with TSC rates
1068          * around 1 GHz, there can only be a few cycles which correspond to one
1069          * nanosecond value, and any path through this code will inevitably
1070          * take longer than that.  However, with the kernel_ns value itself,
1071          * the precision may be much lower, down to HZ granularity.  If the
1072          * first sampling of TSC against kernel_ns ends in the low part of the
1073          * range, and the second in the high end of the range, we can get:
1074          *
1075          * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1076          *
1077          * As the sampling errors potentially range in the thousands of cycles,
1078          * it is possible such a time value has already been observed by the
1079          * guest.  To protect against this, we must compute the system time as
1080          * observed by the guest and ensure the new system time is greater.
1081          */
1082         max_kernel_ns = 0;
1083         if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
1084                 max_kernel_ns = vcpu->last_guest_tsc -
1085                                 vcpu->hv_clock.tsc_timestamp;
1086                 max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1087                                     vcpu->hv_clock.tsc_to_system_mul,
1088                                     vcpu->hv_clock.tsc_shift);
1089                 max_kernel_ns += vcpu->last_kernel_ns;
1090         }
1091
1092         if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1093                 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
1094                 vcpu->hw_tsc_khz = this_tsc_khz;
1095         }
1096
1097         if (max_kernel_ns > kernel_ns)
1098                 kernel_ns = max_kernel_ns;
1099
1100         /* With all the info we got, fill in the values */
1101         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1102         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1103         vcpu->last_kernel_ns = kernel_ns;
1104         vcpu->hv_clock.flags = 0;
1105
1106         /*
1107          * The interface expects us to write an even number signaling that the
1108          * update is finished. Since the guest won't see the intermediate
1109          * state, we just increase by 2 at the end.
1110          */
1111         vcpu->hv_clock.version += 2;
1112
1113         shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
1114
1115         memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
1116                sizeof(vcpu->hv_clock));
1117
1118         kunmap_atomic(shared_kaddr, KM_USER0);
1119
1120         mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
1121         return 0;
1122 }
1123
1124 static int kvm_request_guest_time_update(struct kvm_vcpu *v)
1125 {
1126         struct kvm_vcpu_arch *vcpu = &v->arch;
1127
1128         if (!vcpu->time_page)
1129                 return 0;
1130         kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
1131         return 1;
1132 }
1133
1134 static bool msr_mtrr_valid(unsigned msr)
1135 {
1136         switch (msr) {
1137         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1138         case MSR_MTRRfix64K_00000:
1139         case MSR_MTRRfix16K_80000:
1140         case MSR_MTRRfix16K_A0000:
1141         case MSR_MTRRfix4K_C0000:
1142         case MSR_MTRRfix4K_C8000:
1143         case MSR_MTRRfix4K_D0000:
1144         case MSR_MTRRfix4K_D8000:
1145         case MSR_MTRRfix4K_E0000:
1146         case MSR_MTRRfix4K_E8000:
1147         case MSR_MTRRfix4K_F0000:
1148         case MSR_MTRRfix4K_F8000:
1149         case MSR_MTRRdefType:
1150         case MSR_IA32_CR_PAT:
1151                 return true;
1152         case 0x2f8:
1153                 return true;
1154         }
1155         return false;
1156 }
1157
1158 static bool valid_pat_type(unsigned t)
1159 {
1160         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1161 }
1162
1163 static bool valid_mtrr_type(unsigned t)
1164 {
1165         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1166 }
1167
1168 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1169 {
1170         int i;
1171
1172         if (!msr_mtrr_valid(msr))
1173                 return false;
1174
1175         if (msr == MSR_IA32_CR_PAT) {
1176                 for (i = 0; i < 8; i++)
1177                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
1178                                 return false;
1179                 return true;
1180         } else if (msr == MSR_MTRRdefType) {
1181                 if (data & ~0xcff)
1182                         return false;
1183                 return valid_mtrr_type(data & 0xff);
1184         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1185                 for (i = 0; i < 8 ; i++)
1186                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1187                                 return false;
1188                 return true;
1189         }
1190
1191         /* variable MTRRs */
1192         return valid_mtrr_type(data & 0xff);
1193 }
1194
1195 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1196 {
1197         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1198
1199         if (!mtrr_valid(vcpu, msr, data))
1200                 return 1;
1201
1202         if (msr == MSR_MTRRdefType) {
1203                 vcpu->arch.mtrr_state.def_type = data;
1204                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1205         } else if (msr == MSR_MTRRfix64K_00000)
1206                 p[0] = data;
1207         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1208                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
1209         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1210                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1211         else if (msr == MSR_IA32_CR_PAT)
1212                 vcpu->arch.pat = data;
1213         else {  /* Variable MTRRs */
1214                 int idx, is_mtrr_mask;
1215                 u64 *pt;
1216
1217                 idx = (msr - 0x200) / 2;
1218                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1219                 if (!is_mtrr_mask)
1220                         pt =
1221                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1222                 else
1223                         pt =
1224                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1225                 *pt = data;
1226         }
1227
1228         kvm_mmu_reset_context(vcpu);
1229         return 0;
1230 }
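/*
 * Editor's note (not in the original file): the variable-range decode above
 * relies on the architectural MSR layout 0x200 = MTRRphysBase0,
 * 0x201 = MTRRphysMask0, 0x202 = MTRRphysBase1, ..., so
 * idx = (msr - 0x200) / 2 selects the range and the low bit selects base
 * versus mask.  get_msr_mtrr() below decodes reads the same way.
 */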
1231
1232 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1233 {
1234         u64 mcg_cap = vcpu->arch.mcg_cap;
1235         unsigned bank_num = mcg_cap & 0xff;
1236
1237         switch (msr) {
1238         case MSR_IA32_MCG_STATUS:
1239                 vcpu->arch.mcg_status = data;
1240                 break;
1241         case MSR_IA32_MCG_CTL:
1242                 if (!(mcg_cap & MCG_CTL_P))
1243                         return 1;
1244                 if (data != 0 && data != ~(u64)0)
1245                         return -1;
1246                 vcpu->arch.mcg_ctl = data;
1247                 break;
1248         default:
1249                 if (msr >= MSR_IA32_MC0_CTL &&
1250                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1251                         u32 offset = msr - MSR_IA32_MC0_CTL;
1252                         /* Only 0 or all 1s can be written to IA32_MCi_CTL.
1253                          * Some Linux kernels, though, clear bit 10 in bank 4 to
1254                          * work around a BIOS/GART TBL issue on AMD K8s; tolerate
1255                          * that cleared bit to avoid an uncaught #GP in the guest.
1256                          */
1257                         if ((offset & 0x3) == 0 &&
1258                             data != 0 && (data | (1 << 10)) != ~(u64)0)
1259                                 return -1;
1260                         vcpu->arch.mce_banks[offset] = data;
1261                         break;
1262                 }
1263                 return 1;
1264         }
1265         return 0;
1266 }
1267
1268 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1269 {
1270         struct kvm *kvm = vcpu->kvm;
1271         int lm = is_long_mode(vcpu);
1272         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1273                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1274         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1275                 : kvm->arch.xen_hvm_config.blob_size_32;
1276         u32 page_num = data & ~PAGE_MASK;
1277         u64 page_addr = data & PAGE_MASK;
1278         u8 *page;
1279         int r;
1280
1281         r = -E2BIG;
1282         if (page_num >= blob_size)
1283                 goto out;
1284         r = -ENOMEM;
1285         page = kzalloc(PAGE_SIZE, GFP_KERNEL);
1286         if (!page)
1287                 goto out;
1288         r = -EFAULT;
1289         if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
1290                 goto out_free;
1291         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1292                 goto out_free;
1293         r = 0;
1294 out_free:
1295         kfree(page);
1296 out:
1297         return r;
1298 }
1299
1300 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1301 {
1302         return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1303 }
1304
1305 static bool kvm_hv_msr_partition_wide(u32 msr)
1306 {
1307         bool r = false;
1308         switch (msr) {
1309         case HV_X64_MSR_GUEST_OS_ID:
1310         case HV_X64_MSR_HYPERCALL:
1311                 r = true;
1312                 break;
1313         }
1314
1315         return r;
1316 }
1317
1318 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1319 {
1320         struct kvm *kvm = vcpu->kvm;
1321
1322         switch (msr) {
1323         case HV_X64_MSR_GUEST_OS_ID:
1324                 kvm->arch.hv_guest_os_id = data;
1325                 /* setting guest os id to zero disables hypercall page */
1326                 if (!kvm->arch.hv_guest_os_id)
1327                         kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1328                 break;
1329         case HV_X64_MSR_HYPERCALL: {
1330                 u64 gfn;
1331                 unsigned long addr;
1332                 u8 instructions[4];
1333
1334                 /* if guest os id is not set hypercall should remain disabled */
1335                 if (!kvm->arch.hv_guest_os_id)
1336                         break;
1337                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1338                         kvm->arch.hv_hypercall = data;
1339                         break;
1340                 }
1341                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1342                 addr = gfn_to_hva(kvm, gfn);
1343                 if (kvm_is_error_hva(addr))
1344                         return 1;
1345                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1346                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1347                 if (copy_to_user((void __user *)addr, instructions, 4))
1348                         return 1;
1349                 kvm->arch.hv_hypercall = data;
1350                 break;
1351         }
1352         default:
1353                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1354                           "data 0x%llx\n", msr, data);
1355                 return 1;
1356         }
1357         return 0;
1358 }
1359
1360 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1361 {
1362         switch (msr) {
1363         case HV_X64_MSR_APIC_ASSIST_PAGE: {
1364                 unsigned long addr;
1365
1366                 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1367                         vcpu->arch.hv_vapic = data;
1368                         break;
1369                 }
1370                 addr = gfn_to_hva(vcpu->kvm, data >>
1371                                   HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1372                 if (kvm_is_error_hva(addr))
1373                         return 1;
1374                 if (clear_user((void __user *)addr, PAGE_SIZE))
1375                         return 1;
1376                 vcpu->arch.hv_vapic = data;
1377                 break;
1378         }
1379         case HV_X64_MSR_EOI:
1380                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1381         case HV_X64_MSR_ICR:
1382                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1383         case HV_X64_MSR_TPR:
1384                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1385         default:
1386                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1387                           "data 0x%llx\n", msr, data);
1388                 return 1;
1389         }
1390
1391         return 0;
1392 }
1393
1394 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1395 {
1396         switch (msr) {
1397         case MSR_EFER:
1398                 return set_efer(vcpu, data);
1399         case MSR_K7_HWCR:
1400                 data &= ~(u64)0x40;     /* ignore flush filter disable */
1401                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
1402                 if (data != 0) {
1403                         pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1404                                 data);
1405                         return 1;
1406                 }
1407                 break;
1408         case MSR_FAM10H_MMIO_CONF_BASE:
1409                 if (data != 0) {
1410                         pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1411                                 "0x%llx\n", data);
1412                         return 1;
1413                 }
1414                 break;
1415         case MSR_AMD64_NB_CFG:
1416                 break;
1417         case MSR_IA32_DEBUGCTLMSR:
1418                 if (!data) {
1419                         /* We support the non-activated case already */
1420                         break;
1421                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1422                         /* Values other than LBR and BTF are vendor-specific,
1423                            thus reserved and should throw a #GP */
1424                         return 1;
1425                 }
1426                 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1427                         __func__, data);
1428                 break;
1429         case MSR_IA32_UCODE_REV:
1430         case MSR_IA32_UCODE_WRITE:
1431         case MSR_VM_HSAVE_PA:
1432         case MSR_AMD64_PATCH_LOADER:
1433                 break;
1434         case 0x200 ... 0x2ff:
1435                 return set_msr_mtrr(vcpu, msr, data);
1436         case MSR_IA32_APICBASE:
1437                 kvm_set_apic_base(vcpu, data);
1438                 break;
1439         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1440                 return kvm_x2apic_msr_write(vcpu, msr, data);
1441         case MSR_IA32_MISC_ENABLE:
1442                 vcpu->arch.ia32_misc_enable_msr = data;
1443                 break;
1444         case MSR_KVM_WALL_CLOCK_NEW:
1445         case MSR_KVM_WALL_CLOCK:
1446                 vcpu->kvm->arch.wall_clock = data;
1447                 kvm_write_wall_clock(vcpu->kvm, data);
1448                 break;
1449         case MSR_KVM_SYSTEM_TIME_NEW:
1450         case MSR_KVM_SYSTEM_TIME: {
1451                 if (vcpu->arch.time_page) {
1452                         kvm_release_page_dirty(vcpu->arch.time_page);
1453                         vcpu->arch.time_page = NULL;
1454                 }
1455
1456                 vcpu->arch.time = data;
1457
1458                 /* we verify if the enable bit is set... */
1459                 if (!(data & 1))
1460                         break;
1461
1462                 /* ...but clean it before doing the actual write */
1463                 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1464
1465                 vcpu->arch.time_page =
1466                                 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
1467
1468                 if (is_error_page(vcpu->arch.time_page)) {
1469                         kvm_release_page_clean(vcpu->arch.time_page);
1470                         vcpu->arch.time_page = NULL;
1471                 }
1472
1473                 kvm_request_guest_time_update(vcpu);
1474                 break;
1475         }
1476         case MSR_IA32_MCG_CTL:
1477         case MSR_IA32_MCG_STATUS:
1478         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1479                 return set_msr_mce(vcpu, msr, data);
1480
1481         /* Performance counters are not protected by a CPUID bit,
1482          * so we should check all of them in the generic path for the sake of
1483          * cross vendor migration.
1484          * Writing a zero into the event select MSRs disables them,
1485          * which we emulate perfectly ;-). Any other value should at least
1486          * be reported, since some guests depend on them.
1487          */
1488         case MSR_P6_EVNTSEL0:
1489         case MSR_P6_EVNTSEL1:
1490         case MSR_K7_EVNTSEL0:
1491         case MSR_K7_EVNTSEL1:
1492         case MSR_K7_EVNTSEL2:
1493         case MSR_K7_EVNTSEL3:
1494                 if (data != 0)
1495                         pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1496                                 "0x%x data 0x%llx\n", msr, data);
1497                 break;
1498         /* at least RHEL 4 unconditionally writes to the perfctr registers,
1499          * so we ignore writes to make it happy.
1500          */
1501         case MSR_P6_PERFCTR0:
1502         case MSR_P6_PERFCTR1:
1503         case MSR_K7_PERFCTR0:
1504         case MSR_K7_PERFCTR1:
1505         case MSR_K7_PERFCTR2:
1506         case MSR_K7_PERFCTR3:
1507                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1508                         "0x%x data 0x%llx\n", msr, data);
1509                 break;
1510         case MSR_K7_CLK_CTL:
1511                 /*
1512                  * Ignore all writes to this no longer documented MSR.
1513                  * Writes are only relevant for old K7 processors,
1514                  * all pre-dating SVM, but they are a recommended workaround from
1515                  * AMD for these chips. It is possible to specify the
1516                  * affected processor models on the command line, hence
1517                  * the need to ignore the workaround.
1518                  */
1519                 break;
1520         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1521                 if (kvm_hv_msr_partition_wide(msr)) {
1522                         int r;
1523                         mutex_lock(&vcpu->kvm->lock);
1524                         r = set_msr_hyperv_pw(vcpu, msr, data);
1525                         mutex_unlock(&vcpu->kvm->lock);
1526                         return r;
1527                 } else
1528                         return set_msr_hyperv(vcpu, msr, data);
1529                 break;
1530         default:
1531                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1532                         return xen_hvm_config(vcpu, data);
1533                 if (!ignore_msrs) {
1534                         pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1535                                 msr, data);
1536                         return 1;
1537                 } else {
1538                         pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1539                                 msr, data);
1540                         break;
1541                 }
1542         }
1543         return 0;
1544 }
1545 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
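
/*
 * Note on the default case above: a non-zero return from kvm_set_msr_common()
 * is normally turned into a #GP injected into the guest by the WRMSR exit
 * handlers, while ignore_msrs makes KVM log and silently discard the write
 * instead.  The parameter is declared writable (S_IWUSR), so it can usually
 * be flipped at runtime via /sys/module/kvm/parameters/ignore_msrs.
 */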
1546
1547
1548 /*
1549  * Reads an msr value (of 'msr_index') into 'pdata'.
1550  * Returns 0 on success, non-0 otherwise.
1551  * Assumes vcpu_load() was already called.
1552  */
1553 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1554 {
1555         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1556 }
1557
1558 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1559 {
1560         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1561
1562         if (!msr_mtrr_valid(msr))
1563                 return 1;
1564
1565         if (msr == MSR_MTRRdefType)
1566                 *pdata = vcpu->arch.mtrr_state.def_type +
1567                          (vcpu->arch.mtrr_state.enabled << 10);
1568         else if (msr == MSR_MTRRfix64K_00000)
1569                 *pdata = p[0];
1570         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1571                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1572         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1573                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1574         else if (msr == MSR_IA32_CR_PAT)
1575                 *pdata = vcpu->arch.pat;
1576         else {  /* Variable MTRRs */
1577                 int idx, is_mtrr_mask;
1578                 u64 *pt;
1579
1580                 idx = (msr - 0x200) / 2;
1581                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1582                 if (!is_mtrr_mask)
1583                         pt =
1584                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1585                 else
1586                         pt =
1587                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1588                 *pdata = *pt;
1589         }
1590
1591         return 0;
1592 }
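
/*
 * Layout assumed by the "Variable MTRRs" branch above: MSRs 0x200-0x2ff hold
 * PHYSBASEn/PHYSMASKn pairs, with the base register at even offsets and the
 * mask register at odd ones.  Worked example: msr == 0x203 gives
 * idx = (0x203 - 0x200) / 2 = 1 and is_mtrr_mask = 3 - 2 * 1 = 1, i.e. the
 * mask register of variable range 1.
 */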
1593
1594 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1595 {
1596         u64 data;
1597         u64 mcg_cap = vcpu->arch.mcg_cap;
1598         unsigned bank_num = mcg_cap & 0xff;
1599
1600         switch (msr) {
1601         case MSR_IA32_P5_MC_ADDR:
1602         case MSR_IA32_P5_MC_TYPE:
1603                 data = 0;
1604                 break;
1605         case MSR_IA32_MCG_CAP:
1606                 data = vcpu->arch.mcg_cap;
1607                 break;
1608         case MSR_IA32_MCG_CTL:
1609                 if (!(mcg_cap & MCG_CTL_P))
1610                         return 1;
1611                 data = vcpu->arch.mcg_ctl;
1612                 break;
1613         case MSR_IA32_MCG_STATUS:
1614                 data = vcpu->arch.mcg_status;
1615                 break;
1616         default:
1617                 if (msr >= MSR_IA32_MC0_CTL &&
1618                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1619                         u32 offset = msr - MSR_IA32_MC0_CTL;
1620                         data = vcpu->arch.mce_banks[offset];
1621                         break;
1622                 }
1623                 return 1;
1624         }
1625         *pdata = data;
1626         return 0;
1627 }
1628
1629 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1630 {
1631         u64 data = 0;
1632         struct kvm *kvm = vcpu->kvm;
1633
1634         switch (msr) {
1635         case HV_X64_MSR_GUEST_OS_ID:
1636                 data = kvm->arch.hv_guest_os_id;
1637                 break;
1638         case HV_X64_MSR_HYPERCALL:
1639                 data = kvm->arch.hv_hypercall;
1640                 break;
1641         default:
1642                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1643                 return 1;
1644         }
1645
1646         *pdata = data;
1647         return 0;
1648 }
1649
1650 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1651 {
1652         u64 data = 0;
1653
1654         switch (msr) {
1655         case HV_X64_MSR_VP_INDEX: {
1656                 int r;
1657                 struct kvm_vcpu *v;
1658                 kvm_for_each_vcpu(r, v, vcpu->kvm)
1659                         if (v == vcpu)
1660                                 data = r;
1661                 break;
1662         }
1663         case HV_X64_MSR_EOI:
1664                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1665         case HV_X64_MSR_ICR:
1666                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1667         case HV_X64_MSR_TPR:
1668                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1669         default:
1670                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1671                 return 1;
1672         }
1673         *pdata = data;
1674         return 0;
1675 }
1676
1677 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1678 {
1679         u64 data;
1680
1681         switch (msr) {
1682         case MSR_IA32_PLATFORM_ID:
1683         case MSR_IA32_UCODE_REV:
1684         case MSR_IA32_EBL_CR_POWERON:
1685         case MSR_IA32_DEBUGCTLMSR:
1686         case MSR_IA32_LASTBRANCHFROMIP:
1687         case MSR_IA32_LASTBRANCHTOIP:
1688         case MSR_IA32_LASTINTFROMIP:
1689         case MSR_IA32_LASTINTTOIP:
1690         case MSR_K8_SYSCFG:
1691         case MSR_K7_HWCR:
1692         case MSR_VM_HSAVE_PA:
1693         case MSR_P6_PERFCTR0:
1694         case MSR_P6_PERFCTR1:
1695         case MSR_P6_EVNTSEL0:
1696         case MSR_P6_EVNTSEL1:
1697         case MSR_K7_EVNTSEL0:
1698         case MSR_K7_PERFCTR0:
1699         case MSR_K8_INT_PENDING_MSG:
1700         case MSR_AMD64_NB_CFG:
1701         case MSR_FAM10H_MMIO_CONF_BASE:
1702                 data = 0;
1703                 break;
1704         case MSR_MTRRcap:
1705                 data = 0x500 | KVM_NR_VAR_MTRR;
1706                 break;
1707         case 0x200 ... 0x2ff:
1708                 return get_msr_mtrr(vcpu, msr, pdata);
1709         case 0xcd: /* fsb frequency */
1710                 data = 3;
1711                 break;
1712                 /*
1713                  * MSR_EBC_FREQUENCY_ID
1714                  * A conservative value valid even for the most basic CPU
1715                  * models. Models 0,1: 000 in bits 23:21 indicating a bus
1716                  * speed of 100MHz; model 2: 000 in bits 18:16 indicating
1717                  * 100MHz; and 266MHz for models 3 or 4. Set the Core Clock
1718                  * Frequency to System Bus Frequency Ratio to 1 (bits
1719                  * 31:24) even though it is only valid for CPU models > 2,
1720                  * since guests may otherwise end up dividing or
1721                  * multiplying by zero.
1722                  */
1723         case MSR_EBC_FREQUENCY_ID:
1724                 data = 1 << 24;
1725                 break;
1726         case MSR_IA32_APICBASE:
1727                 data = kvm_get_apic_base(vcpu);
1728                 break;
1729         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1730                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1731                 break;
1732         case MSR_IA32_MISC_ENABLE:
1733                 data = vcpu->arch.ia32_misc_enable_msr;
1734                 break;
1735         case MSR_IA32_PERF_STATUS:
1736                 /* TSC increment by tick */
1737                 data = 1000ULL;
1738                 /* CPU multiplier */
1739                 data |= (((uint64_t)4ULL) << 40);
1740                 break;
1741         case MSR_EFER:
1742                 data = vcpu->arch.efer;
1743                 break;
1744         case MSR_KVM_WALL_CLOCK:
1745         case MSR_KVM_WALL_CLOCK_NEW:
1746                 data = vcpu->kvm->arch.wall_clock;
1747                 break;
1748         case MSR_KVM_SYSTEM_TIME:
1749         case MSR_KVM_SYSTEM_TIME_NEW:
1750                 data = vcpu->arch.time;
1751                 break;
1752         case MSR_IA32_P5_MC_ADDR:
1753         case MSR_IA32_P5_MC_TYPE:
1754         case MSR_IA32_MCG_CAP:
1755         case MSR_IA32_MCG_CTL:
1756         case MSR_IA32_MCG_STATUS:
1757         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1758                 return get_msr_mce(vcpu, msr, pdata);
1759         case MSR_K7_CLK_CTL:
1760                 /*
1761                  * Provide the expected ramp-up count for K7. All other
1762                  * bits are set to zero, indicating minimum divisors for
1763                  * every field.
1764                  *
1765                  * This prevents guest kernels on an AMD host with CPU
1766                  * type 6, model 8 and higher from exploding due to
1767                  * the rdmsr failing.
1768                  */
1769                 data = 0x20000000;
1770                 break;
1771         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1772                 if (kvm_hv_msr_partition_wide(msr)) {
1773                         int r;
1774                         mutex_lock(&vcpu->kvm->lock);
1775                         r = get_msr_hyperv_pw(vcpu, msr, pdata);
1776                         mutex_unlock(&vcpu->kvm->lock);
1777                         return r;
1778                 } else
1779                         return get_msr_hyperv(vcpu, msr, pdata);
1780                 break;
1781         default:
1782                 if (!ignore_msrs) {
1783                         pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1784                         return 1;
1785                 } else {
1786                         pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1787                         data = 0;
1788                 }
1789                 break;
1790         }
1791         *pdata = data;
1792         return 0;
1793 }
1794 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1795
1796 /*
1797  * Read or write a bunch of msrs. All parameters are kernel addresses.
1798  *
1799  * @return number of msrs set successfully.
1800  */
1801 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1802                     struct kvm_msr_entry *entries,
1803                     int (*do_msr)(struct kvm_vcpu *vcpu,
1804                                   unsigned index, u64 *data))
1805 {
1806         int i, idx;
1807
1808         idx = srcu_read_lock(&vcpu->kvm->srcu);
1809         for (i = 0; i < msrs->nmsrs; ++i)
1810                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1811                         break;
1812         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1813
1814         return i;
1815 }
1816
1817 /*
1818  * Read or write a bunch of msrs. Parameters are user addresses.
1819  *
1820  * @return number of msrs set successfully.
1821  */
1822 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1823                   int (*do_msr)(struct kvm_vcpu *vcpu,
1824                                 unsigned index, u64 *data),
1825                   int writeback)
1826 {
1827         struct kvm_msrs msrs;
1828         struct kvm_msr_entry *entries;
1829         int r, n;
1830         unsigned size;
1831
1832         r = -EFAULT;
1833         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1834                 goto out;
1835
1836         r = -E2BIG;
1837         if (msrs.nmsrs >= MAX_IO_MSRS)
1838                 goto out;
1839
1840         r = -ENOMEM;
1841         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1842         entries = kmalloc(size, GFP_KERNEL);
1843         if (!entries)
1844                 goto out;
1845
1846         r = -EFAULT;
1847         if (copy_from_user(entries, user_msrs->entries, size))
1848                 goto out_free;
1849
1850         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1851         if (r < 0)
1852                 goto out_free;
1853
1854         r = -EFAULT;
1855         if (writeback && copy_to_user(user_msrs->entries, entries, size))
1856                 goto out_free;
1857
1858         r = n;
1859
1860 out_free:
1861         kfree(entries);
1862 out:
1863         return r;
1864 }
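
/*
 * Rough userspace sketch of the KVM_GET_MSRS path serviced by msr_io() above;
 * illustrative only, assuming <linux/kvm.h>, <sys/ioctl.h>, <stdio.h>,
 * <stdlib.h>, an MSR_EFER definition (0xc0000080) and an already-created
 * vcpu_fd.  Like __msr_io(), the ioctl returns the number of MSRs actually
 * processed.
 *
 *	struct kvm_msrs *msrs;
 *
 *	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
 *	msrs->nmsrs = 1;
 *	msrs->entries[0].index = MSR_EFER;
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, msrs) == 1)
 *		printf("EFER = 0x%llx\n",
 *		       (unsigned long long)msrs->entries[0].data);
 *	free(msrs);
 */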
1865
1866 int kvm_dev_ioctl_check_extension(long ext)
1867 {
1868         int r;
1869
1870         switch (ext) {
1871         case KVM_CAP_IRQCHIP:
1872         case KVM_CAP_HLT:
1873         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
1874         case KVM_CAP_SET_TSS_ADDR:
1875         case KVM_CAP_EXT_CPUID:
1876         case KVM_CAP_CLOCKSOURCE:
1877         case KVM_CAP_PIT:
1878         case KVM_CAP_NOP_IO_DELAY:
1879         case KVM_CAP_MP_STATE:
1880         case KVM_CAP_SYNC_MMU:
1881         case KVM_CAP_REINJECT_CONTROL:
1882         case KVM_CAP_IRQ_INJECT_STATUS:
1883         case KVM_CAP_ASSIGN_DEV_IRQ:
1884         case KVM_CAP_IRQFD:
1885         case KVM_CAP_IOEVENTFD:
1886         case KVM_CAP_PIT2:
1887         case KVM_CAP_PIT_STATE2:
1888         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1889         case KVM_CAP_XEN_HVM:
1890         case KVM_CAP_ADJUST_CLOCK:
1891         case KVM_CAP_VCPU_EVENTS:
1892         case KVM_CAP_HYPERV:
1893         case KVM_CAP_HYPERV_VAPIC:
1894         case KVM_CAP_HYPERV_SPIN:
1895         case KVM_CAP_PCI_SEGMENT:
1896         case KVM_CAP_DEBUGREGS:
1897         case KVM_CAP_X86_ROBUST_SINGLESTEP:
1898         case KVM_CAP_XSAVE:
1899                 r = 1;
1900                 break;
1901         case KVM_CAP_COALESCED_MMIO:
1902                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1903                 break;
1904         case KVM_CAP_VAPIC:
1905                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1906                 break;
1907         case KVM_CAP_NR_VCPUS:
1908                 r = KVM_MAX_VCPUS;
1909                 break;
1910         case KVM_CAP_NR_MEMSLOTS:
1911                 r = KVM_MEMORY_SLOTS;
1912                 break;
1913         case KVM_CAP_PV_MMU:    /* obsolete */
1914                 r = 0;
1915                 break;
1916         case KVM_CAP_IOMMU:
1917                 r = iommu_found();
1918                 break;
1919         case KVM_CAP_MCE:
1920                 r = KVM_MAX_MCE_BANKS;
1921                 break;
1922         case KVM_CAP_XCRS:
1923                 r = cpu_has_xsave;
1924                 break;
1925         default:
1926                 r = 0;
1927                 break;
1928         }
1929         return r;
1930
1931 }
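
/*
 * Capability queries reach kvm_dev_ioctl_check_extension() through the
 * KVM_CHECK_EXTENSION ioctl on the /dev/kvm fd.  A minimal sketch
 * (illustrative, assuming <fcntl.h>, <sys/ioctl.h> and <linux/kvm.h>):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int has_xsave = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE);
 *
 * Boolean capabilities report 0 or 1; others (KVM_CAP_NR_VCPUS, KVM_CAP_MCE,
 * KVM_CAP_COALESCED_MMIO, ...) return the limit or offset itself, as the
 * switch above shows.
 */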
1932
1933 long kvm_arch_dev_ioctl(struct file *filp,
1934                         unsigned int ioctl, unsigned long arg)
1935 {
1936         void __user *argp = (void __user *)arg;
1937         long r;
1938
1939         switch (ioctl) {
1940         case KVM_GET_MSR_INDEX_LIST: {
1941                 struct kvm_msr_list __user *user_msr_list = argp;
1942                 struct kvm_msr_list msr_list;
1943                 unsigned n;
1944
1945                 r = -EFAULT;
1946                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1947                         goto out;
1948                 n = msr_list.nmsrs;
1949                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1950                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1951                         goto out;
1952                 r = -E2BIG;
1953                 if (n < msr_list.nmsrs)
1954                         goto out;
1955                 r = -EFAULT;
1956                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1957                                  num_msrs_to_save * sizeof(u32)))
1958                         goto out;
1959                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
1960                                  &emulated_msrs,
1961                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1962                         goto out;
1963                 r = 0;
1964                 break;
1965         }
1966         case KVM_GET_SUPPORTED_CPUID: {
1967                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1968                 struct kvm_cpuid2 cpuid;
1969
1970                 r = -EFAULT;
1971                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1972                         goto out;
1973                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
1974                                                       cpuid_arg->entries);
1975                 if (r)
1976                         goto out;
1977
1978                 r = -EFAULT;
1979                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1980                         goto out;
1981                 r = 0;
1982                 break;
1983         }
1984         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1985                 u64 mce_cap;
1986
1987                 mce_cap = KVM_MCE_CAP_SUPPORTED;
1988                 r = -EFAULT;
1989                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1990                         goto out;
1991                 r = 0;
1992                 break;
1993         }
1994         default:
1995                 r = -EINVAL;
1996         }
1997 out:
1998         return r;
1999 }
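
/*
 * KVM_GET_MSR_INDEX_LIST above follows the usual two-step pattern: probe the
 * required size, then fetch the indices.  Illustrative sketch, assuming
 * <linux/kvm.h>, <sys/ioctl.h>, <stdlib.h> and a kvm_fd opened on /dev/kvm:
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	struct kvm_msr_list *list;
 *
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);
 *	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
 *
 * The first call fails with E2BIG but writes the real count back into
 * probe.nmsrs; the second call fills list->indices[] with the saved plus
 * emulated MSR numbers.
 */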
2000
2001 static void wbinvd_ipi(void *garbage)
2002 {
2003         wbinvd();
2004 }
2005
2006 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2007 {
2008         return vcpu->kvm->arch.iommu_domain &&
2009                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
2010 }
2011
2012 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2013 {
2014         /* Deal with WBINVD being executed by the guest */
2015         if (need_emulate_wbinvd(vcpu)) {
2016                 if (kvm_x86_ops->has_wbinvd_exit())
2017                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2018                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2019                         smp_call_function_single(vcpu->cpu,
2020                                         wbinvd_ipi, NULL, 1);
2021         }
2022
2023         kvm_x86_ops->vcpu_load(vcpu, cpu);
2024         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2025                 /* Make sure TSC doesn't go backwards */
2026                 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2027                                 native_read_tsc() - vcpu->arch.last_host_tsc;
2028                 if (tsc_delta < 0)
2029                         mark_tsc_unstable("KVM discovered backwards TSC");
2030                 if (check_tsc_unstable())
2031                         kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
2032                 kvm_migrate_timers(vcpu);
2033                 vcpu->cpu = cpu;
2034         }
2035 }
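
/*
 * The unlikely() block above handles a vcpu being scheduled onto a different
 * physical CPU (or an unstable host TSC): the delta between the TSC sampled
 * in kvm_arch_vcpu_put() and the TSC of the new CPU is computed, a negative
 * delta marks the host TSC unstable, and on unstable hosts the guest's TSC
 * offset is adjusted by -tsc_delta so the guest does not observe its TSC
 * jumping backwards across the move.
 */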
2036
2037 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2038 {
2039         kvm_x86_ops->vcpu_put(vcpu);
2040         kvm_put_guest_fpu(vcpu);
2041         vcpu->arch.last_host_tsc = native_read_tsc();
2042 }
2043
2044 static int is_efer_nx(void)
2045 {
2046         unsigned long long efer = 0;
2047
2048         rdmsrl_safe(MSR_EFER, &efer);
2049         return efer & EFER_NX;
2050 }
2051
2052 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2053 {
2054         int i;
2055         struct kvm_cpuid_entry2 *e, *entry;
2056
2057         entry = NULL;
2058         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2059                 e = &vcpu->arch.cpuid_entries[i];
2060                 if (e->function == 0x80000001) {
2061                         entry = e;
2062                         break;
2063                 }
2064         }
2065         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
2066                 entry->edx &= ~(1 << 20);
2067                 printk(KERN_INFO "kvm: guest NX capability removed\n");
2068         }
2069 }
2070
2071 /* legacy KVM_SET_CPUID path: an old userspace process filling in a newer kernel module */
2072 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2073                                     struct kvm_cpuid *cpuid,
2074                                     struct kvm_cpuid_entry __user *entries)
2075 {
2076         int r, i;
2077         struct kvm_cpuid_entry *cpuid_entries;
2078
2079         r = -E2BIG;
2080         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2081                 goto out;
2082         r = -ENOMEM;
2083         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
2084         if (!cpuid_entries)
2085                 goto out;
2086         r = -EFAULT;
2087         if (copy_from_user(cpuid_entries, entries,
2088                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2089                 goto out_free;
2090         for (i = 0; i < cpuid->nent; i++) {
2091                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
2092                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
2093                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
2094                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
2095                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
2096                 vcpu->arch.cpuid_entries[i].index = 0;
2097                 vcpu->arch.cpuid_entries[i].flags = 0;
2098                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
2099                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
2100                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
2101         }
2102         vcpu->arch.cpuid_nent = cpuid->nent;
2103         cpuid_fix_nx_cap(vcpu);
2104         r = 0;
2105         kvm_apic_set_version(vcpu);
2106         kvm_x86_ops->cpuid_update(vcpu);
2107         update_cpuid(vcpu);
2108
2109 out_free:
2110         vfree(cpuid_entries);
2111 out:
2112         return r;
2113 }
2114
2115 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
2116                                      struct kvm_cpuid2 *cpuid,
2117                                      struct kvm_cpuid_entry2 __user *entries)
2118 {
2119         int r;
2120
2121         r = -E2BIG;
2122         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2123                 goto out;
2124         r = -EFAULT;
2125         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
2126                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
2127                 goto out;
2128         vcpu->arch.cpuid_nent = cpuid->nent;
2129         kvm_apic_set_version(vcpu);
2130         kvm_x86_ops->cpuid_update(vcpu);
2131         update_cpuid(vcpu);
2132         return 0;
2133
2134 out:
2135         return r;
2136 }
2137
2138 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
2139                                      struct kvm_cpuid2 *cpuid,
2140                                      struct kvm_cpuid_entry2 __user *entries)
2141 {
2142         int r;
2143
2144         r = -E2BIG;
2145         if (cpuid->nent < vcpu->arch.cpuid_nent)
2146                 goto out;
2147         r = -EFAULT;
2148         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
2149                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
2150                 goto out;
2151         return 0;
2152
2153 out:
2154         cpuid->nent = vcpu->arch.cpuid_nent;
2155         return r;
2156 }
2157
2158 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2159                            u32 index)
2160 {
2161         entry->function = function;
2162         entry->index = index;
2163         cpuid_count(entry->function, entry->index,
2164                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
2165         entry->flags = 0;
2166 }
2167
2168 #define F(x) bit(X86_FEATURE_##x)
2169
2170 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2171                          u32 index, int *nent, int maxnent)
2172 {
2173         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
2174 #ifdef CONFIG_X86_64
2175         unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
2176                                 ? F(GBPAGES) : 0;
2177         unsigned f_lm = F(LM);
2178 #else
2179         unsigned f_gbpages = 0;
2180         unsigned f_lm = 0;
2181 #endif
2182         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
2183
2184         /* cpuid 1.edx */
2185         const u32 kvm_supported_word0_x86_features =
2186                 F(FPU) | F(VME) | F(DE) | F(PSE) |
2187                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2188                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
2189                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2190                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
2191                 0 /* Reserved, DS, ACPI */ | F(MMX) |
2192                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
2193                 0 /* HTT, TM, Reserved, PBE */;
2194         /* cpuid 0x80000001.edx */
2195         const u32 kvm_supported_word1_x86_features =
2196                 F(FPU) | F(VME) | F(DE) | F(PSE) |
2197                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2198                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
2199                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2200                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
2201                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
2202                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
2203                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
2204         /* cpuid 1.ecx */
2205         const u32 kvm_supported_word4_x86_features =
2206                 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
2207                 0 /* DS-CPL, VMX, SMX, EST */ |
2208                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
2209                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
2210                 0 /* Reserved, DCA */ | F(XMM4_1) |
2211                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
2212                 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
2213         /* cpuid 0x80000001.ecx */
2214         const u32 kvm_supported_word6_x86_features =
2215                 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
2216                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2217                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
2218                 0 /* SKINIT */ | 0 /* WDT */;
2219
2220         /* all calls to cpuid_count() should be made on the same cpu */
2221         get_cpu();
2222         do_cpuid_1_ent(entry, function, index);
2223         ++*nent;
2224
2225         switch (function) {
2226         case 0:
2227                 entry->eax = min(entry->eax, (u32)0xd);
2228                 break;
2229         case 1:
2230                 entry->edx &= kvm_supported_word0_x86_features;
2231                 entry->ecx &= kvm_supported_word4_x86_features;
2232                /* we support x2apic emulation even if the host does not
2233                 * support it, since we emulate x2apic in software */
2234                 entry->ecx |= F(X2APIC);
2235                 break;
2236         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
2237          * may return different values. This forces us to get_cpu() before
2238          * issuing the first command, and also to emulate this annoying behavior
2239          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2240         case 2: {
2241                 int t, times = entry->eax & 0xff;
2242
2243                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2244                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2245                 for (t = 1; t < times && *nent < maxnent; ++t) {
2246                         do_cpuid_1_ent(&entry[t], function, 0);
2247                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2248                         ++*nent;
2249                 }
2250                 break;
2251         }
2252         /* function 4 and 0xb have additional index. */
2253         case 4: {
2254                 int i, cache_type;
2255
2256                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2257                 /* read more entries until cache_type is zero */
2258                 for (i = 1; *nent < maxnent; ++i) {
2259                         cache_type = entry[i - 1].eax & 0x1f;
2260                         if (!cache_type)
2261                                 break;
2262                         do_cpuid_1_ent(&entry[i], function, i);
2263                         entry[i].flags |=
2264                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2265                         ++*nent;
2266                 }
2267                 break;
2268         }
2269         case 0xb: {
2270                 int i, level_type;
2271
2272                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2273                 /* read more entries until level_type is zero */
2274                 for (i = 1; *nent < maxnent; ++i) {
2275                         level_type = entry[i - 1].ecx & 0xff00;
2276                         if (!level_type)
2277                                 break;
2278                         do_cpuid_1_ent(&entry[i], function, i);
2279                         entry[i].flags |=
2280                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2281                         ++*nent;
2282                 }
2283                 break;
2284         }
2285         case 0xd: {
2286                 int i;
2287
2288                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2289                 for (i = 1; *nent < maxnent; ++i) {
2290                         if (entry[i - 1].eax == 0 && i != 2)
2291                                 break;
2292                         do_cpuid_1_ent(&entry[i], function, i);
2293                         entry[i].flags |=
2294                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2295                         ++*nent;
2296                 }
2297                 break;
2298         }
2299         case KVM_CPUID_SIGNATURE: {
2300                 char signature[12] = "KVMKVMKVM\0\0";
2301                 u32 *sigptr = (u32 *)signature;
2302                 entry->eax = 0;
2303                 entry->ebx = sigptr[0];
2304                 entry->ecx = sigptr[1];
2305                 entry->edx = sigptr[2];
2306                 break;
2307         }
2308         case KVM_CPUID_FEATURES:
2309                 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2310                              (1 << KVM_FEATURE_NOP_IO_DELAY) |
2311                              (1 << KVM_FEATURE_CLOCKSOURCE2) |
2312                              (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2313                 entry->ebx = 0;
2314                 entry->ecx = 0;
2315                 entry->edx = 0;
2316                 break;
2317         case 0x80000000:
2318                 entry->eax = min(entry->eax, 0x8000001a);
2319                 break;
2320         case 0x80000001:
2321                 entry->edx &= kvm_supported_word1_x86_features;
2322                 entry->ecx &= kvm_supported_word6_x86_features;
2323                 break;
2324         }
2325
2326         kvm_x86_ops->set_supported_cpuid(function, entry);
2327
2328         put_cpu();
2329 }
2330
2331 #undef F
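
/*
 * F() above expands to bit(X86_FEATURE_x), i.e. a mask for the feature's bit
 * position within its 32-bit CPUID word; F(XSAVE), for instance, is 1 << 26,
 * matching CPUID.1:ECX bit 26.  Masking entry->ecx/edx with the
 * kvm_supported_word*_x86_features constants therefore strips host features
 * KVM cannot expose, while "entry->ecx |= F(X2APIC)" forces x2apic on because
 * it is emulated in software regardless of host support.
 */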
2332
2333 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2334                                      struct kvm_cpuid_entry2 __user *entries)
2335 {
2336         struct kvm_cpuid_entry2 *cpuid_entries;
2337         int limit, nent = 0, r = -E2BIG;
2338         u32 func;
2339
2340         if (cpuid->nent < 1)
2341                 goto out;
2342         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2343                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2344         r = -ENOMEM;
2345         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2346         if (!cpuid_entries)
2347                 goto out;
2348
2349         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2350         limit = cpuid_entries[0].eax;
2351         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2352                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2353                              &nent, cpuid->nent);
2354         r = -E2BIG;
2355         if (nent >= cpuid->nent)
2356                 goto out_free;
2357
2358         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2359         limit = cpuid_entries[nent - 1].eax;
2360         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2361                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2362                              &nent, cpuid->nent);
2363
2364
2365
2366         r = -E2BIG;
2367         if (nent >= cpuid->nent)
2368                 goto out_free;
2369
2370         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2371                      cpuid->nent);
2372
2373         r = -E2BIG;
2374         if (nent >= cpuid->nent)
2375                 goto out_free;
2376
2377         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2378                      cpuid->nent);
2379
2380         r = -E2BIG;
2381         if (nent >= cpuid->nent)
2382                 goto out_free;
2383
2384         r = -EFAULT;
2385         if (copy_to_user(entries, cpuid_entries,
2386                          nent * sizeof(struct kvm_cpuid_entry2)))
2387                 goto out_free;
2388         cpuid->nent = nent;
2389         r = 0;
2390
2391 out_free:
2392         vfree(cpuid_entries);
2393 out:
2394         return r;
2395 }
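
/*
 * Userspace normally calls KVM_GET_SUPPORTED_CPUID on the /dev/kvm fd with a
 * generous nent, retries on E2BIG, and then feeds (a possibly trimmed copy
 * of) the result to KVM_SET_CPUID2 on each vcpu.  Illustrative sketch,
 * assuming <linux/kvm.h>, <sys/ioctl.h>, <stdlib.h>, <errno.h> and open
 * kvm_fd/vcpu_fd descriptors:
 *
 *	int nent = 64;
 *	struct kvm_cpuid2 *cpuid;
 *
 *	for (;;) {
 *		cpuid = calloc(1, sizeof(*cpuid) +
 *			       nent * sizeof(struct kvm_cpuid_entry2));
 *		cpuid->nent = nent;
 *		if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
 *			break;
 *		free(cpuid);
 *		if (errno != E2BIG)
 *			return -1;
 *		nent *= 2;
 *	}
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 */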
2396
2397 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2398                                     struct kvm_lapic_state *s)
2399 {
2400         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2401
2402         return 0;
2403 }
2404
2405 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2406                                     struct kvm_lapic_state *s)
2407 {
2408         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2409         kvm_apic_post_state_restore(vcpu);
2410         update_cr8_intercept(vcpu);
2411
2412         return 0;
2413 }
2414
2415 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2416                                     struct kvm_interrupt *irq)
2417 {
2418         if (irq->irq < 0 || irq->irq >= 256)
2419                 return -EINVAL;
2420         if (irqchip_in_kernel(vcpu->kvm))
2421                 return -ENXIO;
2422
2423         kvm_queue_interrupt(vcpu, irq->irq, false);
2424         kvm_make_request(KVM_REQ_EVENT, vcpu);
2425
2426         return 0;
2427 }
2428
2429 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2430 {
2431         kvm_inject_nmi(vcpu);
2432
2433         return 0;
2434 }
2435
2436 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2437                                            struct kvm_tpr_access_ctl *tac)
2438 {
2439         if (tac->flags)
2440                 return -EINVAL;
2441         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2442         return 0;
2443 }
2444
2445 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2446                                         u64 mcg_cap)
2447 {
2448         int r;
2449         unsigned bank_num = mcg_cap & 0xff, bank;
2450
2451         r = -EINVAL;
2452         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2453                 goto out;
2454         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2455                 goto out;
2456         r = 0;
2457         vcpu->arch.mcg_cap = mcg_cap;
2458         /* Init IA32_MCG_CTL to all 1s */
2459         if (mcg_cap & MCG_CTL_P)
2460                 vcpu->arch.mcg_ctl = ~(u64)0;
2461         /* Init IA32_MCi_CTL to all 1s */
2462         for (bank = 0; bank < bank_num; bank++)
2463                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2464 out:
2465         return r;
2466 }
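
/*
 * mcg_cap encoding relied on above: bits 7:0 carry the bank count (must be
 * non-zero and below KVM_MAX_MCE_BANKS) and MCG_CTL_P advertises IA32_MCG_CTL.
 * Userspace typically queries KVM_X86_GET_MCE_CAP_SUPPORTED first and hands
 * back a subset of that mask together with its chosen bank count via
 * KVM_X86_SETUP_MCE.
 */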
2467
2468 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2469                                       struct kvm_x86_mce *mce)
2470 {
2471         u64 mcg_cap = vcpu->arch.mcg_cap;
2472         unsigned bank_num = mcg_cap & 0xff;
2473         u64 *banks = vcpu->arch.mce_banks;
2474
2475         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2476                 return -EINVAL;
2477         /*
2478          * if IA32_MCG_CTL is not all 1s, the uncorrected error
2479          * reporting is disabled
2480          */
2481         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2482             vcpu->arch.mcg_ctl != ~(u64)0)
2483                 return 0;
2484         banks += 4 * mce->bank;
2485         /*
2486          * if IA32_MCi_CTL is not all 1s, the uncorrected error
2487          * reporting is disabled for the bank
2488          */
2489         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2490                 return 0;
2491         if (mce->status & MCI_STATUS_UC) {
2492                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2493                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2494                         printk(KERN_DEBUG "kvm: set_mce: "
2495                                "injects mce exception while "
2496                                "previous one is in progress!\n");
2497                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2498                         return 0;
2499                 }
2500                 if (banks[1] & MCI_STATUS_VAL)
2501                         mce->status |= MCI_STATUS_OVER;
2502                 banks[2] = mce->addr;
2503                 banks[3] = mce->misc;
2504                 vcpu->arch.mcg_status = mce->mcg_status;
2505                 banks[1] = mce->status;
2506                 kvm_queue_exception(vcpu, MC_VECTOR);
2507         } else if (!(banks[1] & MCI_STATUS_VAL)
2508                    || !(banks[1] & MCI_STATUS_UC)) {
2509                 if (banks[1] & MCI_STATUS_VAL)
2510                         mce->status |= MCI_STATUS_OVER;
2511                 banks[2] = mce->addr;
2512                 banks[3] = mce->misc;
2513                 banks[1] = mce->status;
2514         } else
2515                 banks[1] |= MCI_STATUS_OVER;
2516         return 0;
2517 }
2518
2519 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2520                                                struct kvm_vcpu_events *events)
2521 {
2522         events->exception.injected =
2523                 vcpu->arch.exception.pending &&
2524                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2525         events->exception.nr = vcpu->arch.exception.nr;
2526         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2527         events->exception.error_code = vcpu->arch.exception.error_code;
2528
2529         events->interrupt.injected =
2530                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2531         events->interrupt.nr = vcpu->arch.interrupt.nr;
2532         events->interrupt.soft = 0;
2533         events->interrupt.shadow =
2534                 kvm_x86_ops->get_interrupt_shadow(vcpu,
2535                         KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2536
2537         events->nmi.injected = vcpu->arch.nmi_injected;
2538         events->nmi.pending = vcpu->arch.nmi_pending;
2539         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2540
2541         events->sipi_vector = vcpu->arch.sipi_vector;
2542
2543         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2544                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2545                          | KVM_VCPUEVENT_VALID_SHADOW);
2546 }
2547
2548 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2549                                               struct kvm_vcpu_events *events)
2550 {
2551         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2552                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2553                               | KVM_VCPUEVENT_VALID_SHADOW))
2554                 return -EINVAL;
2555
2556         vcpu->arch.exception.pending = events->exception.injected;
2557         vcpu->arch.exception.nr = events->exception.nr;
2558         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2559         vcpu->arch.exception.error_code = events->exception.error_code;
2560
2561         vcpu->arch.interrupt.pending = events->interrupt.injected;
2562         vcpu->arch.interrupt.nr = events->interrupt.nr;
2563         vcpu->arch.interrupt.soft = events->interrupt.soft;
2564         if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2565                 kvm_pic_clear_isr_ack(vcpu->kvm);
2566         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2567                 kvm_x86_ops->set_interrupt_shadow(vcpu,
2568                                                   events->interrupt.shadow);
2569
2570         vcpu->arch.nmi_injected = events->nmi.injected;
2571         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2572                 vcpu->arch.nmi_pending = events->nmi.pending;
2573         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2574
2575         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2576                 vcpu->arch.sipi_vector = events->sipi_vector;
2577
2578         kvm_make_request(KVM_REQ_EVENT, vcpu);
2579
2580         return 0;
2581 }
2582
2583 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2584                                              struct kvm_debugregs *dbgregs)
2585 {
2586         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2587         dbgregs->dr6 = vcpu->arch.dr6;
2588         dbgregs->dr7 = vcpu->arch.dr7;
2589         dbgregs->flags = 0;
2590 }
2591
2592 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2593                                             struct kvm_debugregs *dbgregs)
2594 {
2595         if (dbgregs->flags)
2596                 return -EINVAL;
2597
2598         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2599         vcpu->arch.dr6 = dbgregs->dr6;
2600         vcpu->arch.dr7 = dbgregs->dr7;
2601
2602         return 0;
2603 }
2604
2605 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2606                                          struct kvm_xsave *guest_xsave)
2607 {
2608         if (cpu_has_xsave)
2609                 memcpy(guest_xsave->region,
2610                         &vcpu->arch.guest_fpu.state->xsave,
2611                         xstate_size);
2612         else {
2613                 memcpy(guest_xsave->region,
2614                         &vcpu->arch.guest_fpu.state->fxsave,
2615                         sizeof(struct i387_fxsave_struct));
2616                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2617                         XSTATE_FPSSE;
2618         }
2619 }
2620
2621 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2622                                         struct kvm_xsave *guest_xsave)
2623 {
2624         u64 xstate_bv =
2625                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2626
2627         if (cpu_has_xsave)
2628                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2629                         guest_xsave->region, xstate_size);
2630         else {
2631                 if (xstate_bv & ~XSTATE_FPSSE)
2632                         return -EINVAL;
2633                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2634                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
2635         }
2636         return 0;
2637 }
2638
2639 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2640                                         struct kvm_xcrs *guest_xcrs)
2641 {
2642         if (!cpu_has_xsave) {
2643                 guest_xcrs->nr_xcrs = 0;
2644                 return;
2645         }
2646
2647         guest_xcrs->nr_xcrs = 1;
2648         guest_xcrs->flags = 0;
2649         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2650         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2651 }
2652
2653 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2654                                        struct kvm_xcrs *guest_xcrs)
2655 {
2656         int i, r = 0;
2657
2658         if (!cpu_has_xsave)
2659                 return -EINVAL;
2660
2661         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2662                 return -EINVAL;
2663
2664         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2665                 /* Only support XCR0 currently */
2666                 if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
2667                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2668                                 guest_xcrs->xcrs[0].value);
2669                         break;
2670                 }
2671         if (r)
2672                 r = -EINVAL;
2673         return r;
2674 }
2675
2676 long kvm_arch_vcpu_ioctl(struct file *filp,
2677                          unsigned int ioctl, unsigned long arg)
2678 {
2679         struct kvm_vcpu *vcpu = filp->private_data;
2680         void __user *argp = (void __user *)arg;
2681         int r;
2682         union {
2683                 struct kvm_lapic_state *lapic;
2684                 struct kvm_xsave *xsave;
2685                 struct kvm_xcrs *xcrs;
2686                 void *buffer;
2687         } u;
2688
2689         u.buffer = NULL;
2690         switch (ioctl) {
2691         case KVM_GET_LAPIC: {
2692                 r = -EINVAL;
2693                 if (!vcpu->arch.apic)
2694                         goto out;
2695                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2696
2697                 r = -ENOMEM;
2698                 if (!u.lapic)
2699                         goto out;
2700                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2701                 if (r)
2702                         goto out;
2703                 r = -EFAULT;
2704                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2705                         goto out;
2706                 r = 0;
2707                 break;
2708         }
2709         case KVM_SET_LAPIC: {
2710                 r = -EINVAL;
2711                 if (!vcpu->arch.apic)
2712                         goto out;
2713                 u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2714                 r = -ENOMEM;
2715                 if (!u.lapic)
2716                         goto out;
2717                 r = -EFAULT;
2718                 if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
2719                         goto out;
2720                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2721                 if (r)
2722                         goto out;
2723                 r = 0;
2724                 break;
2725         }
2726         case KVM_INTERRUPT: {
2727                 struct kvm_interrupt irq;
2728
2729                 r = -EFAULT;
2730                 if (copy_from_user(&irq, argp, sizeof irq))
2731                         goto out;
2732                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2733                 if (r)
2734                         goto out;
2735                 r = 0;
2736                 break;
2737         }
2738         case KVM_NMI: {
2739                 r = kvm_vcpu_ioctl_nmi(vcpu);
2740                 if (r)
2741                         goto out;
2742                 r = 0;
2743                 break;
2744         }
2745         case KVM_SET_CPUID: {
2746                 struct kvm_cpuid __user *cpuid_arg = argp;
2747                 struct kvm_cpuid cpuid;
2748
2749                 r = -EFAULT;
2750                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2751                         goto out;
2752                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2753                 if (r)
2754                         goto out;
2755                 break;
2756         }
2757         case KVM_SET_CPUID2: {
2758                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2759                 struct kvm_cpuid2 cpuid;
2760
2761                 r = -EFAULT;
2762                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2763                         goto out;
2764                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2765                                               cpuid_arg->entries);
2766                 if (r)
2767                         goto out;
2768                 break;
2769         }
2770         case KVM_GET_CPUID2: {
2771                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2772                 struct kvm_cpuid2 cpuid;
2773
2774                 r = -EFAULT;
2775                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2776                         goto out;
2777                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2778                                               cpuid_arg->entries);
2779                 if (r)
2780                         goto out;
2781                 r = -EFAULT;
2782                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2783                         goto out;
2784                 r = 0;
2785                 break;
2786         }
2787         case KVM_GET_MSRS:
2788                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2789                 break;
2790         case KVM_SET_MSRS:
2791                 r = msr_io(vcpu, argp, do_set_msr, 0);
2792                 break;
2793         case KVM_TPR_ACCESS_REPORTING: {
2794                 struct kvm_tpr_access_ctl tac;
2795
2796                 r = -EFAULT;
2797                 if (copy_from_user(&tac, argp, sizeof tac))
2798                         goto out;
2799                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2800                 if (r)
2801                         goto out;
2802                 r = -EFAULT;
2803                 if (copy_to_user(argp, &tac, sizeof tac))
2804                         goto out;
2805                 r = 0;
2806                 break;
2807         };
2808         case KVM_SET_VAPIC_ADDR: {
2809                 struct kvm_vapic_addr va;
2810
2811                 r = -EINVAL;
2812                 if (!irqchip_in_kernel(vcpu->kvm))
2813                         goto out;
2814                 r = -EFAULT;
2815                 if (copy_from_user(&va, argp, sizeof va))
2816                         goto out;
2817                 r = 0;
2818                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2819                 break;
2820         }
2821         case KVM_X86_SETUP_MCE: {
2822                 u64 mcg_cap;
2823
2824                 r = -EFAULT;
2825                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2826                         goto out;
2827                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2828                 break;
2829         }
2830         case KVM_X86_SET_MCE: {
2831                 struct kvm_x86_mce mce;
2832
2833                 r = -EFAULT;
2834                 if (copy_from_user(&mce, argp, sizeof mce))
2835                         goto out;
2836                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2837                 break;
2838         }
2839         case KVM_GET_VCPU_EVENTS: {
2840                 struct kvm_vcpu_events events;
2841
2842                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2843
2844                 r = -EFAULT;
2845                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2846                         break;
2847                 r = 0;
2848                 break;
2849         }
2850         case KVM_SET_VCPU_EVENTS: {
2851                 struct kvm_vcpu_events events;
2852
2853                 r = -EFAULT;
2854                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2855                         break;
2856
2857                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2858                 break;
2859         }
2860         case KVM_GET_DEBUGREGS: {
2861                 struct kvm_debugregs dbgregs;
2862
2863                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2864
2865                 r = -EFAULT;
2866                 if (copy_to_user(argp, &dbgregs,
2867                                  sizeof(struct kvm_debugregs)))
2868                         break;
2869                 r = 0;
2870                 break;
2871         }
2872         case KVM_SET_DEBUGREGS: {
2873                 struct kvm_debugregs dbgregs;
2874
2875                 r = -EFAULT;
2876                 if (copy_from_user(&dbgregs, argp,
2877                                    sizeof(struct kvm_debugregs)))
2878                         break;
2879
2880                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2881                 break;
2882         }
2883         case KVM_GET_XSAVE: {
2884                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2885                 r = -ENOMEM;
2886                 if (!u.xsave)
2887                         break;
2888
2889                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2890
2891                 r = -EFAULT;
2892                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2893                         break;
2894                 r = 0;
2895                 break;
2896         }
2897         case KVM_SET_XSAVE: {
2898                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2899                 r = -ENOMEM;
2900                 if (!u.xsave)
2901                         break;
2902
2903                 r = -EFAULT;
2904                 if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
2905                         break;
2906
2907                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2908                 break;
2909         }
2910         case KVM_GET_XCRS: {
2911                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2912                 r = -ENOMEM;
2913                 if (!u.xcrs)
2914                         break;
2915
2916                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2917
2918                 r = -EFAULT;
2919                 if (copy_to_user(argp, u.xcrs,
2920                                  sizeof(struct kvm_xcrs)))
2921                         break;
2922                 r = 0;
2923                 break;
2924         }
2925         case KVM_SET_XCRS: {
2926                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2927                 r = -ENOMEM;
2928                 if (!u.xcrs)
2929                         break;
2930
2931                 r = -EFAULT;
2932                 if (copy_from_user(u.xcrs, argp,
2933                                    sizeof(struct kvm_xcrs)))
2934                         break;
2935
2936                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2937                 break;
2938         }
2939         default:
2940                 r = -EINVAL;
2941         }
2942 out:
2943         kfree(u.buffer);
2944         return r;
2945 }
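
/*
 * The KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS pair handled above is what
 * keeps pending exceptions, interrupts and NMIs alive across save/restore.
 * A minimal round-trip sketch (illustrative, assuming <linux/kvm.h>,
 * <sys/ioctl.h> and a vcpu_fd):
 *
 *	struct kvm_vcpu_events ev;
 *
 *	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev);
 *	... transfer ev to the destination ...
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
 *
 * Clearing valid bits in ev.flags before KVM_SET_VCPU_EVENTS leaves the
 * corresponding state (NMI pending, SIPI vector, interrupt shadow) untouched,
 * as kvm_vcpu_ioctl_x86_set_vcpu_events() shows.
 */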
2946
2947 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2948 {
2949         int ret;
2950
2951         if (addr > (unsigned int)(-3 * PAGE_SIZE))
2952                 return -1;
2953         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2954         return ret;
2955 }
2956
2957 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2958                                               u64 ident_addr)
2959 {
2960         kvm->arch.ept_identity_map_addr = ident_addr;
2961         return 0;
2962 }
2963
2964 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2965                                           u32 kvm_nr_mmu_pages)
2966 {
2967         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2968                 return -EINVAL;
2969
2970         mutex_lock(&kvm->slots_lock);
2971         spin_lock(&kvm->mmu_lock);
2972
2973         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2974         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2975
2976         spin_unlock(&kvm->mmu_lock);
2977         mutex_unlock(&kvm->slots_lock);
2978         return 0;
2979 }
2980
2981 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2982 {
2983         return kvm->arch.n_max_mmu_pages;
2984 }
2985
2986 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2987 {
2988         int r;
2989
2990         r = 0;
2991         switch (chip->chip_id) {
2992         case KVM_IRQCHIP_PIC_MASTER:
2993                 memcpy(&chip->chip.pic,
2994                         &pic_irqchip(kvm)->pics[0],
2995                         sizeof(struct kvm_pic_state));
2996                 break;
2997         case KVM_IRQCHIP_PIC_SLAVE:
2998                 memcpy(&chip->chip.pic,
2999                         &pic_irqchip(kvm)->pics[1],
3000                         sizeof(struct kvm_pic_state));
3001                 break;
3002         case KVM_IRQCHIP_IOAPIC:
3003                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3004                 break;
3005         default:
3006                 r = -EINVAL;
3007                 break;
3008         }
3009         return r;
3010 }
3011
3012 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3013 {
3014         int r;
3015
3016         r = 0;
3017         switch (chip->chip_id) {
3018         case KVM_IRQCHIP_PIC_MASTER:
3019                 raw_spin_lock(&pic_irqchip(kvm)->lock);
3020                 memcpy(&pic_irqchip(kvm)->pics[0],
3021                         &chip->chip.pic,
3022                         sizeof(struct kvm_pic_state));
3023                 raw_spin_unlock(&pic_irqchip(kvm)->lock);
3024                 break;
3025         case KVM_IRQCHIP_PIC_SLAVE:
3026                 raw_spin_lock(&pic_irqchip(kvm)->lock);
3027                 memcpy(&pic_irqchip(kvm)->pics[1],
3028                         &chip->chip.pic,
3029                         sizeof(struct kvm_pic_state));
3030                 raw_spin_unlock(&pic_irqchip(kvm)->lock);
3031                 break;
3032         case KVM_IRQCHIP_IOAPIC:
3033                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3034                 break;
3035         default:
3036                 r = -EINVAL;
3037                 break;
3038         }
3039         kvm_pic_update_irq(pic_irqchip(kvm));
3040         return r;
3041 }
3042
3043 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3044 {
3045         int r = 0;
3046
3047         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3048         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
3049         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3050         return r;
3051 }
3052
3053 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3054 {
3055         int r = 0;
3056
3057         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3058         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3059         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
3060         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3061         return r;
3062 }
3063
3064 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3065 {
3066         int r = 0;
3067
3068         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3069         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3070                 sizeof(ps->channels));
3071         ps->flags = kvm->arch.vpit->pit_state.flags;
3072         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3073         return r;
3074 }
3075
3076 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3077 {
3078         int r = 0, start = 0;
3079         u32 prev_legacy, cur_legacy;
3080         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3081         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3082         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3083         if (!prev_legacy && cur_legacy)
3084                 start = 1;
3085         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3086                sizeof(kvm->arch.vpit->pit_state.channels));
3087         kvm->arch.vpit->pit_state.flags = ps->flags;
3088         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3089         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3090         return r;
3091 }
3092
3093 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3094                                  struct kvm_reinject_control *control)
3095 {
3096         if (!kvm->arch.vpit)
3097                 return -ENXIO;
3098         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3099         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
3100         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3101         return 0;
3102 }
3103
3104 /*
3105  * Get (and clear) the dirty memory log for a memory slot.
3106  */
3107 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3108                                       struct kvm_dirty_log *log)
3109 {
3110         int r, i;
3111         struct kvm_memory_slot *memslot;
3112         unsigned long n;
3113         unsigned long is_dirty = 0;
3114
3115         mutex_lock(&kvm->slots_lock);
3116
3117         r = -EINVAL;
3118         if (log->slot >= KVM_MEMORY_SLOTS)
3119                 goto out;
3120
3121         memslot = &kvm->memslots->memslots[log->slot];
3122         r = -ENOENT;
3123         if (!memslot->dirty_bitmap)
3124                 goto out;
3125
3126         n = kvm_dirty_bitmap_bytes(memslot);
3127
3128         for (i = 0; !is_dirty && i < n/sizeof(long); i++)
3129                 is_dirty = memslot->dirty_bitmap[i];
3130
3131         /* If nothing is dirty, don't bother messing with page tables. */
3132         if (is_dirty) {
3133                 struct kvm_memslots *slots, *old_slots;
3134                 unsigned long *dirty_bitmap;
3135
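                /*
                 * Roughly: write-protect the slot, publish a copy of the
                 * memslots array that points at a fresh zeroed bitmap,
                 * wait for SRCU readers to drop the old array, and only
                 * then copy the old (now quiescent) bitmap to userspace.
                 */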
3136                 spin_lock(&kvm->mmu_lock);
3137                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3138                 spin_unlock(&kvm->mmu_lock);
3139
3140                 r = -ENOMEM;
3141                 dirty_bitmap = vmalloc(n);
3142                 if (!dirty_bitmap)
3143                         goto out;
3144                 memset(dirty_bitmap, 0, n);
3145
3146                 r = -ENOMEM;
3147                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
3148                 if (!slots) {
3149                         vfree(dirty_bitmap);
3150                         goto out;
3151                 }
3152                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
3153                 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
3154
3155                 old_slots = kvm->memslots;
3156                 rcu_assign_pointer(kvm->memslots, slots);
3157                 synchronize_srcu_expedited(&kvm->srcu);
3158                 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
3159                 kfree(old_slots);
3160
3161                 r = -EFAULT;
3162                 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
3163                         vfree(dirty_bitmap);
3164                         goto out;
3165                 }
3166                 vfree(dirty_bitmap);
3167         } else {
3168                 r = -EFAULT;
3169                 if (clear_user(log->dirty_bitmap, n))
3170                         goto out;
3171         }
3172
3173         r = 0;
3174 out:
3175         mutex_unlock(&kvm->slots_lock);
3176         return r;
3177 }
3178
3179 long kvm_arch_vm_ioctl(struct file *filp,
3180                        unsigned int ioctl, unsigned long arg)
3181 {
3182         struct kvm *kvm = filp->private_data;
3183         void __user *argp = (void __user *)arg;
3184         int r = -ENOTTY;
3185         /*
3186          * This union makes it completely explicit to gcc-3.x
3187          * that these variables' stack usage should be
3188          * combined, not added together.
3189          */
3190         union {
3191                 struct kvm_pit_state ps;
3192                 struct kvm_pit_state2 ps2;
3193                 struct kvm_pit_config pit_config;
3194         } u;
3195
3196         switch (ioctl) {
3197         case KVM_SET_TSS_ADDR:
3198                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3199                 if (r < 0)
3200                         goto out;
3201                 break;
3202         case KVM_SET_IDENTITY_MAP_ADDR: {
3203                 u64 ident_addr;
3204
3205                 r = -EFAULT;
3206                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3207                         goto out;
3208                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3209                 if (r < 0)
3210                         goto out;
3211                 break;
3212         }
3213         case KVM_SET_NR_MMU_PAGES:
3214                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3215                 if (r)
3216                         goto out;
3217                 break;
3218         case KVM_GET_NR_MMU_PAGES:
3219                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3220                 break;
3221         case KVM_CREATE_IRQCHIP: {
3222                 struct kvm_pic *vpic;
3223
3224                 mutex_lock(&kvm->lock);
3225                 r = -EEXIST;
3226                 if (kvm->arch.vpic)
3227                         goto create_irqchip_unlock;
3228                 r = -ENOMEM;
3229                 vpic = kvm_create_pic(kvm);
3230                 if (vpic) {
3231                         r = kvm_ioapic_init(kvm);
3232                         if (r) {
3233                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3234                                                           &vpic->dev);
3235                                 kfree(vpic);
3236                                 goto create_irqchip_unlock;
3237                         }
3238                 } else
3239                         goto create_irqchip_unlock;
3240                 smp_wmb();
3241                 kvm->arch.vpic = vpic;
3242                 smp_wmb();
3243                 r = kvm_setup_default_irq_routing(kvm);
3244                 if (r) {
3245                         mutex_lock(&kvm->irq_lock);
3246                         kvm_ioapic_destroy(kvm);
3247                         kvm_destroy_pic(kvm);
3248                         mutex_unlock(&kvm->irq_lock);
3249                 }
3250         create_irqchip_unlock:
3251                 mutex_unlock(&kvm->lock);
3252                 break;
3253         }
3254         case KVM_CREATE_PIT:
3255                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3256                 goto create_pit;
3257         case KVM_CREATE_PIT2:
3258                 r = -EFAULT;
3259                 if (copy_from_user(&u.pit_config, argp,
3260                                    sizeof(struct kvm_pit_config)))
3261                         goto out;
3262         create_pit:
3263                 mutex_lock(&kvm->slots_lock);
3264                 r = -EEXIST;
3265                 if (kvm->arch.vpit)
3266                         goto create_pit_unlock;
3267                 r = -ENOMEM;
3268                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3269                 if (kvm->arch.vpit)
3270                         r = 0;
3271         create_pit_unlock:
3272                 mutex_unlock(&kvm->slots_lock);
3273                 break;
3274         case KVM_IRQ_LINE_STATUS:
3275         case KVM_IRQ_LINE: {
3276                 struct kvm_irq_level irq_event;
3277
3278                 r = -EFAULT;
3279                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3280                         goto out;
3281                 r = -ENXIO;
3282                 if (irqchip_in_kernel(kvm)) {
3283                         __s32 status;
3284                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3285                                         irq_event.irq, irq_event.level);
3286                         if (ioctl == KVM_IRQ_LINE_STATUS) {
3287                                 r = -EFAULT;
3288                                 irq_event.status = status;
3289                                 if (copy_to_user(argp, &irq_event,
3290                                                         sizeof irq_event))
3291                                         goto out;
3292                         }
3293                         r = 0;
3294                 }
3295                 break;
3296         }
3297         case KVM_GET_IRQCHIP: {
3298                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3299                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3300
3301                 r = -ENOMEM;
3302                 if (!chip)
3303                         goto out;
3304                 r = -EFAULT;
3305                 if (copy_from_user(chip, argp, sizeof *chip))
3306                         goto get_irqchip_out;
3307                 r = -ENXIO;
3308                 if (!irqchip_in_kernel(kvm))
3309                         goto get_irqchip_out;
3310                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3311                 if (r)
3312                         goto get_irqchip_out;
3313                 r = -EFAULT;
3314                 if (copy_to_user(argp, chip, sizeof *chip))
3315                         goto get_irqchip_out;
3316                 r = 0;
3317         get_irqchip_out:
3318                 kfree(chip);
3319                 if (r)
3320                         goto out;
3321                 break;
3322         }
3323         case KVM_SET_IRQCHIP: {
3324                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3325                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3326
3327                 r = -ENOMEM;
3328                 if (!chip)
3329                         goto out;
3330                 r = -EFAULT;
3331                 if (copy_from_user(chip, argp, sizeof *chip))
3332                         goto set_irqchip_out;
3333                 r = -ENXIO;
3334                 if (!irqchip_in_kernel(kvm))
3335                         goto set_irqchip_out;
3336                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3337                 if (r)
3338                         goto set_irqchip_out;
3339                 r = 0;
3340         set_irqchip_out:
3341                 kfree(chip);
3342                 if (r)
3343                         goto out;
3344                 break;
3345         }
3346         case KVM_GET_PIT: {
3347                 r = -EFAULT;
3348                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3349                         goto out;
3350                 r = -ENXIO;
3351                 if (!kvm->arch.vpit)
3352                         goto out;
3353                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3354                 if (r)
3355                         goto out;
3356                 r = -EFAULT;
3357                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3358                         goto out;
3359                 r = 0;
3360                 break;
3361         }
3362         case KVM_SET_PIT: {
3363                 r = -EFAULT;
3364                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3365                         goto out;
3366                 r = -ENXIO;
3367                 if (!kvm->arch.vpit)
3368                         goto out;
3369                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3370                 if (r)
3371                         goto out;
3372                 r = 0;
3373                 break;
3374         }
3375         case KVM_GET_PIT2: {
3376                 r = -ENXIO;
3377                 if (!kvm->arch.vpit)
3378                         goto out;
3379                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3380                 if (r)
3381                         goto out;
3382                 r = -EFAULT;
3383                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3384                         goto out;
3385                 r = 0;
3386                 break;
3387         }
3388         case KVM_SET_PIT2: {
3389                 r = -EFAULT;
3390                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3391                         goto out;
3392                 r = -ENXIO;
3393                 if (!kvm->arch.vpit)
3394                         goto out;
3395                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3396                 if (r)
3397                         goto out;
3398                 r = 0;
3399                 break;
3400         }
3401         case KVM_REINJECT_CONTROL: {
3402                 struct kvm_reinject_control control;
3403                 r =  -EFAULT;
3404                 if (copy_from_user(&control, argp, sizeof(control)))
3405                         goto out;
3406                 r = kvm_vm_ioctl_reinject(kvm, &control);
3407                 if (r)
3408                         goto out;
3409                 r = 0;
3410                 break;
3411         }
3412         case KVM_XEN_HVM_CONFIG: {
3413                 r = -EFAULT;
3414                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3415                                    sizeof(struct kvm_xen_hvm_config)))
3416                         goto out;
3417                 r = -EINVAL;
3418                 if (kvm->arch.xen_hvm_config.flags)
3419                         goto out;
3420                 r = 0;
3421                 break;
3422         }
3423         case KVM_SET_CLOCK: {
3424                 struct kvm_clock_data user_ns;
3425                 u64 now_ns;
3426                 s64 delta;
3427
3428                 r = -EFAULT;
3429                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3430                         goto out;
3431
3432                 r = -EINVAL;
3433                 if (user_ns.flags)
3434                         goto out;
3435
3436                 r = 0;
3437                 now_ns = get_kernel_ns();
3438                 delta = user_ns.clock - now_ns;
3439                 kvm->arch.kvmclock_offset = delta;
3440                 break;
3441         }
3442         case KVM_GET_CLOCK: {
3443                 struct kvm_clock_data user_ns;
3444                 u64 now_ns;
3445
3446                 now_ns = get_kernel_ns();
3447                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3448                 user_ns.flags = 0;
3449
3450                 r = -EFAULT;
3451                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3452                         goto out;
3453                 r = 0;
3454                 break;
3455         }
3456
3457         default:
3458                 ;
3459         }
3460 out:
3461         return r;
3462 }
3463
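/*
 * A note on the loop below: the KVM-specific MSRs at the start of
 * msrs_to_save are kept unconditionally, the remaining entries are
 * probed with rdmsr_safe(), and the array is compacted in place so that
 * num_msrs_to_save only covers MSRs the host actually implements.
 */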
3464 static void kvm_init_msr_list(void)
3465 {
3466         u32 dummy[2];
3467         unsigned i, j;
3468
3469         /* skip the first MSRs in the list; they are KVM-specific */
3470         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3471                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3472                         continue;
3473                 if (j < i)
3474                         msrs_to_save[j] = msrs_to_save[i];
3475                 j++;
3476         }
3477         num_msrs_to_save = j;
3478 }
3479
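/*
 * MMIO dispatch helpers: try the in-kernel local APIC page first, then
 * the generic KVM_MMIO_BUS.  A return value of 0 means an in-kernel
 * device handled the access; otherwise the caller falls back to an MMIO
 * exit to userspace.
 */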
3480 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3481                            const void *v)
3482 {
3483         if (vcpu->arch.apic &&
3484             !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3485                 return 0;
3486
3487         return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3488 }
3489
3490 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3491 {
3492         if (vcpu->arch.apic &&
3493             !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3494                 return 0;
3495
3496         return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3497 }
3498
3499 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3500                         struct kvm_segment *var, int seg)
3501 {
3502         kvm_x86_ops->set_segment(vcpu, var, seg);
3503 }
3504
3505 void kvm_get_segment(struct kvm_vcpu *vcpu,
3506                      struct kvm_segment *var, int seg)
3507 {
3508         kvm_x86_ops->get_segment(vcpu, var, seg);
3509 }
3510
3511 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3512 {
3513         return gpa;
3514 }
3515
3516 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3517 {
3518         gpa_t t_gpa;
3519         u32 error;
3520
3521         BUG_ON(!mmu_is_nested(vcpu));
3522
3523         /* NPT walks are always user-walks */
3524         access |= PFERR_USER_MASK;
3525         t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
3526         if (t_gpa == UNMAPPED_GVA)
3527                 vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
3528
3529         return t_gpa;
3530 }
3531
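/*
 * The gva_to_gpa helpers below build the page-fault-style access mask
 * from the current CPL (PFERR_USER_MASK when CPL == 3) plus the intent
 * of the access (fetch or write), and then let the active walk_mmu
 * translate the guest virtual address.
 */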
3532 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3533 {
3534         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3535         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
3536 }
3537
3538  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3539 {
3540         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3541         access |= PFERR_FETCH_MASK;
3542         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
3543 }
3544
3545 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3546 {
3547         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3548         access |= PFERR_WRITE_MASK;
3549         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
3550 }
3551
3552 /* used to access any guest's mapped memory without checking CPL */
3553 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3554 {
3555         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
3556 }
3557
3558 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3559                                       struct kvm_vcpu *vcpu, u32 access,
3560                                       u32 *error)
3561 {
3562         void *data = val;
3563         int r = X86EMUL_CONTINUE;
3564
3565         while (bytes) {
3566                 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3567                                                             error);
3568                 unsigned offset = addr & (PAGE_SIZE-1);
3569                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3570                 int ret;
3571
3572                 if (gpa == UNMAPPED_GVA) {
3573                         r = X86EMUL_PROPAGATE_FAULT;
3574                         goto out;
3575                 }
3576                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3577                 if (ret < 0) {
3578                         r = X86EMUL_IO_NEEDED;
3579                         goto out;
3580                 }
3581
3582                 bytes -= toread;
3583                 data += toread;
3584                 addr += toread;
3585         }
3586 out:
3587         return r;
3588 }
3589
3590 /* used for instruction fetching */
3591 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3592                                 struct kvm_vcpu *vcpu, u32 *error)
3593 {
3594         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3595         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3596                                           access | PFERR_FETCH_MASK, error);
3597 }
3598
3599 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3600                                struct kvm_vcpu *vcpu, u32 *error)
3601 {
3602         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3603         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3604                                           error);
3605 }
3606
3607 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3608                                struct kvm_vcpu *vcpu, u32 *error)
3609 {
3610         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3611 }
3612
3613 static int kvm_write_guest_virt_system(gva_t addr, void *val,
3614                                        unsigned int bytes,
3615                                        struct kvm_vcpu *vcpu,
3616                                        u32 *error)
3617 {
3618         void *data = val;
3619         int r = X86EMUL_CONTINUE;
3620
3621         while (bytes) {
3622                 gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
3623                                                              PFERR_WRITE_MASK,
3624                                                              error);
3625                 unsigned offset = addr & (PAGE_SIZE-1);
3626                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3627                 int ret;
3628
3629                 if (gpa == UNMAPPED_GVA) {
3630                         r = X86EMUL_PROPAGATE_FAULT;
3631                         goto out;
3632                 }
3633                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3634                 if (ret < 0) {
3635                         r = X86EMUL_IO_NEEDED;
3636                         goto out;
3637                 }
3638
3639                 bytes -= towrite;
3640                 data += towrite;
3641                 addr += towrite;
3642         }
3643 out:
3644         return r;
3645 }
3646
3647 static int emulator_read_emulated(unsigned long addr,
3648                                   void *val,
3649                                   unsigned int bytes,
3650                                   unsigned int *error_code,
3651                                   struct kvm_vcpu *vcpu)
3652 {
3653         gpa_t                 gpa;
3654
3655         if (vcpu->mmio_read_completed) {
3656                 memcpy(val, vcpu->mmio_data, bytes);
3657                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3658                                vcpu->mmio_phys_addr, *(u64 *)val);
3659                 vcpu->mmio_read_completed = 0;
3660                 return X86EMUL_CONTINUE;
3661         }
3662
3663         gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
3664
3665         if (gpa == UNMAPPED_GVA)
3666                 return X86EMUL_PROPAGATE_FAULT;
3667
3668         /* For APIC access vmexit */
3669         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3670                 goto mmio;
3671
3672         if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
3673                                 == X86EMUL_CONTINUE)
3674                 return X86EMUL_CONTINUE;
3675
3676 mmio:
3677         /*
3678          * Is this MMIO handled locally?
3679          */
3680         if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3681                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
3682                 return X86EMUL_CONTINUE;
3683         }
3684
3685         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3686
3687         vcpu->mmio_needed = 1;
3688         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3689         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3690         vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3691         vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
3692
3693         return X86EMUL_IO_NEEDED;
3694 }
3695
3696 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3697                           const void *val, int bytes)
3698 {
3699         int ret;
3700
3701         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3702         if (ret < 0)
3703                 return 0;
3704         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
3705         return 1;
3706 }
3707
3708 static int emulator_write_emulated_onepage(unsigned long addr,
3709                                            const void *val,
3710                                            unsigned int bytes,
3711                                            unsigned int *error_code,
3712                                            struct kvm_vcpu *vcpu)
3713 {
3714         gpa_t                 gpa;
3715
3716         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
3717
3718         if (gpa == UNMAPPED_GVA)
3719                 return X86EMUL_PROPAGATE_FAULT;
3720
3721         /* For APIC access vmexit */
3722         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3723                 goto mmio;
3724
3725         if (emulator_write_phys(vcpu, gpa, val, bytes))
3726                 return X86EMUL_CONTINUE;
3727
3728 mmio:
3729         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3730         /*
3731          * Is this MMIO handled locally?
3732          */
3733         if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
3734                 return X86EMUL_CONTINUE;
3735
3736         vcpu->mmio_needed = 1;
3737         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3738         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3739         vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3740         vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
3741         memcpy(vcpu->run->mmio.data, val, bytes);
3742
3743         return X86EMUL_CONTINUE;
3744 }
3745
3746 int emulator_write_emulated(unsigned long addr,
3747                             const void *val,
3748                             unsigned int bytes,
3749                             unsigned int *error_code,
3750                             struct kvm_vcpu *vcpu)
3751 {
3752         /* Crossing a page boundary? */
3753         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3754                 int rc, now;
3755
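                /*
                 * now = number of bytes from addr up to the end of its
                 * page; e.g. with 4K pages and addr == 0xfff, now is 1,
                 * so one byte goes to this page and the rest is handled
                 * by the second call below.
                 */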
3756                 now = -addr & ~PAGE_MASK;
3757                 rc = emulator_write_emulated_onepage(addr, val, now, error_code,
3758                                                      vcpu);
3759                 if (rc != X86EMUL_CONTINUE)
3760                         return rc;
3761                 addr += now;
3762                 val += now;
3763                 bytes -= now;
3764         }
3765         return emulator_write_emulated_onepage(addr, val, bytes, error_code,
3766                                                vcpu);
3767 }
3768
3769 #define CMPXCHG_TYPE(t, ptr, old, new) \
3770         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3771
3772 #ifdef CONFIG_X86_64
3773 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3774 #else
3775 #  define CMPXCHG64(ptr, old, new) \
3776         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3777 #endif
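/*
 * For example, CMPXCHG_TYPE(u32, kaddr, old, new) performs a 32-bit
 * cmpxchg at kaddr and evaluates to true only if the value at kaddr
 * matched *old, i.e. the exchange actually took place.
 */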
3778
3779 static int emulator_cmpxchg_emulated(unsigned long addr,
3780                                      const void *old,
3781                                      const void *new,
3782                                      unsigned int bytes,
3783                                      unsigned int *error_code,
3784                                      struct kvm_vcpu *vcpu)
3785 {
3786         gpa_t gpa;
3787         struct page *page;
3788         char *kaddr;
3789         bool exchanged;
3790
3791         /* a guest's cmpxchg8b has to be emulated atomically */
3792         if (bytes > 8 || (bytes & (bytes - 1)))
3793                 goto emul_write;
3794
3795         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3796
3797         if (gpa == UNMAPPED_GVA ||
3798             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3799                 goto emul_write;
3800
3801         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3802                 goto emul_write;
3803
3804         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3805         if (is_error_page(page)) {
3806                 kvm_release_page_clean(page);
3807                 goto emul_write;
3808         }
3809
3810         kaddr = kmap_atomic(page, KM_USER0);
3811         kaddr += offset_in_page(gpa);
3812         switch (bytes) {
3813         case 1:
3814                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3815                 break;
3816         case 2:
3817                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3818                 break;
3819         case 4:
3820                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3821                 break;
3822         case 8:
3823                 exchanged = CMPXCHG64(kaddr, old, new);
3824                 break;
3825         default:
3826                 BUG();
3827         }
3828         kunmap_atomic(kaddr, KM_USER0);
3829         kvm_release_page_dirty(page);
3830
3831         if (!exchanged)
3832                 return X86EMUL_CMPXCHG_FAILED;
3833
3834         kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
3835
3836         return X86EMUL_CONTINUE;
3837
3838 emul_write:
3839         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3840
3841         return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
3842 }
3843
3844 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3845 {
3846         /* TODO: String I/O for in-kernel devices */
3847         int r;
3848
3849         if (vcpu->arch.pio.in)
3850                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3851                                     vcpu->arch.pio.size, pd);
3852         else
3853                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3854                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
3855                                      pd);
3856         return r;
3857 }
3858
3859
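/*
 * PIO-in through the emulator: returns 1 when the data is already
 * available (a previously started PIO has completed) or an in-kernel
 * device serviced the port, with the bytes copied into val; returns 0
 * after filling in vcpu->run for a KVM_EXIT_IO exit.  For example,
 * emulator_pio_in_emulated(1, 0x60, &b, 1, vcpu) models a single-byte
 * read from port 0x60.
 */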
3860 static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
3861                              unsigned int count, struct kvm_vcpu *vcpu)
3862 {
3863         if (vcpu->arch.pio.count)
3864                 goto data_avail;
3865
3866         trace_kvm_pio(0, port, size, 1);
3867
3868         vcpu->arch.pio.port = port;
3869         vcpu->arch.pio.in = 1;
3870         vcpu->arch.pio.count  = count;
3871         vcpu->arch.pio.size = size;
3872
3873         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3874         data_avail:
3875                 memcpy(val, vcpu->arch.pio_data, size * count);
3876                 vcpu->arch.pio.count = 0;
3877                 return 1;
3878         }
3879
3880         vcpu->run->exit_reason = KVM_EXIT_IO;
3881         vcpu->run->io.direction = KVM_EXIT_IO_IN;
3882         vcpu->run->io.size = size;
3883         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3884         vcpu->run->io.count = count;
3885         vcpu->run->io.port = port;
3886
3887         return 0;
3888 }
3889
3890 static int emulator_pio_out_emulated(int size, unsigned short port,
3891                               const void *val, unsigned int count,
3892                               struct kvm_vcpu *vcpu)
3893 {
3894         trace_kvm_pio(1, port, size, 1);
3895
3896         vcpu->arch.pio.port = port;
3897         vcpu->arch.pio.in = 0;
3898         vcpu->arch.pio.count = count;
3899         vcpu->arch.pio.size = size;
3900
3901         memcpy(vcpu->arch.pio_data, val, size * count);
3902
3903         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3904                 vcpu->arch.pio.count = 0;
3905                 return 1;
3906         }
3907
3908         vcpu->run->exit_reason = KVM_EXIT_IO;
3909         vcpu->run->io.direction = KVM_EXIT_IO_OUT;
3910         vcpu->run->io.size = size;
3911         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3912         vcpu->run->io.count = count;
3913         vcpu->run->io.port = port;
3914
3915         return 0;
3916 }
3917
3918 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3919 {
3920         return kvm_x86_ops->get_segment_base(vcpu, seg);
3921 }
3922
3923 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3924 {
3925         kvm_mmu_invlpg(vcpu, address);
3926         return X86EMUL_CONTINUE;
3927 }
3928
3929 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
3930 {
3931         if (!need_emulate_wbinvd(vcpu))
3932                 return X86EMUL_CONTINUE;
3933
3934         if (kvm_x86_ops->has_wbinvd_exit()) {
3935                 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
3936                                 wbinvd_ipi, NULL, 1);
3937                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
3938         }
3939         wbinvd();
3940         return X86EMUL_CONTINUE;
3941 }
3942 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
3943
3944 int emulate_clts(struct kvm_vcpu *vcpu)
3945 {
3946         kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3947         kvm_x86_ops->fpu_activate(vcpu);
3948         return X86EMUL_CONTINUE;
3949 }
3950
3951 int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
3952 {
3953         return _kvm_get_dr(vcpu, dr, dest);
3954 }
3955
3956 int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
3957 {
3958
3959         return __kvm_set_dr(vcpu, dr, value);
3960 }
3961
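/*
 * mk_cr_64() keeps the upper 32 bits of the current control register and
 * substitutes the new 32-bit value; e.g. curr_cr = 0x100000011ULL with
 * new_val = 0x80050033 yields 0x180050033ULL.
 */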
3962 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3963 {
3964         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3965 }
3966
3967 static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
3968 {
3969         unsigned long value;
3970
3971         switch (cr) {
3972         case 0:
3973                 value = kvm_read_cr0(vcpu);
3974                 break;
3975         case 2:
3976                 value = vcpu->arch.cr2;
3977                 break;
3978         case 3:
3979                 value = vcpu->arch.cr3;
3980                 break;
3981         case 4:
3982                 value = kvm_read_cr4(vcpu);
3983                 break;
3984         case 8:
3985                 value = kvm_get_cr8(vcpu);
3986                 break;
3987         default:
3988                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3989                 return 0;
3990         }
3991
3992         return value;
3993 }
3994
3995 static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
3996 {
3997         int res = 0;
3998
3999         switch (cr) {
4000         case 0:
4001                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4002                 break;
4003         case 2:
4004                 vcpu->arch.cr2 = val;
4005                 break;
4006         case 3:
4007                 res = kvm_set_cr3(vcpu, val);
4008                 break;
4009         case 4:
4010                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4011                 break;
4012         case 8:
4013                 res = __kvm_set_cr8(vcpu, val & 0xfUL);
4014                 break;
4015         default:
4016                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4017                 res = -1;
4018         }
4019
4020         return res;
4021 }
4022
4023 static int emulator_get_cpl(struct kvm_vcpu *vcpu)
4024 {
4025         return kvm_x86_ops->get_cpl(vcpu);
4026 }
4027
4028 static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
4029 {
4030         kvm_x86_ops->get_gdt(vcpu, dt);
4031 }
4032
4033 static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
4034 {
4035         kvm_x86_ops->get_idt(vcpu, dt);
4036 }
4037
4038 static unsigned long emulator_get_cached_segment_base(int seg,
4039                                                       struct kvm_vcpu *vcpu)
4040 {
4041         return get_segment_base(vcpu, seg);
4042 }
4043
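/*
 * Note on granularity: with var.g set the descriptor limit is expressed
 * in 4K units, so the helpers below shift it by 12; e.g. a byte limit of
 * 0xffffffff (4 GiB - 1) round-trips as the 20-bit value 0xfffff, since
 * (0xfffff << 12) | 0xfff == 0xffffffff.
 */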
4044 static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
4045                                            struct kvm_vcpu *vcpu)
4046 {
4047         struct kvm_segment var;
4048
4049         kvm_get_segment(vcpu, &var, seg);
4050
4051         if (var.unusable)
4052                 return false;
4053
4054         if (var.g)
4055                 var.limit >>= 12;
4056         set_desc_limit(desc, var.limit);
4057         set_desc_base(desc, (unsigned long)var.base);
4058         desc->type = var.type;
4059         desc->s = var.s;
4060         desc->dpl = var.dpl;
4061         desc->p = var.present;
4062         desc->avl = var.avl;
4063         desc->l = var.l;
4064         desc->d = var.db;
4065         desc->g = var.g;
4066
4067         return true;
4068 }
4069
4070 static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
4071                                            struct kvm_vcpu *vcpu)
4072 {
4073         struct kvm_segment var;
4074
4075         /* needed to preserve selector */
4076         kvm_get_segment(vcpu, &var, seg);
4077
4078         var.base = get_desc_base(desc);
4079         var.limit = get_desc_limit(desc);
4080         if (desc->g)
4081                 var.limit = (var.limit << 12) | 0xfff;
4082         var.type = desc->type;
4083         var.present = desc->p;
4084         var.dpl = desc->dpl;
4085         var.db = desc->d;
4086         var.s = desc->s;
4087         var.l = desc->l;
4088         var.g = desc->g;
4089         var.avl = desc->avl;
4090         var.present = desc->p;
4091         var.unusable = !var.present;
4092         var.padding = 0;
4093
4094         kvm_set_segment(vcpu, &var, seg);
4095         return;
4096 }
4097
4098 static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
4099 {
4100         struct kvm_segment kvm_seg;
4101
4102         kvm_get_segment(vcpu, &kvm_seg, seg);
4103         return kvm_seg.selector;
4104 }
4105
4106 static void emulator_set_segment_selector(u16 sel, int seg,
4107                                           struct kvm_vcpu *vcpu)
4108 {
4109         struct kvm_segment kvm_seg;
4110
4111         kvm_get_segment(vcpu, &kvm_seg, seg);
4112         kvm_seg.selector = sel;
4113         kvm_set_segment(vcpu, &kvm_seg, seg);
4114 }
4115
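/*
 * Callback table handed to the x86 instruction emulator: every memory,
 * I/O, segment and control/debug register access the emulator needs is
 * routed back into the KVM helpers defined above.
 */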
4116 static struct x86_emulate_ops emulate_ops = {
4117         .read_std            = kvm_read_guest_virt_system,
4118         .write_std           = kvm_write_guest_virt_system,
4119         .fetch               = kvm_fetch_guest_virt,
4120         .read_emulated       = emulator_read_emulated,
4121         .write_emulated      = emulator_write_emulated,
4122         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
4123         .pio_in_emulated     = emulator_pio_in_emulated,
4124         .pio_out_emulated    = emulator_pio_out_emulated,
4125         .get_cached_descriptor = emulator_get_cached_descriptor,
4126         .set_cached_descriptor = emulator_set_cached_descriptor,
4127         .get_segment_selector = emulator_get_segment_selector,
4128         .set_segment_selector = emulator_set_segment_selector,
4129         .get_cached_segment_base = emulator_get_cached_segment_base,
4130         .get_gdt             = emulator_get_gdt,
4131         .get_idt             = emulator_get_idt,
4132         .get_cr              = emulator_get_cr,
4133         .set_cr              = emulator_set_cr,
4134         .cpl                 = emulator_get_cpl,
4135         .get_dr              = emulator_get_dr,
4136         .set_dr              = emulator_set_dr,
4137         .set_msr             = kvm_set_msr,
4138         .get_msr             = kvm_get_msr,
4139 };
4140
4141 static void cache_all_regs(struct kvm_vcpu *vcpu)
4142 {
4143         kvm_register_read(vcpu, VCPU_REGS_RAX);
4144         kvm_register_read(vcpu, VCPU_REGS_RSP);
4145         kvm_register_read(vcpu, VCPU_REGS_RIP);
4146         vcpu->arch.regs_dirty = ~0;
4147 }
4148
4149 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4150 {
4151         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
4152         /*
4153          * An sti; sti sequence only disables interrupts for the first
4154          * instruction. So, if the last instruction, be it emulated or
4155          * not, left the system with the INT_STI flag enabled, it
4156          * means that the last instruction was an sti. We should not
4157          * leave the flag on in this case. The same goes for mov ss.
4158          */
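        /*
         * For example, a guest running "sti; sti; hlt" must be able to
         * take interrupts at the hlt; keeping the shadow set across the
         * second sti would wrongly block them.
         */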
4159         if (!(int_shadow & mask))
4160                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4161 }
4162
4163 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4164 {
4165         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4166         if (ctxt->exception == PF_VECTOR)
4167                 kvm_propagate_fault(vcpu);
4168         else if (ctxt->error_code_valid)
4169                 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
4170         else
4171                 kvm_queue_exception(vcpu, ctxt->exception);
4172 }
4173
4174 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4175 {
4176         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
4177         int cs_db, cs_l;
4178
4179         cache_all_regs(vcpu);
4180
4181         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4182
4183         vcpu->arch.emulate_ctxt.vcpu = vcpu;
4184         vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
4185         vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
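        /*
         * Emulation mode is picked in priority order: real mode if the
         * guest is not in protected mode, VM86 if EFLAGS.VM is set, then
         * 64/32/16-bit protected mode depending on CS.L and CS.D.
         */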
4186         vcpu->arch.emulate_ctxt.mode =
4187                 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
4188                 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
4189                 ? X86EMUL_MODE_VM86 : cs_l
4190                 ? X86EMUL_MODE_PROT64 : cs_db
4191                 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
4192         memset(c, 0, sizeof(struct decode_cache));
4193         memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4194 }
4195
4196 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4197 {
4198         ++vcpu->stat.insn_emulation_fail;
4199         trace_kvm_emulate_insn_failed(vcpu);
4200         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4201         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4202         vcpu->run->internal.ndata = 0;
4203         kvm_queue_exception(vcpu, UD_VECTOR);
4204         return EMULATE_FAIL;
4205 }
4206
4207 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4208 {
4209         gpa_t gpa;
4210
4211         if (tdp_enabled)
4212                 return false;
4213
4214         /*
4215          * If emulation was due to an access to a shadowed page table
4216          * and it failed, try to unshadow the page and re-enter the
4217          * guest to let the CPU execute the instruction.
4218          */
4219         if (kvm_mmu_unprotect_page_virt(vcpu, gva))
4220                 return true;
4221
4222         gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
4223
4224         if (gpa == UNMAPPED_GVA)
4225                 return true; /* let cpu generate fault */
4226
4227         if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
4228                 return true;
4229
4230         return false;
4231 }
4232
4233 int emulate_instruction(struct kvm_vcpu *vcpu,
4234                         unsigned long cr2,
4235                         u16 error_code,
4236                         int emulation_type)
4237 {
4238         int r;
4239         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
4240
4241         kvm_clear_exception_queue(vcpu);
4242         vcpu->arch.mmio_fault_cr2 = cr2;
4243         /*
4244          * TODO: fix emulate.c to use guest_read/write_register
4245          * instead of direct ->regs accesses; this can save hundreds of
4246          * cycles on Intel for instructions that don't read/change RSP,
4247          * for example.
4248          */
4249         cache_all_regs(vcpu);
4250
4251         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4252                 init_emulate_ctxt(vcpu);
4253                 vcpu->arch.emulate_ctxt.interruptibility = 0;
4254                 vcpu->arch.emulate_ctxt.exception = -1;
4255                 vcpu->arch.emulate_ctxt.perm_ok = false;
4256
4257                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
4258                 if (r == X86EMUL_PROPAGATE_FAULT)
4259                         goto done;
4260
4261                 trace_kvm_emulate_insn_start(vcpu);
4262
4263                 /* Only allow emulation of specific instructions on #UD
4264                  * (namely VMMCALL, sysenter, sysexit, syscall) */
4265                 if (emulation_type & EMULTYPE_TRAP_UD) {
4266                         if (!c->twobyte)
4267                                 return EMULATE_FAIL;
4268                         switch (c->b) {
4269                         case 0x01: /* VMMCALL */
4270                                 if (c->modrm_mod != 3 || c->modrm_rm != 1)
4271                                         return EMULATE_FAIL;
4272                                 break;
4273                         case 0x34: /* sysenter */
4274                         case 0x35: /* sysexit */
4275                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4276                                         return EMULATE_FAIL;
4277                                 break;
4278                         case 0x05: /* syscall */
4279                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4280                                         return EMULATE_FAIL;
4281                                 break;
4282                         default:
4283                                 return EMULATE_FAIL;
4284                         }
4285
4286                         if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
4287                                 return EMULATE_FAIL;
4288                 }
4289
4290                 ++vcpu->stat.insn_emulation;
4291                 if (r)  {
4292                         if (reexecute_instruction(vcpu, cr2))
4293                                 return EMULATE_DONE;
4294                         if (emulation_type & EMULTYPE_SKIP)
4295                                 return EMULATE_FAIL;
4296                         return handle_emulation_failure(vcpu);
4297                 }
4298         }
4299
4300         if (emulation_type & EMULTYPE_SKIP) {
4301                 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
4302                 return EMULATE_DONE;
4303         }
4304
4305         /* this is needed for the vmware backdoor interface to work since
4306            it changes register values during IO operations */
4307         memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4308
4309 restart:
4310         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
4311
4312         if (r == EMULATION_FAILED) {
4313                 if (reexecute_instruction(vcpu, cr2))
4314                         return EMULATE_DONE;
4315
4316                 return handle_emulation_failure(vcpu);
4317         }
4318
4319 done:
4320         if (vcpu->arch.emulate_ctxt.exception >= 0) {
4321                 inject_emulated_exception(vcpu);
4322                 r = EMULATE_DONE;
4323         } else if (vcpu->arch.pio.count) {
4324                 if (!vcpu->arch.pio.in)
4325                         vcpu->arch.pio.count = 0;
4326                 r = EMULATE_DO_MMIO;
4327         } else if (vcpu->mmio_needed) {
4328                 if (vcpu->mmio_is_write)
4329                         vcpu->mmio_needed = 0;
4330                 r = EMULATE_DO_MMIO;
4331         } else if (r == EMULATION_RESTART)
4332                 goto restart;
4333         else
4334                 r = EMULATE_DONE;
4335
4336         toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
4337         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4338         kvm_make_request(KVM_REQ_EVENT, vcpu);
4339         memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4340         kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4341
4342         return r;
4343 }
4344 EXPORT_SYMBOL_GPL(emulate_instruction);
4345
4346 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4347 {
4348         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4349         int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
4350         /* do not return to emulator after return from userspace */
4351         vcpu->arch.pio.count = 0;
4352         return ret;
4353 }
4354 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4355
4356 static void tsc_bad(void *info)
4357 {
4358         __get_cpu_var(cpu_tsc_khz) = 0;
4359 }
4360
4361 static void tsc_khz_changed(void *data)
4362 {
4363         struct cpufreq_freqs *freq = data;
4364         unsigned long khz = 0;
4365
4366         if (data)
4367                 khz = freq->new;
4368         else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4369                 khz = cpufreq_quick_get(raw_smp_processor_id());
4370         if (!khz)
4371                 khz = tsc_khz;
4372         __get_cpu_var(cpu_tsc_khz) = khz;
4373 }
4374
4375 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4376                                      void *data)
4377 {
4378         struct cpufreq_freqs *freq = data;
4379         struct kvm *kvm;
4380         struct kvm_vcpu *vcpu;
4381         int i, send_ipi = 0;
4382
4383         /*
4384          * We allow guests to temporarily run on slowing clocks,
4385          * provided we notify them after, or to run on accelerating
4386          * clocks, provided we notify them before.  Thus time never
4387          * goes backwards.
4388          *
4389          * However, we have a problem.  We can't atomically update
4390          * the frequency of a given CPU from this function; it is
4391          * merely a notifier, which can be called from any CPU.
4392          * Changing the TSC frequency at arbitrary points in time
4393          * requires a recomputation of local variables related to
4394          * the TSC for each VCPU.  We must flag these local variables
4395          * to be updated and be sure the update takes place with the
4396          * new frequency before any guests proceed.
4397          *
4398          * Unfortunately, the combination of hotplug CPU and frequency
4399          * change creates an intractable locking scenario; the order
4400          * of when these callouts happen is undefined with respect to
4401          * CPU hotplug, and they can race with each other.  As such,
4402          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
4403          * undefined; you can actually have a CPU frequency change take
4404          * place in between the computation of X and the setting of the
4405          * variable.  To protect against this problem, all updates of
4406          * the per_cpu tsc_khz variable are done in an interrupt
4407          * protected IPI, and all callers wishing to update the value
4408          * must wait for a synchronous IPI to complete (which is trivial
4409          * if the caller is on the CPU already).  This establishes the
4410          * necessary total order on variable updates.
4411          *
4412          * Note that because a guest time update may take place
4413          * anytime after the setting of the VCPU's request bit, the
4414          * correct TSC value must be set before the request.  However,
4415          * to ensure the update actually makes it to any guest which
4416          * starts running in hardware virtualization between the set
4417          * and the acquisition of the spinlock, we must also ping the
4418          * CPU after setting the request bit.
4419          *
4420          */
4421
4422         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4423                 return 0;
4424         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4425                 return 0;
4426
4427         smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4428
4429         spin_lock(&kvm_lock);
4430         list_for_each_entry(kvm, &vm_list, vm_list) {
4431                 kvm_for_each_vcpu(i, vcpu, kvm) {
4432                         if (vcpu->cpu != freq->cpu)
4433                                 continue;
4434                         if (!kvm_request_guest_time_update(vcpu))
4435                                 continue;
4436                         if (vcpu->cpu != smp_processor_id())
4437                                 send_ipi = 1;
4438                 }
4439         }
4440         spin_unlock(&kvm_lock);
4441
4442         if (freq->old < freq->new && send_ipi) {
4443                 /*
4444                  * We upscale the frequency.  We must make sure the guest
4445                  * doesn't see old kvmclock values while running with
4446                  * the new frequency, otherwise we risk the guest seeing
4447                  * time go backwards.
4448                  *
4449                  * In case we update the frequency for another cpu
4450                  * (which might be in guest context) send an interrupt
4451                  * to kick the cpu out of guest context.  Next time
4452                  * guest context is entered kvmclock will be updated,
4453                  * so the guest will not see stale values.
4454                  */
4455                 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4456         }
4457         return 0;
4458 }
4459
4460 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4461         .notifier_call  = kvmclock_cpufreq_notifier
4462 };
4463
4464 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
4465                                         unsigned long action, void *hcpu)
4466 {
4467         unsigned int cpu = (unsigned long)hcpu;
4468
4469         switch (action) {
4470                 case CPU_ONLINE:
4471                 case CPU_DOWN_FAILED:
4472                         smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4473                         break;
4474                 case CPU_DOWN_PREPARE:
4475                         smp_call_function_single(cpu, tsc_bad, NULL, 1);
4476                         break;
4477         }
4478         return NOTIFY_OK;
4479 }
4480
4481 static struct notifier_block kvmclock_cpu_notifier_block = {
4482         .notifier_call  = kvmclock_cpu_notifier,
4483         .priority = -INT_MAX
4484 };
4485
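     /*
      * Register the CPU hotplug notifier and, on hosts without a
      * constant TSC, the cpufreq transition notifier, then refresh the
      * per-cpu TSC frequency on every CPU that is already online.
      */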
4486 static void kvm_timer_init(void)
4487 {
4488         int cpu;
4489
4490         register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4491         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4492                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4493                                           CPUFREQ_TRANSITION_NOTIFIER);
4494         }
4495         for_each_online_cpu(cpu)
4496                 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4497 }
4498
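     /*
      * Per-cpu pointer to the vcpu whose NMI is currently being handled.
      * It is set by kvm_before_handle_nmi()/kvm_after_handle_nmi() and is
      * used by the perf callbacks below to attribute samples taken in NMI
      * context to the guest.
      */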
4499 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4500
4501 static int kvm_is_in_guest(void)
4502 {
4503         return percpu_read(current_vcpu) != NULL;
4504 }
4505
4506 static int kvm_is_user_mode(void)
4507 {
4508         int user_mode = 3;
4509
4510         if (percpu_read(current_vcpu))
4511                 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
4512
4513         return user_mode != 0;
4514 }
4515
4516 static unsigned long kvm_get_guest_ip(void)
4517 {
4518         unsigned long ip = 0;
4519
4520         if (percpu_read(current_vcpu))
4521                 ip = kvm_rip_read(percpu_read(current_vcpu));
4522
4523         return ip;
4524 }
4525
4526 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4527         .is_in_guest            = kvm_is_in_guest,
4528         .is_user_mode           = kvm_is_user_mode,
4529         .get_guest_ip           = kvm_get_guest_ip,
4530 };
4531
4532 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4533 {
4534         percpu_write(current_vcpu, vcpu);
4535 }
4536 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4537
4538 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4539 {
4540         percpu_write(current_vcpu, NULL);
4541 }
4542 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4543
4544 int kvm_arch_init(void *opaque)
4545 {
4546         int r;
4547         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4548
4549         if (kvm_x86_ops) {
4550                 printk(KERN_ERR "kvm: already loaded the other module\n");
4551                 r = -EEXIST;
4552                 goto out;
4553         }
4554
4555         if (!ops->cpu_has_kvm_support()) {
4556                 printk(KERN_ERR "kvm: no hardware support\n");
4557                 r = -EOPNOTSUPP;
4558                 goto out;
4559         }
4560         if (ops->disabled_by_bios()) {
4561                 printk(KERN_ERR "kvm: disabled by bios\n");
4562                 r = -EOPNOTSUPP;
4563                 goto out;
4564         }
4565
4566         r = kvm_mmu_module_init();
4567         if (r)
4568                 goto out;
4569
4570         kvm_init_msr_list();
4571
4572         kvm_x86_ops = ops;
4573         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
4574         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
4575         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4576                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
4577
4578         kvm_timer_init();
4579
4580         perf_register_guest_info_callbacks(&kvm_guest_cbs);
4581
4582         if (cpu_has_xsave)
4583                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4584
4585         return 0;
4586
4587 out:
4588         return r;
4589 }
4590
4591 void kvm_arch_exit(void)
4592 {
4593         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4594
4595         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4596                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4597                                             CPUFREQ_TRANSITION_NOTIFIER);
4598         unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4599         kvm_x86_ops = NULL;
4600         kvm_mmu_module_exit();
4601 }
4602
4603 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4604 {
4605         ++vcpu->stat.halt_exits;
4606         if (irqchip_in_kernel(vcpu->kvm)) {
4607                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4608                 return 1;
4609         } else {
4610                 vcpu->run->exit_reason = KVM_EXIT_HLT;
4611                 return 0;
4612         }
4613 }
4614 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4615
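     /*
      * Outside of long mode a guest physical address is passed as two
      * 32-bit halves (a0 low, a1 high); recombine them.  In long mode a0
      * already carries the full address.
      */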
4616 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
4617                            unsigned long a1)
4618 {
4619         if (is_long_mode(vcpu))
4620                 return a0;
4621         else
4622                 return a0 | ((gpa_t)a1 << 32);
4623 }
4624
4625 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4626 {
4627         u64 param, ingpa, outgpa, ret;
4628         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4629         bool fast, longmode;
4630         int cs_db, cs_l;
4631
4632         /*
4633          * A hypercall generates #UD from non-zero CPL or real mode,
4634          * per the Hyper-V spec.
4635          */
4636         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4637                 kvm_queue_exception(vcpu, UD_VECTOR);
4638                 return 0;
4639         }
4640
4641         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4642         longmode = is_long_mode(vcpu) && cs_l == 1;
4643
4644         if (!longmode) {
4645                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4646                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4647                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4648                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4649                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4650                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4651         }
4652 #ifdef CONFIG_X86_64
4653         else {
4654                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4655                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4656                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4657         }
4658 #endif
4659
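             /*
              * Decode the hypercall input value: call code in bits 15:0,
              * the fast-call flag in bit 16, rep count in bits 43:32 and
              * rep start index in bits 59:48 (see the Hyper-V TLFS).
              */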
4660         code = param & 0xffff;
4661         fast = (param >> 16) & 0x1;
4662         rep_cnt = (param >> 32) & 0xfff;
4663         rep_idx = (param >> 48) & 0xfff;
4664
4665         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4666
4667         switch (code) {
4668         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4669                 kvm_vcpu_on_spin(vcpu);
4670                 break;
4671         default:
4672                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
4673                 break;
4674         }
4675
4676         ret = res | (((u64)rep_done & 0xfff) << 32);
4677         if (longmode) {
4678                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4679         } else {
4680                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4681                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4682         }
4683
4684         return 1;
4685 }
4686
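     /*
      * KVM's native hypercall ABI: the hypercall number is passed in RAX
      * and up to four arguments in RBX, RCX, RDX and RSI; the return
      * value is written back to RAX.  Outside of long mode only the low
      * 32 bits of each register are used.
      */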
4687 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4688 {
4689         unsigned long nr, a0, a1, a2, a3, ret;
4690         int r = 1;
4691
4692         if (kvm_hv_hypercall_enabled(vcpu->kvm))
4693                 return kvm_hv_hypercall(vcpu);
4694
4695         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
4696         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
4697         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
4698         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
4699         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
4700
4701         trace_kvm_hypercall(nr, a0, a1, a2, a3);
4702
4703         if (!is_long_mode(vcpu)) {
4704                 nr &= 0xFFFFFFFF;
4705                 a0 &= 0xFFFFFFFF;
4706                 a1 &= 0xFFFFFFFF;
4707                 a2 &= 0xFFFFFFFF;
4708                 a3 &= 0xFFFFFFFF;
4709         }
4710
4711         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
4712                 ret = -KVM_EPERM;
4713                 goto out;
4714         }
4715
4716         switch (nr) {
4717         case KVM_HC_VAPIC_POLL_IRQ:
4718                 ret = 0;
4719                 break;
4720         case KVM_HC_MMU_OP:
4721                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
4722                 break;
4723         default:
4724                 ret = -KVM_ENOSYS;
4725                 break;
4726         }
4727 out:
4728         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4729         ++vcpu->stat.hypercalls;
4730         return r;
4731 }
4732 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
4733
4734 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
4735 {
4736         char instruction[3];
4737         unsigned long rip = kvm_rip_read(vcpu);
4738
4739         /*
4740          * Blow out the MMU so that no other VCPU has an active mapping,
4741          * ensuring that the updated hypercall instruction appears atomic
4742          * across all VCPUs.
4743          */
4744         kvm_mmu_zap_all(vcpu->kvm);
4745
4746         kvm_x86_ops->patch_hypercall(vcpu, instruction);
4747
4748         return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
4749 }
4750
4751 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4752 {
4753         struct desc_ptr dt = { limit, base };
4754
4755         kvm_x86_ops->set_gdt(vcpu, &dt);
4756 }
4757
4758 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4759 {
4760         struct desc_ptr dt = { limit, base };
4761
4762         kvm_x86_ops->set_idt(vcpu, &dt);
4763 }
4764
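     /*
      * Some CPUID functions are stateful and return different values on
      * successive invocations (classically leaf 2).  Move the
      * "read next" marker to the following entry with the same function
      * number, wrapping around to the first one if necessary.
      */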
4765 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4766 {
4767         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4768         int j, nent = vcpu->arch.cpuid_nent;
4769
4770         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4771         /* when no next entry is found, the current entry[i] is reselected */
4772         for (j = i + 1; ; j = (j + 1) % nent) {
4773                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
4774                 if (ej->function == e->function) {
4775                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4776                         return j;
4777                 }
4778         }
4779         return 0; /* silence gcc, even though control never reaches here */
4780 }
4781
4782 /* find an entry with matching function, matching index (if needed), and that
4783  * should be read next (if it's stateful) */
4784 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4785         u32 function, u32 index)
4786 {
4787         if (e->function != function)
4788                 return 0;
4789         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4790                 return 0;
4791         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
4792             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
4793                 return 0;
4794         return 1;
4795 }
4796
4797 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4798                                               u32 function, u32 index)
4799 {
4800         int i;
4801         struct kvm_cpuid_entry2 *best = NULL;
4802
4803         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
4804                 struct kvm_cpuid_entry2 *e;
4805
4806                 e = &vcpu->arch.cpuid_entries[i];
4807                 if (is_matching_cpuid_entry(e, function, index)) {
4808                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4809                                 move_to_next_stateful_cpuid_entry(vcpu, i);
4810                         best = e;
4811                         break;
4812                 }
4813                 /*
4814                  * Both basic or both extended?
4815                  */
4816                 if (((e->function ^ function) & 0x80000000) == 0)
4817                         if (!best || e->function > best->function)
4818                                 best = e;
4819         }
4820         return best;
4821 }
4822 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
4823
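     /*
      * The guest's physical address width comes from CPUID
      * 0x80000008 EAX[7:0]; fall back to 36 bits when the extended leaf
      * is not exposed.
      */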
4824 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4825 {
4826         struct kvm_cpuid_entry2 *best;
4827
4828         best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
4829         if (!best || best->eax < 0x80000008)
4830                 goto not_found;
4831         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4832         if (best)
4833                 return best->eax & 0xff;
4834 not_found:
4835         return 36;
4836 }
4837
4838 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4839 {
4840         u32 function, index;
4841         struct kvm_cpuid_entry2 *best;
4842
4843         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4844         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4845         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4846         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4847         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4848         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4849         best = kvm_find_cpuid_entry(vcpu, function, index);
4850         if (best) {
4851                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4852                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4853                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4854                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
4855         }
4856         kvm_x86_ops->skip_emulated_instruction(vcpu);
4857         trace_kvm_cpuid(function,
4858                         kvm_register_read(vcpu, VCPU_REGS_RAX),
4859                         kvm_register_read(vcpu, VCPU_REGS_RBX),
4860                         kvm_register_read(vcpu, VCPU_REGS_RCX),
4861                         kvm_register_read(vcpu, VCPU_REGS_RDX));
4862 }
4863 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
4864
4865 /*
4866  * Check if userspace requested an interrupt window, and that the
4867  * interrupt window is open.
4868  *
4869  * No need to exit to userspace if we already have an interrupt queued.
4870  */
4871 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
4872 {
4873         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
4874                 vcpu->run->request_interrupt_window &&
4875                 kvm_arch_interrupt_allowed(vcpu));
4876 }
4877
4878 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
4879 {
4880         struct kvm_run *kvm_run = vcpu->run;
4881
4882         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
4883         kvm_run->cr8 = kvm_get_cr8(vcpu);
4884         kvm_run->apic_base = kvm_get_apic_base(vcpu);
4885         if (irqchip_in_kernel(vcpu->kvm))
4886                 kvm_run->ready_for_interrupt_injection = 1;
4887         else
4888                 kvm_run->ready_for_interrupt_injection =
4889                         kvm_arch_interrupt_allowed(vcpu) &&
4890                         !kvm_cpu_has_interrupt(vcpu) &&
4891                         !kvm_event_needs_reinjection(vcpu);
4892 }
4893
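     /*
      * Pin the page that backs the guest's virtual APIC state for the
      * duration of __vcpu_run(); vapic_exit() releases it and marks it
      * dirty.
      */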
4894 static void vapic_enter(struct kvm_vcpu *vcpu)
4895 {
4896         struct kvm_lapic *apic = vcpu->arch.apic;
4897         struct page *page;
4898
4899         if (!apic || !apic->vapic_addr)
4900                 return;
4901
4902         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4903
4904         vcpu->arch.apic->vapic_page = page;
4905 }
4906
4907 static void vapic_exit(struct kvm_vcpu *vcpu)
4908 {
4909         struct kvm_lapic *apic = vcpu->arch.apic;
4910         int idx;
4911
4912         if (!apic || !apic->vapic_addr)
4913                 return;
4914
4915         idx = srcu_read_lock(&vcpu->kvm->srcu);
4916         kvm_release_page_dirty(apic->vapic_page);
4917         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4918         srcu_read_unlock(&vcpu->kvm->srcu, idx);
4919 }
4920
4921 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4922 {
4923         int max_irr, tpr;
4924
4925         if (!kvm_x86_ops->update_cr8_intercept)
4926                 return;
4927
4928         if (!vcpu->arch.apic)
4929                 return;
4930
4931         if (!vcpu->arch.apic->vapic_addr)
4932                 max_irr = kvm_lapic_find_highest_irr(vcpu);
4933         else
4934                 max_irr = -1;
4935
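             /* The TPR is compared against the priority class, i.e. vector >> 4. */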
4936         if (max_irr != -1)
4937                 max_irr >>= 4;
4938
4939         tpr = kvm_lapic_get_cr8(vcpu);
4940
4941         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4942 }
4943
4944 static void inject_pending_event(struct kvm_vcpu *vcpu)
4945 {
4946         /* try to reinject previous events if any */
4947         if (vcpu->arch.exception.pending) {
4948                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
4949                                         vcpu->arch.exception.has_error_code,
4950                                         vcpu->arch.exception.error_code);
4951                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4952                                           vcpu->arch.exception.has_error_code,
4953                                           vcpu->arch.exception.error_code,
4954                                           vcpu->arch.exception.reinject);
4955                 return;
4956         }
4957
4958         if (vcpu->arch.nmi_injected) {
4959                 kvm_x86_ops->set_nmi(vcpu);
4960                 return;
4961         }
4962
4963         if (vcpu->arch.interrupt.pending) {
4964                 kvm_x86_ops->set_irq(vcpu);
4965                 return;
4966         }
4967
4968         /* try to inject new event if pending */
4969         if (vcpu->arch.nmi_pending) {
4970                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4971                         vcpu->arch.nmi_pending = false;
4972                         vcpu->arch.nmi_injected = true;
4973                         kvm_x86_ops->set_nmi(vcpu);
4974                 }
4975         } else if (kvm_cpu_has_interrupt(vcpu)) {
4976                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
4977                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4978                                             false);
4979                         kvm_x86_ops->set_irq(vcpu);
4980                 }
4981         }
4982 }
4983
4984 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
4985 {
4986         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
4987                         !vcpu->guest_xcr0_loaded) {
4988                 /* kvm_set_xcr() also depends on this */
4989                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
4990                 vcpu->guest_xcr0_loaded = 1;
4991         }
4992 }
4993
4994 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
4995 {
4996         if (vcpu->guest_xcr0_loaded) {
4997                 if (vcpu->arch.xcr0 != host_xcr0)
4998                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
4999                 vcpu->guest_xcr0_loaded = 0;
5000         }
5001 }
5002
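     /*
      * Perform one host->guest->host round trip: service pending
      * requests, inject events, enter the guest with interrupts disabled
      * and hand the resulting exit to the backend's exit handler.  A
      * positive return value keeps __vcpu_run() looping; zero or a
      * negative value ends the loop and returns to userspace.
      */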
5003 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5004 {
5005         int r;
5006         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5007                 vcpu->run->request_interrupt_window;
5008         bool req_event;
5009
5010         if (vcpu->requests) {
5011                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
5012                         kvm_mmu_unload(vcpu);
5013                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
5014                         __kvm_migrate_timers(vcpu);
5015                 if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) {
5016                         r = kvm_write_guest_time(vcpu);
5017                         if (unlikely(r))
5018                                 goto out;
5019                 }
5020                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
5021                         kvm_mmu_sync_roots(vcpu);
5022                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
5023                         kvm_x86_ops->tlb_flush(vcpu);
5024                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
5025                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
5026                         r = 0;
5027                         goto out;
5028                 }
5029                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
5030                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5031                         r = 0;
5032                         goto out;
5033                 }
5034                 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
5035                         vcpu->fpu_active = 0;
5036                         kvm_x86_ops->fpu_deactivate(vcpu);
5037                 }
5038         }
5039
5040         r = kvm_mmu_reload(vcpu);
5041         if (unlikely(r))
5042                 goto out;
5043
5044         preempt_disable();
5045
5046         kvm_x86_ops->prepare_guest_switch(vcpu);
5047         if (vcpu->fpu_active)
5048                 kvm_load_guest_fpu(vcpu);
5049         kvm_load_guest_xcr0(vcpu);
5050
5051         atomic_set(&vcpu->guest_mode, 1);
5052         smp_wmb();
5053
5054         local_irq_disable();
5055
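             /*
              * Sample and clear KVM_REQ_EVENT with interrupts disabled; if
              * the entry is aborted below, the request is re-raised so the
              * pending event is not lost.
              */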
5056         req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
5057
5058         if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
5059             || need_resched() || signal_pending(current)) {
5060                 if (req_event)
5061                         kvm_make_request(KVM_REQ_EVENT, vcpu);
5062                 atomic_set(&vcpu->guest_mode, 0);
5063                 smp_wmb();
5064                 local_irq_enable();
5065                 preempt_enable();
5066                 r = 1;
5067                 goto out;
5068         }
5069
5070         if (req_event || req_int_win) {
5071                 inject_pending_event(vcpu);
5072
5073                 /* enable NMI/IRQ window open exits if needed */
5074                 if (vcpu->arch.nmi_pending)
5075                         kvm_x86_ops->enable_nmi_window(vcpu);
5076                 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5077                         kvm_x86_ops->enable_irq_window(vcpu);
5078
5079                 if (kvm_lapic_enabled(vcpu)) {
5080                         update_cr8_intercept(vcpu);
5081                         kvm_lapic_sync_to_vapic(vcpu);
5082                 }
5083         }
5084
5085         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5086
5087         kvm_guest_enter();
5088
5089         if (unlikely(vcpu->arch.switch_db_regs)) {
5090                 set_debugreg(0, 7);
5091                 set_debugreg(vcpu->arch.eff_db[0], 0);
5092                 set_debugreg(vcpu->arch.eff_db[1], 1);
5093                 set_debugreg(vcpu->arch.eff_db[2], 2);
5094                 set_debugreg(vcpu->arch.eff_db[3], 3);
5095         }
5096
5097         trace_kvm_entry(vcpu->vcpu_id);
5098         kvm_x86_ops->run(vcpu);
5099
5100         /*
5101          * If the guest has used debug registers, at least dr7
5102          * will be disabled while returning to the host.
5103          * If we don't have active breakpoints in the host, we don't
5104          * care about the messed up debug address registers. But if
5105          * we have some of them active, restore the old state.
5106          */
5107         if (hw_breakpoint_active())
5108                 hw_breakpoint_restore();
5109
5110         kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
5111
5112         atomic_set(&vcpu->guest_mode, 0);
5113         smp_wmb();
5114         local_irq_enable();
5115
5116         ++vcpu->stat.exits;
5117
5118         /*
5119          * We must have an instruction between local_irq_enable() and
5120          * kvm_guest_exit(), so the timer interrupt isn't delayed by
5121          * the interrupt shadow.  The stat.exits increment will do nicely.
5122          * But we need to prevent reordering, hence this barrier():
5123          */
5124         barrier();
5125
5126         kvm_guest_exit();
5127
5128         preempt_enable();
5129
5130         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5131
5132         /*
5133          * Profile KVM exit RIPs:
5134          */
5135         if (unlikely(prof_on == KVM_PROFILING)) {
5136                 unsigned long rip = kvm_rip_read(vcpu);
5137                 profile_hit(KVM_PROFILING, (void *)rip);
5138         }
5139
5140
5141         kvm_lapic_sync_from_vapic(vcpu);
5142
5143         r = kvm_x86_ops->handle_exit(vcpu);
5144 out:
5145         return r;
5146 }
5147
5148
5149 static int __vcpu_run(struct kvm_vcpu *vcpu)
5150 {
5151         int r;
5152         struct kvm *kvm = vcpu->kvm;
5153
5154         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
5155                 pr_debug("vcpu %d received sipi with vector # %x\n",
5156                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
5157                 kvm_lapic_reset(vcpu);
5158                 r = kvm_arch_vcpu_reset(vcpu);
5159                 if (r)
5160                         return r;
5161                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5162         }
5163
5164         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5165         vapic_enter(vcpu);
5166
5167         r = 1;
5168         while (r > 0) {
5169                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
5170                         r = vcpu_enter_guest(vcpu);
5171                 else {
5172                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5173                         kvm_vcpu_block(vcpu);
5174                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5175                         if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
5176                         {
5177                                 switch (vcpu->arch.mp_state) {
5178                                 case KVM_MP_STATE_HALTED:
5179                                         vcpu->arch.mp_state =
5180                                                 KVM_MP_STATE_RUNNABLE;
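                                             /* fall through */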
5181                                 case KVM_MP_STATE_RUNNABLE:
5182                                         break;
5183                                 case KVM_MP_STATE_SIPI_RECEIVED:
5184                                 default:
5185                                         r = -EINTR;
5186                                         break;
5187                                 }
5188                         }
5189                 }
5190
5191                 if (r <= 0)
5192                         break;
5193
5194                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
5195                 if (kvm_cpu_has_pending_timer(vcpu))
5196                         kvm_inject_pending_timer_irqs(vcpu);
5197
5198                 if (dm_request_for_irq_injection(vcpu)) {
5199                         r = -EINTR;
5200                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5201                         ++vcpu->stat.request_irq_exits;
5202                 }
5203                 if (signal_pending(current)) {
5204                         r = -EINTR;
5205                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5206                         ++vcpu->stat.signal_exits;
5207                 }
5208                 if (need_resched()) {
5209                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5210                         kvm_resched(vcpu);
5211                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5212                 }
5213         }
5214
5215         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5216
5217         vapic_exit(vcpu);
5218
5219         return r;
5220 }
5221
5222 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5223 {
5224         int r;
5225         sigset_t sigsaved;
5226
5227         if (vcpu->sigset_active)
5228                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5229
5230         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
5231                 kvm_vcpu_block(vcpu);
5232                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
5233                 r = -EAGAIN;
5234                 goto out;
5235         }
5236
5237         /* re-sync apic's tpr */
5238         if (!irqchip_in_kernel(vcpu->kvm))
5239                 kvm_set_cr8(vcpu, kvm_run->cr8);
5240
5241         if (vcpu->arch.pio.count || vcpu->mmio_needed) {
5242                 if (vcpu->mmio_needed) {
5243                         memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
5244                         vcpu->mmio_read_completed = 1;
5245                         vcpu->mmio_needed = 0;
5246                 }
5247                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5248                 r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
5249                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5250                 if (r != EMULATE_DONE) {
5251                         r = 0;
5252                         goto out;
5253                 }
5254         }
5255         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
5256                 kvm_register_write(vcpu, VCPU_REGS_RAX,
5257                                      kvm_run->hypercall.ret);
5258
5259         r = __vcpu_run(vcpu);
5260
5261 out:
5262         post_kvm_run_save(vcpu);
5263         if (vcpu->sigset_active)
5264                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5265
5266         return r;
5267 }
5268
5269 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5270 {
5271         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5272         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5273         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
5274         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
5275         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
5276         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
5277         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
5278         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
5279 #ifdef CONFIG_X86_64
5280         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
5281         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
5282         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
5283         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
5284         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
5285         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
5286         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
5287         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
5288 #endif
5289
5290         regs->rip = kvm_rip_read(vcpu);
5291         regs->rflags = kvm_get_rflags(vcpu);
5292
5293         return 0;
5294 }
5295
5296 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5297 {
5298         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
5299         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
5300         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
5301         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
5302         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
5303         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
5304         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
5305         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
5306 #ifdef CONFIG_X86_64
5307         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
5308         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
5309         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
5310         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
5311         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
5312         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
5313         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
5314         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
5315 #endif
5316
5317         kvm_rip_write(vcpu, regs->rip);
5318         kvm_set_rflags(vcpu, regs->rflags);
5319
5320         vcpu->arch.exception.pending = false;
5321
5322         kvm_make_request(KVM_REQ_EVENT, vcpu);
5323
5324         return 0;
5325 }
5326
5327 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5328 {
5329         struct kvm_segment cs;
5330
5331         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
5332         *db = cs.db;
5333         *l = cs.l;
5334 }
5335 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
5336
5337 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
5338                                   struct kvm_sregs *sregs)
5339 {
5340         struct desc_ptr dt;
5341
5342         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5343         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5344         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5345         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5346         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5347         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5348
5349         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5350         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5351
5352         kvm_x86_ops->get_idt(vcpu, &dt);
5353         sregs->idt.limit = dt.size;
5354         sregs->idt.base = dt.address;
5355         kvm_x86_ops->get_gdt(vcpu, &dt);
5356         sregs->gdt.limit = dt.size;
5357         sregs->gdt.base = dt.address;
5358
5359         sregs->cr0 = kvm_read_cr0(vcpu);
5360         sregs->cr2 = vcpu->arch.cr2;
5361         sregs->cr3 = vcpu->arch.cr3;
5362         sregs->cr4 = kvm_read_cr4(vcpu);
5363         sregs->cr8 = kvm_get_cr8(vcpu);
5364         sregs->efer = vcpu->arch.efer;
5365         sregs->apic_base = kvm_get_apic_base(vcpu);
5366
5367         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5368
5369         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5370                 set_bit(vcpu->arch.interrupt.nr,
5371                         (unsigned long *)sregs->interrupt_bitmap);
5372
5373         return 0;
5374 }
5375
5376 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5377                                     struct kvm_mp_state *mp_state)
5378 {
5379         mp_state->mp_state = vcpu->arch.mp_state;
5380         return 0;
5381 }
5382
5383 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5384                                     struct kvm_mp_state *mp_state)
5385 {
5386         vcpu->arch.mp_state = mp_state->mp_state;
5387         kvm_make_request(KVM_REQ_EVENT, vcpu);
5388         return 0;
5389 }
5390
5391 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5392                     bool has_error_code, u32 error_code)
5393 {
5394         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
5395         int ret;
5396
5397         init_emulate_ctxt(vcpu);
5398
5399         ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
5400                                    tss_selector, reason, has_error_code,
5401                                    error_code);
5402
5403         if (ret)
5404                 return EMULATE_FAIL;
5405
5406         memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
5407         kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
5408         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5409         kvm_make_request(KVM_REQ_EVENT, vcpu);
5410         return EMULATE_DONE;
5411 }
5412 EXPORT_SYMBOL_GPL(kvm_task_switch);
5413
5414 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5415                                   struct kvm_sregs *sregs)
5416 {
5417         int mmu_reset_needed = 0;
5418         int pending_vec, max_bits;
5419         struct desc_ptr dt;
5420
5421         dt.size = sregs->idt.limit;
5422         dt.address = sregs->idt.base;
5423         kvm_x86_ops->set_idt(vcpu, &dt);
5424         dt.size = sregs->gdt.limit;
5425         dt.address = sregs->gdt.base;
5426         kvm_x86_ops->set_gdt(vcpu, &dt);
5427
5428         vcpu->arch.cr2 = sregs->cr2;
5429         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
5430         vcpu->arch.cr3 = sregs->cr3;
5431
5432         kvm_set_cr8(vcpu, sregs->cr8);
5433
5434         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
5435         kvm_x86_ops->set_efer(vcpu, sregs->efer);
5436         kvm_set_apic_base(vcpu, sregs->apic_base);
5437
5438         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
5439         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
5440         vcpu->arch.cr0 = sregs->cr0;
5441
5442         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5443         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5444         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5445                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
5446                 mmu_reset_needed = 1;
5447         }
5448
5449         if (mmu_reset_needed)
5450                 kvm_mmu_reset_context(vcpu);
5451
5452         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5453         pending_vec = find_first_bit(
5454                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5455         if (pending_vec < max_bits) {
5456                 kvm_queue_interrupt(vcpu, pending_vec, false);
5457                 pr_debug("Set back pending irq %d\n", pending_vec);
5458                 if (irqchip_in_kernel(vcpu->kvm))
5459                         kvm_pic_clear_isr_ack(vcpu->kvm);
5460         }
5461
5462         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5463         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5464         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5465         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5466         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5467         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5468
5469         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5470         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5471
5472         update_cr8_intercept(vcpu);
5473
5474         /* Older userspace won't unhalt the vcpu on reset. */
5475         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
5476             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
5477             !is_protmode(vcpu))
5478                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5479
5480         kvm_make_request(KVM_REQ_EVENT, vcpu);
5481
5482         return 0;
5483 }
5484
5485 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5486                                         struct kvm_guest_debug *dbg)
5487 {
5488         unsigned long rflags;
5489         int i, r;
5490
5491         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5492                 r = -EBUSY;
5493                 if (vcpu->arch.exception.pending)
5494                         goto out;
5495                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5496                         kvm_queue_exception(vcpu, DB_VECTOR);
5497                 else
5498                         kvm_queue_exception(vcpu, BP_VECTOR);
5499         }
5500
5501         /*
5502          * Read rflags as long as potentially injected trace flags are still
5503          * filtered out.
5504          */
5505         rflags = kvm_get_rflags(vcpu);
5506
5507         vcpu->guest_debug = dbg->control;
5508         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5509                 vcpu->guest_debug = 0;
5510
5511         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5512                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5513                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5514                 vcpu->arch.switch_db_regs =
5515                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5516         } else {
5517                 for (i = 0; i < KVM_NR_DB_REGS; i++)
5518                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5519                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5520         }
5521
5522         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5523                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5524                         get_segment_base(vcpu, VCPU_SREG_CS);
5525
5526         /*
5527          * Trigger an rflags update that will inject or remove the trace
5528          * flags.
5529          */
5530         kvm_set_rflags(vcpu, rflags);
5531
5532         kvm_x86_ops->set_guest_debug(vcpu, dbg);
5533
5534         r = 0;
5535
5536 out:
5537
5538         return r;
5539 }
5540
5541 /*
5542  * Translate a guest virtual address to a guest physical address.
5543  */
5544 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5545                                     struct kvm_translation *tr)
5546 {
5547         unsigned long vaddr = tr->linear_address;
5548         gpa_t gpa;
5549         int idx;
5550
5551         idx = srcu_read_lock(&vcpu->kvm->srcu);
5552         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5553         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5554         tr->physical_address = gpa;
5555         tr->valid = gpa != UNMAPPED_GVA;
5556         tr->writeable = 1;
5557         tr->usermode = 0;
5558
5559         return 0;
5560 }
5561
5562 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5563 {
5564         struct i387_fxsave_struct *fxsave =
5565                         &vcpu->arch.guest_fpu.state->fxsave;
5566
5567         memcpy(fpu->fpr, fxsave->st_space, 128);
5568         fpu->fcw = fxsave->cwd;
5569         fpu->fsw = fxsave->swd;
5570         fpu->ftwx = fxsave->twd;
5571         fpu->last_opcode = fxsave->fop;
5572         fpu->last_ip = fxsave->rip;
5573         fpu->last_dp = fxsave->rdp;
5574         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5575
5576         return 0;
5577 }
5578
5579 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5580 {
5581         struct i387_fxsave_struct *fxsave =
5582                         &vcpu->arch.guest_fpu.state->fxsave;
5583
5584         memcpy(fxsave->st_space, fpu->fpr, 128);
5585         fxsave->cwd = fpu->fcw;
5586         fxsave->swd = fpu->fsw;
5587         fxsave->twd = fpu->ftwx;
5588         fxsave->fop = fpu->last_opcode;
5589         fxsave->rip = fpu->last_ip;
5590         fxsave->rdp = fpu->last_dp;
5591         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5592
5593         return 0;
5594 }
5595
5596 int fx_init(struct kvm_vcpu *vcpu)
5597 {
5598         int err;
5599
5600         err = fpu_alloc(&vcpu->arch.guest_fpu);
5601         if (err)
5602                 return err;
5603
5604         fpu_finit(&vcpu->arch.guest_fpu);
5605
5606         /*
5607          * Ensure guest xcr0 is valid for loading
5608          */
5609         vcpu->arch.xcr0 = XSTATE_FP;
5610
5611         vcpu->arch.cr0 |= X86_CR0_ET;
5612
5613         return 0;
5614 }
5615 EXPORT_SYMBOL_GPL(fx_init);
5616
5617 static void fx_free(struct kvm_vcpu *vcpu)
5618 {
5619         fpu_free(&vcpu->arch.guest_fpu);
5620 }
5621
5622 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5623 {
5624         if (vcpu->guest_fpu_loaded)
5625                 return;
5626
5627         /*
5628          * Restore all possible guest states, and assume the host
5629          * will use all available bits.  The guest xcr0 will be
5630          * loaded later.
5631          */
5632         kvm_put_guest_xcr0(vcpu);
5633         vcpu->guest_fpu_loaded = 1;
5634         unlazy_fpu(current);
5635         fpu_restore_checking(&vcpu->arch.guest_fpu);
5636         trace_kvm_fpu(1);
5637 }
5638
5639 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5640 {
5641         kvm_put_guest_xcr0(vcpu);
5642
5643         if (!vcpu->guest_fpu_loaded)
5644                 return;
5645
5646         vcpu->guest_fpu_loaded = 0;
5647         fpu_save_init(&vcpu->arch.guest_fpu);
5648         ++vcpu->stat.fpu_reload;
5649         kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
5650         trace_kvm_fpu(0);
5651 }
5652
5653 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5654 {
5655         if (vcpu->arch.time_page) {
5656                 kvm_release_page_dirty(vcpu->arch.time_page);
5657                 vcpu->arch.time_page = NULL;
5658         }
5659
5660         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
5661         fx_free(vcpu);
5662         kvm_x86_ops->vcpu_free(vcpu);
5663 }
5664
5665 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5666                                                 unsigned int id)
5667 {
5668         if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
5669                 printk_once(KERN_WARNING
5670                 "kvm: SMP vm created on host with unstable TSC; "
5671                 "guest TSC will not be reliable\n");
5672         return kvm_x86_ops->vcpu_create(kvm, id);
5673 }
5674
5675 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5676 {
5677         int r;
5678
5679         vcpu->arch.mtrr_state.have_fixed = 1;
5680         vcpu_load(vcpu);
5681         r = kvm_arch_vcpu_reset(vcpu);
5682         if (r == 0)
5683                 r = kvm_mmu_setup(vcpu);
5684         vcpu_put(vcpu);
5685         if (r < 0)
5686                 goto free_vcpu;
5687
5688         return 0;
5689 free_vcpu:
5690         kvm_x86_ops->vcpu_free(vcpu);
5691         return r;
5692 }
5693
5694 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
5695 {
5696         vcpu_load(vcpu);
5697         kvm_mmu_unload(vcpu);
5698         vcpu_put(vcpu);
5699
5700         fx_free(vcpu);
5701         kvm_x86_ops->vcpu_free(vcpu);
5702 }
5703
5704 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5705 {
5706         vcpu->arch.nmi_pending = false;
5707         vcpu->arch.nmi_injected = false;
5708
5709         vcpu->arch.switch_db_regs = 0;
5710         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5711         vcpu->arch.dr6 = DR6_FIXED_1;
5712         vcpu->arch.dr7 = DR7_FIXED_1;
5713
5714         kvm_make_request(KVM_REQ_EVENT, vcpu);
5715
5716         return kvm_x86_ops->vcpu_reset(vcpu);
5717 }
5718
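     /*
      * Called when hardware virtualization is (re)enabled on a CPU, e.g.
      * after hotplug or resume: refresh the shared user-return MSR state
      * for this CPU and request a clock update for every vcpu that last
      * ran here, then let the backend re-enable VMX/SVM.
      */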
5719 int kvm_arch_hardware_enable(void *garbage)
5720 {
5721         struct kvm *kvm;
5722         struct kvm_vcpu *vcpu;
5723         int i;
5724
5725         kvm_shared_msr_cpu_online();
5726         list_for_each_entry(kvm, &vm_list, vm_list)
5727                 kvm_for_each_vcpu(i, vcpu, kvm)
5728                         if (vcpu->cpu == smp_processor_id())
5729                                 kvm_request_guest_time_update(vcpu);
5730         return kvm_x86_ops->hardware_enable(garbage);
5731 }
5732
5733 void kvm_arch_hardware_disable(void *garbage)
5734 {
5735         kvm_x86_ops->hardware_disable(garbage);
5736         drop_user_return_notifiers(garbage);
5737 }
5738
5739 int kvm_arch_hardware_setup(void)
5740 {
5741         return kvm_x86_ops->hardware_setup();
5742 }
5743
5744 void kvm_arch_hardware_unsetup(void)
5745 {
5746         kvm_x86_ops->hardware_unsetup();
5747 }
5748
5749 void kvm_arch_check_processor_compat(void *rtn)
5750 {
5751         kvm_x86_ops->check_processor_compatibility(rtn);
5752 }
5753
5754 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5755 {
5756         struct page *page;
5757         struct kvm *kvm;
5758         int r;
5759
5760         BUG_ON(vcpu->kvm == NULL);
5761         kvm = vcpu->kvm;
5762
5763         vcpu->arch.emulate_ctxt.ops = &emulate_ops;
5764         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
5765         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
5766         vcpu->arch.mmu.translate_gpa = translate_gpa;
5767         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5768         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
5769                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5770         else
5771                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
5772
5773         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5774         if (!page) {
5775                 r = -ENOMEM;
5776                 goto fail;
5777         }
5778         vcpu->arch.pio_data = page_address(page);
5779
5780         r = kvm_mmu_create(vcpu);
5781         if (r < 0)
5782                 goto fail_free_pio_data;
5783
5784         if (irqchip_in_kernel(kvm)) {
5785                 r = kvm_create_lapic(vcpu);
5786                 if (r < 0)
5787                         goto fail_mmu_destroy;
5788         }
5789
5790         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5791                                        GFP_KERNEL);
5792         if (!vcpu->arch.mce_banks) {
5793                 r = -ENOMEM;
5794                 goto fail_free_lapic;
5795         }
5796         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5797
5798         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
5799                 goto fail_free_mce_banks;
5800
5801         return 0;
5802 fail_free_mce_banks:
5803         kfree(vcpu->arch.mce_banks);
5804 fail_free_lapic:
5805         kvm_free_lapic(vcpu);
5806 fail_mmu_destroy:
5807         kvm_mmu_destroy(vcpu);
5808 fail_free_pio_data:
5809         free_page((unsigned long)vcpu->arch.pio_data);
5810 fail:
5811         return r;
5812 }
5813
5814 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5815 {
5816         int idx;
5817
5818         kfree(vcpu->arch.mce_banks);
5819         kvm_free_lapic(vcpu);
5820         idx = srcu_read_lock(&vcpu->kvm->srcu);
5821         kvm_mmu_destroy(vcpu);
5822         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5823         free_page((unsigned long)vcpu->arch.pio_data);
5824 }
5825
5826 struct  kvm *kvm_arch_create_vm(void)
5827 {
5828         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5829
5830         if (!kvm)
5831                 return ERR_PTR(-ENOMEM);
5832
5833         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5834         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5835
5836         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5837         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5838
5839         spin_lock_init(&kvm->arch.tsc_write_lock);
5840
5841         return kvm;
5842 }
5843
5844 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5845 {
5846         vcpu_load(vcpu);
5847         kvm_mmu_unload(vcpu);
5848         vcpu_put(vcpu);
5849 }
5850
5851 static void kvm_free_vcpus(struct kvm *kvm)
5852 {
5853         unsigned int i;
5854         struct kvm_vcpu *vcpu;
5855
5856         /*
5857          * Unpin any mmu pages first.
5858          */
5859         kvm_for_each_vcpu(i, vcpu, kvm)
5860                 kvm_unload_vcpu_mmu(vcpu);
5861         kvm_for_each_vcpu(i, vcpu, kvm)
5862                 kvm_arch_vcpu_free(vcpu);
5863
5864         mutex_lock(&kvm->lock);
5865         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5866                 kvm->vcpus[i] = NULL;
5867
5868         atomic_set(&kvm->online_vcpus, 0);
5869         mutex_unlock(&kvm->lock);
5870 }
5871
5872 void kvm_arch_sync_events(struct kvm *kvm)
5873 {
5874         kvm_free_all_assigned_devices(kvm);
5875         kvm_free_pit(kvm);
5876 }
5877
5878 void kvm_arch_destroy_vm(struct kvm *kvm)
5879 {
5880         kvm_iommu_unmap_guest(kvm);
5881         kfree(kvm->arch.vpic);
5882         kfree(kvm->arch.vioapic);
5883         kvm_free_vcpus(kvm);
5884         kvm_free_physmem(kvm);
5885         if (kvm->arch.apic_access_page)
5886                 put_page(kvm->arch.apic_access_page);
5887         if (kvm->arch.ept_identity_pagetable)
5888                 put_page(kvm->arch.ept_identity_pagetable);
5889         cleanup_srcu_struct(&kvm->srcu);
5890         kfree(kvm);
5891 }
5892
5893 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5894                                 struct kvm_memory_slot *memslot,
5895                                 struct kvm_memory_slot old,
5896                                 struct kvm_userspace_memory_region *mem,
5897                                 int user_alloc)
5898 {
5899         int npages = memslot->npages;
5900         int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
5901
5902         /* Prevent internal slot pages from being moved by fork()/COW. */
5903         if (memslot->id >= KVM_MEMORY_SLOTS)
5904                 map_flags = MAP_SHARED | MAP_ANONYMOUS;
5905
5906         /* To keep backward compatibility with older userspace,
5907          * x86 needs to handle the !user_alloc case.
5908          */
5909         if (!user_alloc) {
5910                 if (npages && !old.rmap) {
5911                         unsigned long userspace_addr;
5912
5913                         down_write(&current->mm->mmap_sem);
5914                         userspace_addr = do_mmap(NULL, 0,
5915                                                  npages * PAGE_SIZE,
5916                                                  PROT_READ | PROT_WRITE,
5917                                                  map_flags,
5918                                                  0);
5919                         up_write(&current->mm->mmap_sem);
5920
5921                         if (IS_ERR((void *)userspace_addr))
5922                                 return PTR_ERR((void *)userspace_addr);
5923
5924                         memslot->userspace_addr = userspace_addr;
5925                 }
5926         }
5927
5928
5929         return 0;
5930 }
5931
5932 void kvm_arch_commit_memory_region(struct kvm *kvm,
5933                                 struct kvm_userspace_memory_region *mem,
5934                                 struct kvm_memory_slot old,
5935                                 int user_alloc)
5936 {
5937
5938         int npages = mem->memory_size >> PAGE_SHIFT;
5939
5940         if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5941                 int ret;
5942
5943                 down_write(&current->mm->mmap_sem);
5944                 ret = do_munmap(current->mm, old.userspace_addr,
5945                                 old.npages * PAGE_SIZE);
5946                 up_write(&current->mm->mmap_sem);
5947                 if (ret < 0)
5948                         printk(KERN_WARNING
5949                                "kvm_vm_ioctl_set_memory_region: "
5950                                "failed to munmap memory\n");
5951         }
5952
5953         spin_lock(&kvm->mmu_lock);
5954         if (!kvm->arch.n_requested_mmu_pages) {
5955                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5956                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5957         }
5958
5959         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
5960         spin_unlock(&kvm->mmu_lock);
5961 }
5962
5963 void kvm_arch_flush_shadow(struct kvm *kvm)
5964 {
5965         kvm_mmu_zap_all(kvm);
5966         kvm_reload_remote_mmus(kvm);
5967 }
5968
5969 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5970 {
5971         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
5972                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5973                 || vcpu->arch.nmi_pending ||
5974                 (kvm_arch_interrupt_allowed(vcpu) &&
5975                  kvm_cpu_has_interrupt(vcpu));
5976 }
5977
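     /*
      * Wake the vcpu if it is blocked and, if it is currently executing
      * guest code on another CPU, send a reschedule IPI to force an exit.
      * The atomic_xchg() on guest_mode ensures at most one kick sends the
      * IPI for a given guest entry.
      */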
5978 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5979 {
5980         int me;
5981         int cpu = vcpu->cpu;
5982
5983         if (waitqueue_active(&vcpu->wq)) {
5984                 wake_up_interruptible(&vcpu->wq);
5985                 ++vcpu->stat.halt_wakeup;
5986         }
5987
5988         me = get_cpu();
5989         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
5990                 if (atomic_xchg(&vcpu->guest_mode, 0))
5991                         smp_send_reschedule(cpu);
5992         put_cpu();
5993 }
5994
5995 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5996 {
5997         return kvm_x86_ops->interrupt_allowed(vcpu);
5998 }
5999
6000 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
6001 {
6002         unsigned long current_rip = kvm_rip_read(vcpu) +
6003                 get_segment_base(vcpu, VCPU_SREG_CS);
6004
6005         return current_rip == linear_rip;
6006 }
6007 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
6008
6009 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
6010 {
6011         unsigned long rflags;
6012
6013         rflags = kvm_x86_ops->get_rflags(vcpu);
6014         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6015                 rflags &= ~X86_EFLAGS_TF;
6016         return rflags;
6017 }
6018 EXPORT_SYMBOL_GPL(kvm_get_rflags);
6019
6020 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6021 {
6022         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
6023             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6024                 rflags |= X86_EFLAGS_TF;
6025         kvm_x86_ops->set_rflags(vcpu, rflags);
6026         kvm_make_request(KVM_REQ_EVENT, vcpu);
6027 }
6028 EXPORT_SYMBOL_GPL(kvm_set_rflags);
6029
6030 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
6031 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
6032 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
6033 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
6034 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
6035 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
6036 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
6037 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
6038 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
6039 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
6040 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
6041 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);