KVM: x86: Move TSC offset writes to common code
arch/x86/kvm/x86.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29
30 #include <linux/clocksource.h>
31 #include <linux/interrupt.h>
32 #include <linux/kvm.h>
33 #include <linux/fs.h>
34 #include <linux/vmalloc.h>
35 #include <linux/module.h>
36 #include <linux/mman.h>
37 #include <linux/highmem.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/cpufreq.h>
41 #include <linux/user-return-notifier.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/perf_event.h>
45 #include <linux/uaccess.h>
46 #include <trace/events/kvm.h>
47
48 #define CREATE_TRACE_POINTS
49 #include "trace.h"
50
51 #include <asm/debugreg.h>
52 #include <asm/msr.h>
53 #include <asm/desc.h>
54 #include <asm/mtrr.h>
55 #include <asm/mce.h>
56 #include <asm/i387.h>
57 #include <asm/xcr.h>
58
59 #define MAX_IO_MSRS 256
60 #define CR0_RESERVED_BITS                                               \
61         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
62                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
63                           | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
64 #define CR4_RESERVED_BITS                                               \
65         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
66                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
67                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
68                           | X86_CR4_OSXSAVE \
69                           | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
70
71 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
72
73 #define KVM_MAX_MCE_BANKS 32
74 #define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
75
76 /* EFER defaults:
77  * - enable syscall by default because it is emulated by KVM
78  * - enable LME and LMA by default on 64-bit KVM
79  */
80 #ifdef CONFIG_X86_64
81 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
82 #else
83 static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
84 #endif
85
86 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
87 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
88
89 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
90 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
91                                     struct kvm_cpuid_entry2 __user *entries);
92
93 struct kvm_x86_ops *kvm_x86_ops;
94 EXPORT_SYMBOL_GPL(kvm_x86_ops);
95
96 int ignore_msrs = 0;
97 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
98
99 #define KVM_NR_SHARED_MSRS 16
100
101 struct kvm_shared_msrs_global {
102         int nr;
103         u32 msrs[KVM_NR_SHARED_MSRS];
104 };
105
106 struct kvm_shared_msrs {
107         struct user_return_notifier urn;
108         bool registered;
109         struct kvm_shared_msr_values {
110                 u64 host;
111                 u64 curr;
112         } values[KVM_NR_SHARED_MSRS];
113 };
114
115 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
116 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
117
118 struct kvm_stats_debugfs_item debugfs_entries[] = {
119         { "pf_fixed", VCPU_STAT(pf_fixed) },
120         { "pf_guest", VCPU_STAT(pf_guest) },
121         { "tlb_flush", VCPU_STAT(tlb_flush) },
122         { "invlpg", VCPU_STAT(invlpg) },
123         { "exits", VCPU_STAT(exits) },
124         { "io_exits", VCPU_STAT(io_exits) },
125         { "mmio_exits", VCPU_STAT(mmio_exits) },
126         { "signal_exits", VCPU_STAT(signal_exits) },
127         { "irq_window", VCPU_STAT(irq_window_exits) },
128         { "nmi_window", VCPU_STAT(nmi_window_exits) },
129         { "halt_exits", VCPU_STAT(halt_exits) },
130         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
131         { "hypercalls", VCPU_STAT(hypercalls) },
132         { "request_irq", VCPU_STAT(request_irq_exits) },
133         { "irq_exits", VCPU_STAT(irq_exits) },
134         { "host_state_reload", VCPU_STAT(host_state_reload) },
135         { "efer_reload", VCPU_STAT(efer_reload) },
136         { "fpu_reload", VCPU_STAT(fpu_reload) },
137         { "insn_emulation", VCPU_STAT(insn_emulation) },
138         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
139         { "irq_injections", VCPU_STAT(irq_injections) },
140         { "nmi_injections", VCPU_STAT(nmi_injections) },
141         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
142         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
143         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
144         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
145         { "mmu_flooded", VM_STAT(mmu_flooded) },
146         { "mmu_recycled", VM_STAT(mmu_recycled) },
147         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
148         { "mmu_unsync", VM_STAT(mmu_unsync) },
149         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
150         { "largepages", VM_STAT(lpages) },
151         { NULL }
152 };
153
154 u64 __read_mostly host_xcr0;
155
156 static inline u32 bit(int bitno)
157 {
158         return 1 << (bitno & 31);
159 }
160
161 static void kvm_on_user_return(struct user_return_notifier *urn)
162 {
163         unsigned slot;
164         struct kvm_shared_msrs *locals
165                 = container_of(urn, struct kvm_shared_msrs, urn);
166         struct kvm_shared_msr_values *values;
167
168         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
169                 values = &locals->values[slot];
170                 if (values->host != values->curr) {
171                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
172                         values->curr = values->host;
173                 }
174         }
175         locals->registered = false;
176         user_return_notifier_unregister(urn);
177 }
178
179 static void shared_msr_update(unsigned slot, u32 msr)
180 {
181         struct kvm_shared_msrs *smsr;
182         u64 value;
183
184         smsr = &__get_cpu_var(shared_msrs);
185         /* this is only read, and nobody should modify it at this time,
186          * so no locking is needed */
187         if (slot >= shared_msrs_global.nr) {
188                 printk(KERN_ERR "kvm: invalid MSR slot!");
189                 return;
190         }
191         rdmsrl_safe(msr, &value);
192         smsr->values[slot].host = value;
193         smsr->values[slot].curr = value;
194 }
195
196 void kvm_define_shared_msr(unsigned slot, u32 msr)
197 {
198         if (slot >= shared_msrs_global.nr)
199                 shared_msrs_global.nr = slot + 1;
200         shared_msrs_global.msrs[slot] = msr;
201         /* ensure shared_msrs_global has been updated before it is used */
202         smp_wmb();
203 }
204 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
205
206 static void kvm_shared_msr_cpu_online(void)
207 {
208         unsigned i;
209
210         for (i = 0; i < shared_msrs_global.nr; ++i)
211                 shared_msr_update(i, shared_msrs_global.msrs[i]);
212 }
213
214 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
215 {
216         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
217
218         if (((value ^ smsr->values[slot].curr) & mask) == 0)
219                 return;
220         smsr->values[slot].curr = value;
221         wrmsrl(shared_msrs_global.msrs[slot], value);
222         if (!smsr->registered) {
223                 smsr->urn.on_user_return = kvm_on_user_return;
224                 user_return_notifier_register(&smsr->urn);
225                 smsr->registered = true;
226         }
227 }
228 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
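/*
 * Typical use of the shared-MSR machinery above (illustrative sketch only;
 * the real call sites live in the vendor modules such as vmx.c/svm.c):
 *
 *	// once, at hardware setup time, for each MSR the guest may clobber:
 *	kvm_define_shared_msr(0, MSR_STAR);
 *
 *	// whenever a guest value needs to be loaded for this vcpu:
 *	kvm_set_shared_msr(0, guest_star_value, -1ull);
 *
 * The first kvm_set_shared_msr() on a CPU registers a user-return notifier,
 * so the host value is restored lazily by kvm_on_user_return() when the CPU
 * returns to userspace rather than on every vmexit.
 */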
229
230 static void drop_user_return_notifiers(void *ignore)
231 {
232         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
233
234         if (smsr->registered)
235                 kvm_on_user_return(&smsr->urn);
236 }
237
238 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
239 {
240         if (irqchip_in_kernel(vcpu->kvm))
241                 return vcpu->arch.apic_base;
242         else
243                 return vcpu->arch.apic_base;
244 }
245 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
246
247 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
248 {
249         /* TODO: reserve bits check */
250         if (irqchip_in_kernel(vcpu->kvm))
251                 kvm_lapic_set_base(vcpu, data);
252         else
253                 vcpu->arch.apic_base = data;
254 }
255 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
256
257 #define EXCPT_BENIGN            0
258 #define EXCPT_CONTRIBUTORY      1
259 #define EXCPT_PF                2
260
261 static int exception_class(int vector)
262 {
263         switch (vector) {
264         case PF_VECTOR:
265                 return EXCPT_PF;
266         case DE_VECTOR:
267         case TS_VECTOR:
268         case NP_VECTOR:
269         case SS_VECTOR:
270         case GP_VECTOR:
271                 return EXCPT_CONTRIBUTORY;
272         default:
273                 break;
274         }
275         return EXCPT_BENIGN;
276 }
277
278 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
279                 unsigned nr, bool has_error, u32 error_code,
280                 bool reinject)
281 {
282         u32 prev_nr;
283         int class1, class2;
284
285         if (!vcpu->arch.exception.pending) {
286         queue:
287                 vcpu->arch.exception.pending = true;
288                 vcpu->arch.exception.has_error_code = has_error;
289                 vcpu->arch.exception.nr = nr;
290                 vcpu->arch.exception.error_code = error_code;
291                 vcpu->arch.exception.reinject = reinject;
292                 return;
293         }
294
295         /* an exception is already pending; decide how the two combine */
296         prev_nr = vcpu->arch.exception.nr;
297         if (prev_nr == DF_VECTOR) {
298                 /* triple fault -> shutdown */
299                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
300                 return;
301         }
302         class1 = exception_class(prev_nr);
303         class2 = exception_class(nr);
304         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
305                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
306                 /* generate double fault per SDM Table 5-5 */
307                 vcpu->arch.exception.pending = true;
308                 vcpu->arch.exception.has_error_code = true;
309                 vcpu->arch.exception.nr = DF_VECTOR;
310                 vcpu->arch.exception.error_code = 0;
311         } else
312                 /* replace the previous exception with the new one in the hope
313                    that instruction re-execution will regenerate the lost
314                    exception */
315                 goto queue;
316 }
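/*
 * The combination logic above condenses SDM Table 5-5 ("Conditions for
 * Generating a Double Fault"):
 *
 *	first \ second	benign		contributory	page fault
 *	benign		deliver 2nd	deliver 2nd	deliver 2nd
 *	contributory	deliver 2nd	#DF		deliver 2nd
 *	page fault	deliver 2nd	#DF		#DF
 *
 * and a pending #DF followed by anything is treated as a triple fault,
 * which shuts the guest down via KVM_REQ_TRIPLE_FAULT.
 */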
317
318 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
319 {
320         kvm_multiple_exception(vcpu, nr, false, 0, false);
321 }
322 EXPORT_SYMBOL_GPL(kvm_queue_exception);
323
324 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
325 {
326         kvm_multiple_exception(vcpu, nr, false, 0, true);
327 }
328 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
329
330 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
331                            u32 error_code)
332 {
333         ++vcpu->stat.pf_guest;
334         vcpu->arch.cr2 = addr;
335         kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
336 }
337
338 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
339 {
340         vcpu->arch.nmi_pending = 1;
341 }
342 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
343
344 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
345 {
346         kvm_multiple_exception(vcpu, nr, true, error_code, false);
347 }
348 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
349
350 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
351 {
352         kvm_multiple_exception(vcpu, nr, true, error_code, true);
353 }
354 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
355
356 /*
357  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
358  * a #GP and return false.
359  */
360 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
361 {
362         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
363                 return true;
364         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
365         return false;
366 }
367 EXPORT_SYMBOL_GPL(kvm_require_cpl);
368
369 /*
370  * Load the PAE pdptrs.  Return true if they are all valid.
371  */
372 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
373 {
374         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
375         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
376         int i;
377         int ret;
378         u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
379
380         ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
381                                   offset * sizeof(u64), sizeof(pdpte));
382         if (ret < 0) {
383                 ret = 0;
384                 goto out;
385         }
386         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
387                 if (is_present_gpte(pdpte[i]) &&
388                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
389                         ret = 0;
390                         goto out;
391                 }
392         }
393         ret = 1;
394
395         memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
396         __set_bit(VCPU_EXREG_PDPTR,
397                   (unsigned long *)&vcpu->arch.regs_avail);
398         __set_bit(VCPU_EXREG_PDPTR,
399                   (unsigned long *)&vcpu->arch.regs_dirty);
400 out:
401
402         return ret;
403 }
404 EXPORT_SYMBOL_GPL(load_pdptrs);
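/*
 * Worked example of the offset computation in load_pdptrs() (illustrative):
 * in PAE mode the PDPT is a 32-byte-aligned block of four 8-byte entries,
 * selected by CR3 bits 11:5 within the page.  With cr3 = 0x12345060:
 *
 *	pdpt_gfn = 0x12345060 >> PAGE_SHIFT    = 0x12345
 *	offset   = ((0x060) >> 5) << 2         = 12	(in u64 entries)
 *
 * so the four PDPTEs are read starting at byte 12 * 8 = 96 of that page.
 */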
405
406 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
407 {
408         u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
409         bool changed = true;
410         int r;
411
412         if (is_long_mode(vcpu) || !is_pae(vcpu))
413                 return false;
414
415         if (!test_bit(VCPU_EXREG_PDPTR,
416                       (unsigned long *)&vcpu->arch.regs_avail))
417                 return true;
418
419         r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
420         if (r < 0)
421                 goto out;
422         changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
423 out:
424
425         return changed;
426 }
427
428 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
429 {
430         unsigned long old_cr0 = kvm_read_cr0(vcpu);
431         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
432                                     X86_CR0_CD | X86_CR0_NW;
433
434         cr0 |= X86_CR0_ET;
435
436 #ifdef CONFIG_X86_64
437         if (cr0 & 0xffffffff00000000UL)
438                 return 1;
439 #endif
440
441         cr0 &= ~CR0_RESERVED_BITS;
442
443         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
444                 return 1;
445
446         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
447                 return 1;
448
449         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
450 #ifdef CONFIG_X86_64
451                 if ((vcpu->arch.efer & EFER_LME)) {
452                         int cs_db, cs_l;
453
454                         if (!is_pae(vcpu))
455                                 return 1;
456                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
457                         if (cs_l)
458                                 return 1;
459                 } else
460 #endif
461                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
462                         return 1;
463         }
464
465         kvm_x86_ops->set_cr0(vcpu, cr0);
466
467         if ((cr0 ^ old_cr0) & update_bits)
468                 kvm_mmu_reset_context(vcpu);
469         return 0;
470 }
471 EXPORT_SYMBOL_GPL(kvm_set_cr0);
472
473 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
474 {
475         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
476 }
477 EXPORT_SYMBOL_GPL(kvm_lmsw);
478
479 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
480 {
481         u64 xcr0;
482
483         /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
484         if (index != XCR_XFEATURE_ENABLED_MASK)
485                 return 1;
486         xcr0 = xcr;
487         if (kvm_x86_ops->get_cpl(vcpu) != 0)
488                 return 1;
489         if (!(xcr0 & XSTATE_FP))
490                 return 1;
491         if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
492                 return 1;
493         if (xcr0 & ~host_xcr0)
494                 return 1;
495         vcpu->arch.xcr0 = xcr0;
496         vcpu->guest_xcr0_loaded = 0;
497         return 0;
498 }
499
500 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
501 {
502         if (__kvm_set_xcr(vcpu, index, xcr)) {
503                 kvm_inject_gp(vcpu, 0);
504                 return 1;
505         }
506         return 0;
507 }
508 EXPORT_SYMBOL_GPL(kvm_set_xcr);
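/*
 * Example of values accepted by __kvm_set_xcr() above (sketch): a guest
 * enabling x87, SSE and AVX state writes xcr0 = XSTATE_FP | XSTATE_SSE |
 * XSTATE_YMM; plain XSTATE_FP is also legal.  XSTATE_YMM without
 * XSTATE_SSE, or any bit the host's own XCR0 does not advertise, is
 * rejected and kvm_set_xcr() turns that into a #GP for the guest.
 */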
509
510 static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
511 {
512         struct kvm_cpuid_entry2 *best;
513
514         best = kvm_find_cpuid_entry(vcpu, 1, 0);
515         return best && (best->ecx & bit(X86_FEATURE_XSAVE));
516 }
517
518 static void update_cpuid(struct kvm_vcpu *vcpu)
519 {
520         struct kvm_cpuid_entry2 *best;
521
522         best = kvm_find_cpuid_entry(vcpu, 1, 0);
523         if (!best)
524                 return;
525
526         /* Update OSXSAVE bit */
527         if (cpu_has_xsave && best->function == 0x1) {
528                 best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
529                 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
530                         best->ecx |= bit(X86_FEATURE_OSXSAVE);
531         }
532 }
533
534 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
535 {
536         unsigned long old_cr4 = kvm_read_cr4(vcpu);
537         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
538
539         if (cr4 & CR4_RESERVED_BITS)
540                 return 1;
541
542         if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
543                 return 1;
544
545         if (is_long_mode(vcpu)) {
546                 if (!(cr4 & X86_CR4_PAE))
547                         return 1;
548         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
549                    && ((cr4 ^ old_cr4) & pdptr_bits)
550                    && !load_pdptrs(vcpu, vcpu->arch.cr3))
551                 return 1;
552
553         if (cr4 & X86_CR4_VMXE)
554                 return 1;
555
556         kvm_x86_ops->set_cr4(vcpu, cr4);
557
558         if ((cr4 ^ old_cr4) & pdptr_bits)
559                 kvm_mmu_reset_context(vcpu);
560
561         if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
562                 update_cpuid(vcpu);
563
564         return 0;
565 }
566 EXPORT_SYMBOL_GPL(kvm_set_cr4);
567
568 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
569 {
570         if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
571                 kvm_mmu_sync_roots(vcpu);
572                 kvm_mmu_flush_tlb(vcpu);
573                 return 0;
574         }
575
576         if (is_long_mode(vcpu)) {
577                 if (cr3 & CR3_L_MODE_RESERVED_BITS)
578                         return 1;
579         } else {
580                 if (is_pae(vcpu)) {
581                         if (cr3 & CR3_PAE_RESERVED_BITS)
582                                 return 1;
583                         if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
584                                 return 1;
585                 }
586                 /*
587                  * We don't check reserved bits in nonpae mode, because
588                  * this isn't enforced, and VMware depends on this.
589                  */
590         }
591
592         /*
593          * Does the new cr3 value map to physical memory? (Note, we
594          * catch an invalid cr3 even in real-mode, because it would
595          * cause trouble later on when we turn on paging anyway.)
596          *
597          * A real CPU would silently accept an invalid cr3 and would
598          * attempt to use it - with largely undefined (and often hard
599          * to debug) behavior on the guest side.
600          */
601         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
602                 return 1;
603         vcpu->arch.cr3 = cr3;
604         vcpu->arch.mmu.new_cr3(vcpu);
605         return 0;
606 }
607 EXPORT_SYMBOL_GPL(kvm_set_cr3);
608
609 int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
610 {
611         if (cr8 & CR8_RESERVED_BITS)
612                 return 1;
613         if (irqchip_in_kernel(vcpu->kvm))
614                 kvm_lapic_set_tpr(vcpu, cr8);
615         else
616                 vcpu->arch.cr8 = cr8;
617         return 0;
618 }
619
620 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
621 {
622         if (__kvm_set_cr8(vcpu, cr8))
623                 kvm_inject_gp(vcpu, 0);
624 }
625 EXPORT_SYMBOL_GPL(kvm_set_cr8);
626
627 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
628 {
629         if (irqchip_in_kernel(vcpu->kvm))
630                 return kvm_lapic_get_cr8(vcpu);
631         else
632                 return vcpu->arch.cr8;
633 }
634 EXPORT_SYMBOL_GPL(kvm_get_cr8);
635
636 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
637 {
638         switch (dr) {
639         case 0 ... 3:
640                 vcpu->arch.db[dr] = val;
641                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
642                         vcpu->arch.eff_db[dr] = val;
643                 break;
644         case 4:
645                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
646                         return 1; /* #UD */
647                 /* fall through */
648         case 6:
649                 if (val & 0xffffffff00000000ULL)
650                         return -1; /* #GP */
651                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
652                 break;
653         case 5:
654                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
655                         return 1; /* #UD */
656                 /* fall through */
657         default: /* 7 */
658                 if (val & 0xffffffff00000000ULL)
659                         return -1; /* #GP */
660                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
661                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
662                         kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
663                         vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
664                 }
665                 break;
666         }
667
668         return 0;
669 }
670
671 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
672 {
673         int res;
674
675         res = __kvm_set_dr(vcpu, dr, val);
676         if (res > 0)
677                 kvm_queue_exception(vcpu, UD_VECTOR);
678         else if (res < 0)
679                 kvm_inject_gp(vcpu, 0);
680
681         return res;
682 }
683 EXPORT_SYMBOL_GPL(kvm_set_dr);
684
685 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
686 {
687         switch (dr) {
688         case 0 ... 3:
689                 *val = vcpu->arch.db[dr];
690                 break;
691         case 4:
692                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
693                         return 1;
694                 /* fall through */
695         case 6:
696                 *val = vcpu->arch.dr6;
697                 break;
698         case 5:
699                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
700                         return 1;
701                 /* fall through */
702         default: /* 7 */
703                 *val = vcpu->arch.dr7;
704                 break;
705         }
706
707         return 0;
708 }
709
710 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
711 {
712         if (_kvm_get_dr(vcpu, dr, val)) {
713                 kvm_queue_exception(vcpu, UD_VECTOR);
714                 return 1;
715         }
716         return 0;
717 }
718 EXPORT_SYMBOL_GPL(kvm_get_dr);
719
720 /*
721  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
722  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
723  *
724  * This list is modified at module load time to reflect the
725  * capabilities of the host cpu. This capabilities test skips MSRs that are
726  * kvm-specific. Those are put at the beginning of the list.
727  */
728
729 #define KVM_SAVE_MSRS_BEGIN     7
730 static u32 msrs_to_save[] = {
731         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
732         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
733         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
734         HV_X64_MSR_APIC_ASSIST_PAGE,
735         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
736         MSR_STAR,
737 #ifdef CONFIG_X86_64
738         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
739 #endif
740         MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
741 };
742
743 static unsigned num_msrs_to_save;
744
745 static u32 emulated_msrs[] = {
746         MSR_IA32_MISC_ENABLE,
747         MSR_IA32_MCG_STATUS,
748         MSR_IA32_MCG_CTL,
749 };
750
751 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
752 {
753         u64 old_efer = vcpu->arch.efer;
754
755         if (efer & efer_reserved_bits)
756                 return 1;
757
758         if (is_paging(vcpu)
759             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
760                 return 1;
761
762         if (efer & EFER_FFXSR) {
763                 struct kvm_cpuid_entry2 *feat;
764
765                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
766                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
767                         return 1;
768         }
769
770         if (efer & EFER_SVME) {
771                 struct kvm_cpuid_entry2 *feat;
772
773                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
774                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
775                         return 1;
776         }
777
778         efer &= ~EFER_LMA;
779         efer |= vcpu->arch.efer & EFER_LMA;
780
781         kvm_x86_ops->set_efer(vcpu, efer);
782
783         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
784         kvm_mmu_reset_context(vcpu);
785
786         /* Update reserved bits */
787         if ((efer ^ old_efer) & EFER_NX)
788                 kvm_mmu_reset_context(vcpu);
789
790         return 0;
791 }
792
793 void kvm_enable_efer_bits(u64 mask)
794 {
795        efer_reserved_bits &= ~mask;
796 }
797 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
798
799
800 /*
801  * Writes msr value into the appropriate "register".
802  * Returns 0 on success, non-0 otherwise.
803  * Assumes vcpu_load() was already called.
804  */
805 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
806 {
807         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
808 }
809
810 /*
811  * Adapt set_msr() to msr_io()'s calling convention
812  */
813 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
814 {
815         return kvm_set_msr(vcpu, index, *data);
816 }
817
818 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
819 {
820         int version;
821         int r;
822         struct pvclock_wall_clock wc;
823         struct timespec boot;
824
825         if (!wall_clock)
826                 return;
827
828         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
829         if (r)
830                 return;
831
832         if (version & 1)
833                 ++version;  /* first time write, random junk */
834
835         ++version;
836
837         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
838
839         /*
840          * The guest calculates current wall clock time by adding
841          * system time (updated by kvm_write_guest_time below) to the
842          * wall clock specified here.  guest system time equals host
843          * system time for us, thus we must fill in host boot time here.
844          */
845         getboottime(&boot);
846
847         wc.sec = boot.tv_sec;
848         wc.nsec = boot.tv_nsec;
849         wc.version = version;
850
851         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
852
853         version++;
854         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
855 }
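/*
 * The version field above implements a seqcount-like protocol: it is odd
 * while the structure is being updated and even once the update is done.
 * A guest-side reader therefore loops roughly like this (minimal sketch,
 * not copied from any particular guest implementation):
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec  = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || (version != wc->version));
 */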
856
857 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
858 {
859         uint32_t quotient, remainder;
860
861         /* Don't try to replace this with do_div(); it calculates
862          * "(dividend << 32) / divisor" */
863         __asm__ ( "divl %4"
864                   : "=a" (quotient), "=d" (remainder)
865                   : "0" (0), "1" (dividend), "r" (divisor) );
866         return quotient;
867 }
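/*
 * div_frac() computes the 32.32 fixed-point ratio dividend/divisor.  A
 * portable equivalent, shown only for illustration (the asm form avoids a
 * 64-by-32 libgcc division helper on 32-bit builds):
 *
 *	static uint32_t div_frac_portable(uint32_t dividend, uint32_t divisor)
 *	{
 *		return (uint32_t)(((uint64_t)dividend << 32) / divisor);
 *	}
 */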
868
869 static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
870 {
871         uint64_t nsecs = 1000000000LL;
872         int32_t  shift = 0;
873         uint64_t tps64;
874         uint32_t tps32;
875
876         tps64 = tsc_khz * 1000LL;
877         while (tps64 > nsecs*2) {
878                 tps64 >>= 1;
879                 shift--;
880         }
881
882         tps32 = (uint32_t)tps64;
883         while (tps32 <= (uint32_t)nsecs) {
884                 tps32 <<= 1;
885                 shift++;
886         }
887
888         hv_clock->tsc_shift = shift;
889         hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
890
891         pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
892                  __func__, tsc_khz, hv_clock->tsc_shift,
893                  hv_clock->tsc_to_system_mul);
894 }
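/*
 * Worked example (approximate, for illustration): with tsc_khz = 2600000,
 * i.e. a 2.6 GHz TSC, tps64 starts at 2.6e9 > 2 * 1e9, is halved once, and
 * tsc_shift ends up as -1.  tsc_to_system_mul then becomes
 * (1e9 << 32) / 1.3e9, roughly 0.77 in 32.32 fixed point, and the guest
 * converts a TSC delta to nanoseconds as
 *
 *	ns = ((delta >> 1) * tsc_to_system_mul) >> 32
 *
 * (a left shift instead of the right shift when tsc_shift is positive).
 */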
895
896 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
897
898 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
899 {
900         struct kvm *kvm = vcpu->kvm;
901         u64 offset;
902         unsigned long flags;
903
904         spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
905         offset = data - native_read_tsc();
906         kvm_x86_ops->write_tsc_offset(vcpu, offset);
907         spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
908
909         /* Reset of TSC must disable overshoot protection below */
910         vcpu->arch.hv_clock.tsc_timestamp = 0;
911 }
912 EXPORT_SYMBOL_GPL(kvm_write_tsc);
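/*
 * kvm_write_tsc() is the common code referred to in the patch subject: a
 * vendor backend's handler for guest writes to MSR_IA32_TSC is expected to
 * call it instead of computing the offset itself, roughly (sketch):
 *
 *	case MSR_IA32_TSC:
 *		kvm_write_tsc(vcpu, data);
 *		break;
 *
 * while the actual offset programming still happens through the backend's
 * write_tsc_offset() hook, under tsc_write_lock.
 */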
913
914 static void kvm_write_guest_time(struct kvm_vcpu *v)
915 {
916         struct timespec ts;
917         unsigned long flags;
918         struct kvm_vcpu_arch *vcpu = &v->arch;
919         void *shared_kaddr;
920         unsigned long this_tsc_khz;
921
922         if ((!vcpu->time_page))
923                 return;
924
925         this_tsc_khz = get_cpu_var(cpu_tsc_khz);
926         if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
927                 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
928                 vcpu->hv_clock_tsc_khz = this_tsc_khz;
929         }
930         put_cpu_var(cpu_tsc_khz);
931
932         /* Keep irq disabled to prevent changes to the clock */
933         local_irq_save(flags);
934         kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
935         ktime_get_ts(&ts);
936         monotonic_to_bootbased(&ts);
937         local_irq_restore(flags);
938
939         /* With all the info we got, fill in the values */
940
941         vcpu->hv_clock.system_time = ts.tv_nsec +
942                                      (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
943
944         vcpu->hv_clock.flags = 0;
945
946         /*
947          * The interface expects us to write an even number signaling that the
948          * update is finished. Since the guest won't see the intermediate
949          * state, we just increase by 2 at the end.
950          */
951         vcpu->hv_clock.version += 2;
952
953         shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
954
955         memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
956                sizeof(vcpu->hv_clock));
957
958         kunmap_atomic(shared_kaddr, KM_USER0);
959
960         mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
961 }
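/*
 * On the guest side, the fields written above combine roughly as follows
 * (minimal sketch of the pvclock algorithm, not copied from guest code;
 * scale() stands for the shift-and-multiply conversion described at
 * kvm_set_time_scale()):
 *
 *	now_ns = hv_clock.system_time +
 *		 scale(rdtsc() - hv_clock.tsc_timestamp);
 *
 * guarded by the same even/odd version check used for the wall clock
 * structure, so the guest never observes a half-updated record.
 */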
962
963 static int kvm_request_guest_time_update(struct kvm_vcpu *v)
964 {
965         struct kvm_vcpu_arch *vcpu = &v->arch;
966
967         if (!vcpu->time_page)
968                 return 0;
969         kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
970         return 1;
971 }
972
973 static bool msr_mtrr_valid(unsigned msr)
974 {
975         switch (msr) {
976         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
977         case MSR_MTRRfix64K_00000:
978         case MSR_MTRRfix16K_80000:
979         case MSR_MTRRfix16K_A0000:
980         case MSR_MTRRfix4K_C0000:
981         case MSR_MTRRfix4K_C8000:
982         case MSR_MTRRfix4K_D0000:
983         case MSR_MTRRfix4K_D8000:
984         case MSR_MTRRfix4K_E0000:
985         case MSR_MTRRfix4K_E8000:
986         case MSR_MTRRfix4K_F0000:
987         case MSR_MTRRfix4K_F8000:
988         case MSR_MTRRdefType:
989         case MSR_IA32_CR_PAT:
990                 return true;
991         case 0x2f8:
992                 return true;
993         }
994         return false;
995 }
996
997 static bool valid_pat_type(unsigned t)
998 {
999         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1000 }
1001
1002 static bool valid_mtrr_type(unsigned t)
1003 {
1004         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1005 }
1006
1007 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1008 {
1009         int i;
1010
1011         if (!msr_mtrr_valid(msr))
1012                 return false;
1013
1014         if (msr == MSR_IA32_CR_PAT) {
1015                 for (i = 0; i < 8; i++)
1016                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
1017                                 return false;
1018                 return true;
1019         } else if (msr == MSR_MTRRdefType) {
1020                 if (data & ~0xcff)
1021                         return false;
1022                 return valid_mtrr_type(data & 0xff);
1023         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1024                 for (i = 0; i < 8 ; i++)
1025                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1026                                 return false;
1027                 return true;
1028         }
1029
1030         /* variable MTRRs */
1031         return valid_mtrr_type(data & 0xff);
1032 }
1033
1034 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1035 {
1036         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1037
1038         if (!mtrr_valid(vcpu, msr, data))
1039                 return 1;
1040
1041         if (msr == MSR_MTRRdefType) {
1042                 vcpu->arch.mtrr_state.def_type = data;
1043                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1044         } else if (msr == MSR_MTRRfix64K_00000)
1045                 p[0] = data;
1046         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1047                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
1048         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1049                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1050         else if (msr == MSR_IA32_CR_PAT)
1051                 vcpu->arch.pat = data;
1052         else {  /* Variable MTRRs */
1053                 int idx, is_mtrr_mask;
1054                 u64 *pt;
1055
1056                 idx = (msr - 0x200) / 2;
1057                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1058                 if (!is_mtrr_mask)
1059                         pt =
1060                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1061                 else
1062                         pt =
1063                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1064                 *pt = data;
1065         }
1066
1067         kvm_mmu_reset_context(vcpu);
1068         return 0;
1069 }
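/*
 * Variable-range MTRR indexing, worked example (illustrative): the
 * base/mask pairs start at MSR 0x200, so a write to msr = 0x205 gives
 *
 *	idx          = (0x205 - 0x200) / 2     = 2
 *	is_mtrr_mask = 0x205 - 0x200 - 2 * 2   = 1
 *
 * i.e. it lands in var_ranges[2].mask_lo/mask_hi (MTRRphysMask2).
 */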
1070
1071 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1072 {
1073         u64 mcg_cap = vcpu->arch.mcg_cap;
1074         unsigned bank_num = mcg_cap & 0xff;
1075
1076         switch (msr) {
1077         case MSR_IA32_MCG_STATUS:
1078                 vcpu->arch.mcg_status = data;
1079                 break;
1080         case MSR_IA32_MCG_CTL:
1081                 if (!(mcg_cap & MCG_CTL_P))
1082                         return 1;
1083                 if (data != 0 && data != ~(u64)0)
1084                         return -1;
1085                 vcpu->arch.mcg_ctl = data;
1086                 break;
1087         default:
1088                 if (msr >= MSR_IA32_MC0_CTL &&
1089                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1090                         u32 offset = msr - MSR_IA32_MC0_CTL;
1091                         /* Only 0 or all 1s can be written to IA32_MCi_CTL.
1092                          * Some Linux kernels, though, clear bit 10 in bank 4 to
1093                          * work around a BIOS/GART TBL issue on AMD K8s; ignore
1094                          * this to avoid an uncaught #GP in the guest.
1095                          */
1096                         if ((offset & 0x3) == 0 &&
1097                             data != 0 && (data | (1 << 10)) != ~(u64)0)
1098                                 return -1;
1099                         vcpu->arch.mce_banks[offset] = data;
1100                         break;
1101                 }
1102                 return 1;
1103         }
1104         return 0;
1105 }
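/*
 * MC bank MSR layout assumed above, worked example (illustrative): each
 * bank occupies four MSRs starting at MSR_IA32_MC0_CTL + 4 * bank, in the
 * order CTL, STATUS, ADDR, MISC.  With MSR_IA32_MC0_CTL at 0x400, a write
 * to 0x412 gives offset = 0x12, (offset & 3) == 2, i.e. bank 4's ADDR
 * register, stored in vcpu->arch.mce_banks[0x12].
 */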
1106
1107 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1108 {
1109         struct kvm *kvm = vcpu->kvm;
1110         int lm = is_long_mode(vcpu);
1111         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1112                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1113         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1114                 : kvm->arch.xen_hvm_config.blob_size_32;
1115         u32 page_num = data & ~PAGE_MASK;
1116         u64 page_addr = data & PAGE_MASK;
1117         u8 *page;
1118         int r;
1119
1120         r = -E2BIG;
1121         if (page_num >= blob_size)
1122                 goto out;
1123         r = -ENOMEM;
1124         page = kzalloc(PAGE_SIZE, GFP_KERNEL);
1125         if (!page)
1126                 goto out;
1127         r = -EFAULT;
1128         if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
1129                 goto out_free;
1130         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1131                 goto out_free;
1132         r = 0;
1133 out_free:
1134         kfree(page);
1135 out:
1136         return r;
1137 }
1138
1139 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1140 {
1141         return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1142 }
1143
1144 static bool kvm_hv_msr_partition_wide(u32 msr)
1145 {
1146         bool r = false;
1147         switch (msr) {
1148         case HV_X64_MSR_GUEST_OS_ID:
1149         case HV_X64_MSR_HYPERCALL:
1150                 r = true;
1151                 break;
1152         }
1153
1154         return r;
1155 }
1156
1157 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1158 {
1159         struct kvm *kvm = vcpu->kvm;
1160
1161         switch (msr) {
1162         case HV_X64_MSR_GUEST_OS_ID:
1163                 kvm->arch.hv_guest_os_id = data;
1164                 /* setting guest os id to zero disables hypercall page */
1165                 if (!kvm->arch.hv_guest_os_id)
1166                         kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1167                 break;
1168         case HV_X64_MSR_HYPERCALL: {
1169                 u64 gfn;
1170                 unsigned long addr;
1171                 u8 instructions[4];
1172
1173                 /* if the guest os id is not set, the hypercall page stays disabled */
1174                 if (!kvm->arch.hv_guest_os_id)
1175                         break;
1176                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1177                         kvm->arch.hv_hypercall = data;
1178                         break;
1179                 }
1180                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1181                 addr = gfn_to_hva(kvm, gfn);
1182                 if (kvm_is_error_hva(addr))
1183                         return 1;
1184                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1185                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1186                 if (copy_to_user((void __user *)addr, instructions, 4))
1187                         return 1;
1188                 kvm->arch.hv_hypercall = data;
1189                 break;
1190         }
1191         default:
1192                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1193                           "data 0x%llx\n", msr, data);
1194                 return 1;
1195         }
1196         return 0;
1197 }
1198
1199 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1200 {
1201         switch (msr) {
1202         case HV_X64_MSR_APIC_ASSIST_PAGE: {
1203                 unsigned long addr;
1204
1205                 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1206                         vcpu->arch.hv_vapic = data;
1207                         break;
1208                 }
1209                 addr = gfn_to_hva(vcpu->kvm, data >>
1210                                   HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1211                 if (kvm_is_error_hva(addr))
1212                         return 1;
1213                 if (clear_user((void __user *)addr, PAGE_SIZE))
1214                         return 1;
1215                 vcpu->arch.hv_vapic = data;
1216                 break;
1217         }
1218         case HV_X64_MSR_EOI:
1219                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1220         case HV_X64_MSR_ICR:
1221                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1222         case HV_X64_MSR_TPR:
1223                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1224         default:
1225                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1226                           "data 0x%llx\n", msr, data);
1227                 return 1;
1228         }
1229
1230         return 0;
1231 }
1232
1233 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1234 {
1235         switch (msr) {
1236         case MSR_EFER:
1237                 return set_efer(vcpu, data);
1238         case MSR_K7_HWCR:
1239                 data &= ~(u64)0x40;     /* ignore flush filter disable */
1240                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
1241                 if (data != 0) {
1242                         pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1243                                 data);
1244                         return 1;
1245                 }
1246                 break;
1247         case MSR_FAM10H_MMIO_CONF_BASE:
1248                 if (data != 0) {
1249                         pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1250                                 "0x%llx\n", data);
1251                         return 1;
1252                 }
1253                 break;
1254         case MSR_AMD64_NB_CFG:
1255                 break;
1256         case MSR_IA32_DEBUGCTLMSR:
1257                 if (!data) {
1258                         /* We support the non-activated case already */
1259                         break;
1260                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1261                         /* Values other than LBR and BTF are vendor-specific,
1262                            thus reserved and should throw a #GP */
1263                         return 1;
1264                 }
1265                 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1266                         __func__, data);
1267                 break;
1268         case MSR_IA32_UCODE_REV:
1269         case MSR_IA32_UCODE_WRITE:
1270         case MSR_VM_HSAVE_PA:
1271         case MSR_AMD64_PATCH_LOADER:
1272                 break;
1273         case 0x200 ... 0x2ff:
1274                 return set_msr_mtrr(vcpu, msr, data);
1275         case MSR_IA32_APICBASE:
1276                 kvm_set_apic_base(vcpu, data);
1277                 break;
1278         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1279                 return kvm_x2apic_msr_write(vcpu, msr, data);
1280         case MSR_IA32_MISC_ENABLE:
1281                 vcpu->arch.ia32_misc_enable_msr = data;
1282                 break;
1283         case MSR_KVM_WALL_CLOCK_NEW:
1284         case MSR_KVM_WALL_CLOCK:
1285                 vcpu->kvm->arch.wall_clock = data;
1286                 kvm_write_wall_clock(vcpu->kvm, data);
1287                 break;
1288         case MSR_KVM_SYSTEM_TIME_NEW:
1289         case MSR_KVM_SYSTEM_TIME: {
1290                 if (vcpu->arch.time_page) {
1291                         kvm_release_page_dirty(vcpu->arch.time_page);
1292                         vcpu->arch.time_page = NULL;
1293                 }
1294
1295                 vcpu->arch.time = data;
1296
1297                 /* we verify if the enable bit is set... */
1298                 if (!(data & 1))
1299                         break;
1300
1301                 /* ...but clean it before doing the actual write */
1302                 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1303
1304                 vcpu->arch.time_page =
1305                                 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
1306
1307                 if (is_error_page(vcpu->arch.time_page)) {
1308                         kvm_release_page_clean(vcpu->arch.time_page);
1309                         vcpu->arch.time_page = NULL;
1310                 }
1311
1312                 kvm_request_guest_time_update(vcpu);
1313                 break;
1314         }
1315         case MSR_IA32_MCG_CTL:
1316         case MSR_IA32_MCG_STATUS:
1317         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1318                 return set_msr_mce(vcpu, msr, data);
1319
1320         /* Performance counters are not protected by a CPUID bit,
1321          * so we should check all of them in the generic path for the sake of
1322          * cross-vendor migration.
1323          * Writing a zero into the event select MSRs disables them,
1324          * which we perfectly emulate ;-). Any other value should at least
1325          * be reported, since some guests depend on them.
1326          */
1327         case MSR_P6_EVNTSEL0:
1328         case MSR_P6_EVNTSEL1:
1329         case MSR_K7_EVNTSEL0:
1330         case MSR_K7_EVNTSEL1:
1331         case MSR_K7_EVNTSEL2:
1332         case MSR_K7_EVNTSEL3:
1333                 if (data != 0)
1334                         pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1335                                 "0x%x data 0x%llx\n", msr, data);
1336                 break;
1337         /* at least RHEL 4 unconditionally writes to the perfctr registers,
1338          * so we ignore writes to make it happy.
1339          */
1340         case MSR_P6_PERFCTR0:
1341         case MSR_P6_PERFCTR1:
1342         case MSR_K7_PERFCTR0:
1343         case MSR_K7_PERFCTR1:
1344         case MSR_K7_PERFCTR2:
1345         case MSR_K7_PERFCTR3:
1346                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1347                         "0x%x data 0x%llx\n", msr, data);
1348                 break;
1349         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1350                 if (kvm_hv_msr_partition_wide(msr)) {
1351                         int r;
1352                         mutex_lock(&vcpu->kvm->lock);
1353                         r = set_msr_hyperv_pw(vcpu, msr, data);
1354                         mutex_unlock(&vcpu->kvm->lock);
1355                         return r;
1356                 } else
1357                         return set_msr_hyperv(vcpu, msr, data);
1358                 break;
1359         default:
1360                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1361                         return xen_hvm_config(vcpu, data);
1362                 if (!ignore_msrs) {
1363                         pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1364                                 msr, data);
1365                         return 1;
1366                 } else {
1367                         pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1368                                 msr, data);
1369                         break;
1370                 }
1371         }
1372         return 0;
1373 }
1374 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1375
1376
1377 /*
1378  * Reads an msr value (of 'msr_index') into 'pdata'.
1379  * Returns 0 on success, non-0 otherwise.
1380  * Assumes vcpu_load() was already called.
1381  */
1382 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1383 {
1384         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1385 }
1386
1387 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1388 {
1389         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1390
1391         if (!msr_mtrr_valid(msr))
1392                 return 1;
1393
1394         if (msr == MSR_MTRRdefType)
1395                 *pdata = vcpu->arch.mtrr_state.def_type +
1396                          (vcpu->arch.mtrr_state.enabled << 10);
1397         else if (msr == MSR_MTRRfix64K_00000)
1398                 *pdata = p[0];
1399         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1400                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1401         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1402                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1403         else if (msr == MSR_IA32_CR_PAT)
1404                 *pdata = vcpu->arch.pat;
1405         else {  /* Variable MTRRs */
1406                 int idx, is_mtrr_mask;
1407                 u64 *pt;
1408
1409                 idx = (msr - 0x200) / 2;
1410                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1411                 if (!is_mtrr_mask)
1412                         pt =
1413                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1414                 else
1415                         pt =
1416                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1417                 *pdata = *pt;
1418         }
1419
1420         return 0;
1421 }
1422
1423 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1424 {
1425         u64 data;
1426         u64 mcg_cap = vcpu->arch.mcg_cap;
1427         unsigned bank_num = mcg_cap & 0xff;
1428
1429         switch (msr) {
1430         case MSR_IA32_P5_MC_ADDR:
1431         case MSR_IA32_P5_MC_TYPE:
1432                 data = 0;
1433                 break;
1434         case MSR_IA32_MCG_CAP:
1435                 data = vcpu->arch.mcg_cap;
1436                 break;
1437         case MSR_IA32_MCG_CTL:
1438                 if (!(mcg_cap & MCG_CTL_P))
1439                         return 1;
1440                 data = vcpu->arch.mcg_ctl;
1441                 break;
1442         case MSR_IA32_MCG_STATUS:
1443                 data = vcpu->arch.mcg_status;
1444                 break;
1445         default:
1446                 if (msr >= MSR_IA32_MC0_CTL &&
1447                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1448                         u32 offset = msr - MSR_IA32_MC0_CTL;
1449                         data = vcpu->arch.mce_banks[offset];
1450                         break;
1451                 }
1452                 return 1;
1453         }
1454         *pdata = data;
1455         return 0;
1456 }
1457
1458 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1459 {
1460         u64 data = 0;
1461         struct kvm *kvm = vcpu->kvm;
1462
1463         switch (msr) {
1464         case HV_X64_MSR_GUEST_OS_ID:
1465                 data = kvm->arch.hv_guest_os_id;
1466                 break;
1467         case HV_X64_MSR_HYPERCALL:
1468                 data = kvm->arch.hv_hypercall;
1469                 break;
1470         default:
1471                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1472                 return 1;
1473         }
1474
1475         *pdata = data;
1476         return 0;
1477 }
1478
1479 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1480 {
1481         u64 data = 0;
1482
1483         switch (msr) {
1484         case HV_X64_MSR_VP_INDEX: {
1485                 int r;
1486                 struct kvm_vcpu *v;
1487                 kvm_for_each_vcpu(r, v, vcpu->kvm)
1488                         if (v == vcpu)
1489                                 data = r;
1490                 break;
1491         }
1492         case HV_X64_MSR_EOI:
1493                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1494         case HV_X64_MSR_ICR:
1495                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1496         case HV_X64_MSR_TPR:
1497                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1498         default:
1499                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1500                 return 1;
1501         }
1502         *pdata = data;
1503         return 0;
1504 }
1505
1506 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1507 {
1508         u64 data;
1509
1510         switch (msr) {
1511         case MSR_IA32_PLATFORM_ID:
1512         case MSR_IA32_UCODE_REV:
1513         case MSR_IA32_EBL_CR_POWERON:
1514         case MSR_IA32_DEBUGCTLMSR:
1515         case MSR_IA32_LASTBRANCHFROMIP:
1516         case MSR_IA32_LASTBRANCHTOIP:
1517         case MSR_IA32_LASTINTFROMIP:
1518         case MSR_IA32_LASTINTTOIP:
1519         case MSR_K8_SYSCFG:
1520         case MSR_K7_HWCR:
1521         case MSR_VM_HSAVE_PA:
1522         case MSR_P6_PERFCTR0:
1523         case MSR_P6_PERFCTR1:
1524         case MSR_P6_EVNTSEL0:
1525         case MSR_P6_EVNTSEL1:
1526         case MSR_K7_EVNTSEL0:
1527         case MSR_K7_PERFCTR0:
1528         case MSR_K8_INT_PENDING_MSG:
1529         case MSR_AMD64_NB_CFG:
1530         case MSR_FAM10H_MMIO_CONF_BASE:
1531                 data = 0;
1532                 break;
1533         case MSR_MTRRcap:
1534                 data = 0x500 | KVM_NR_VAR_MTRR;
1535                 break;
1536         case 0x200 ... 0x2ff:
1537                 return get_msr_mtrr(vcpu, msr, pdata);
1538         case 0xcd: /* fsb frequency */
1539                 data = 3;
1540                 break;
1541         case MSR_IA32_APICBASE:
1542                 data = kvm_get_apic_base(vcpu);
1543                 break;
1544         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1545                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1546                 break;
1547         case MSR_IA32_MISC_ENABLE:
1548                 data = vcpu->arch.ia32_misc_enable_msr;
1549                 break;
1550         case MSR_IA32_PERF_STATUS:
1551                 /* TSC increment by tick */
1552                 data = 1000ULL;
1553                 /* CPU multiplier */
1554                 data |= (((uint64_t)4ULL) << 40);
1555                 break;
1556         case MSR_EFER:
1557                 data = vcpu->arch.efer;
1558                 break;
1559         case MSR_KVM_WALL_CLOCK:
1560         case MSR_KVM_WALL_CLOCK_NEW:
1561                 data = vcpu->kvm->arch.wall_clock;
1562                 break;
1563         case MSR_KVM_SYSTEM_TIME:
1564         case MSR_KVM_SYSTEM_TIME_NEW:
1565                 data = vcpu->arch.time;
1566                 break;
1567         case MSR_IA32_P5_MC_ADDR:
1568         case MSR_IA32_P5_MC_TYPE:
1569         case MSR_IA32_MCG_CAP:
1570         case MSR_IA32_MCG_CTL:
1571         case MSR_IA32_MCG_STATUS:
1572         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1573                 return get_msr_mce(vcpu, msr, pdata);
1574         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1575                 if (kvm_hv_msr_partition_wide(msr)) {
1576                         int r;
1577                         mutex_lock(&vcpu->kvm->lock);
1578                         r = get_msr_hyperv_pw(vcpu, msr, pdata);
1579                         mutex_unlock(&vcpu->kvm->lock);
1580                         return r;
1581                 } else
1582                         return get_msr_hyperv(vcpu, msr, pdata);
1583                 break;
1584         default:
1585                 if (!ignore_msrs) {
1586                         pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1587                         return 1;
1588                 } else {
1589                         pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1590                         data = 0;
1591                 }
1592                 break;
1593         }
1594         *pdata = data;
1595         return 0;
1596 }
1597 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1598
1599 /*
1600  * Read or write a bunch of msrs. All parameters are kernel addresses.
1601  *
1602  * @return number of msrs successfully processed (stops at the first failure).
1603  */
1604 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1605                     struct kvm_msr_entry *entries,
1606                     int (*do_msr)(struct kvm_vcpu *vcpu,
1607                                   unsigned index, u64 *data))
1608 {
1609         int i, idx;
1610
1611         idx = srcu_read_lock(&vcpu->kvm->srcu);
1612         for (i = 0; i < msrs->nmsrs; ++i)
1613                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1614                         break;
1615         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1616
1617         return i;
1618 }
1619
1620 /*
1621  * Read or write a bunch of msrs. Parameters are user addresses.
1622  *
1623  * @return number of msrs successfully processed (stops at the first failure).
1624  */
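/*
 * The user buffer is a struct kvm_msrs header immediately followed by
 * nmsrs struct kvm_msr_entry elements.  Illustrative userspace usage
 * (a sketch only, not part of this file; assumes an open vcpu fd):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} buf = { .hdr.nmsrs = 1 };
 *
 *	buf.entries[0].index = 0xc0000080;	// MSR_EFER
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);	// returns number of msrs read
 */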
1625 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1626                   int (*do_msr)(struct kvm_vcpu *vcpu,
1627                                 unsigned index, u64 *data),
1628                   int writeback)
1629 {
1630         struct kvm_msrs msrs;
1631         struct kvm_msr_entry *entries;
1632         int r, n;
1633         unsigned size;
1634
1635         r = -EFAULT;
1636         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1637                 goto out;
1638
1639         r = -E2BIG;
1640         if (msrs.nmsrs >= MAX_IO_MSRS)
1641                 goto out;
1642
1643         r = -ENOMEM;
1644         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1645         entries = kmalloc(size, GFP_KERNEL);
1646         if (!entries)
1647                 goto out;
1648
1649         r = -EFAULT;
1650         if (copy_from_user(entries, user_msrs->entries, size))
1651                 goto out_free;
1652
1653         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1654         if (r < 0)
1655                 goto out_free;
1656
1657         r = -EFAULT;
1658         if (writeback && copy_to_user(user_msrs->entries, entries, size))
1659                 goto out_free;
1660
1661         r = n;
1662
1663 out_free:
1664         kfree(entries);
1665 out:
1666         return r;
1667 }
1668
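/*
 * KVM_CHECK_EXTENSION: report whether (and at what level) an extension is
 * supported.  Boolean capabilities return 0 or 1; others return a count or
 * offset (e.g. KVM_CAP_NR_VCPUS, KVM_CAP_COALESCED_MMIO).  Unknown
 * extensions return 0.
 */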
1669 int kvm_dev_ioctl_check_extension(long ext)
1670 {
1671         int r;
1672
1673         switch (ext) {
1674         case KVM_CAP_IRQCHIP:
1675         case KVM_CAP_HLT:
1676         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
1677         case KVM_CAP_SET_TSS_ADDR:
1678         case KVM_CAP_EXT_CPUID:
1679         case KVM_CAP_CLOCKSOURCE:
1680         case KVM_CAP_PIT:
1681         case KVM_CAP_NOP_IO_DELAY:
1682         case KVM_CAP_MP_STATE:
1683         case KVM_CAP_SYNC_MMU:
1684         case KVM_CAP_REINJECT_CONTROL:
1685         case KVM_CAP_IRQ_INJECT_STATUS:
1686         case KVM_CAP_ASSIGN_DEV_IRQ:
1687         case KVM_CAP_IRQFD:
1688         case KVM_CAP_IOEVENTFD:
1689         case KVM_CAP_PIT2:
1690         case KVM_CAP_PIT_STATE2:
1691         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1692         case KVM_CAP_XEN_HVM:
1693         case KVM_CAP_ADJUST_CLOCK:
1694         case KVM_CAP_VCPU_EVENTS:
1695         case KVM_CAP_HYPERV:
1696         case KVM_CAP_HYPERV_VAPIC:
1697         case KVM_CAP_HYPERV_SPIN:
1698         case KVM_CAP_PCI_SEGMENT:
1699         case KVM_CAP_DEBUGREGS:
1700         case KVM_CAP_X86_ROBUST_SINGLESTEP:
1701         case KVM_CAP_XSAVE:
1702                 r = 1;
1703                 break;
1704         case KVM_CAP_COALESCED_MMIO:
1705                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1706                 break;
1707         case KVM_CAP_VAPIC:
1708                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1709                 break;
1710         case KVM_CAP_NR_VCPUS:
1711                 r = KVM_MAX_VCPUS;
1712                 break;
1713         case KVM_CAP_NR_MEMSLOTS:
1714                 r = KVM_MEMORY_SLOTS;
1715                 break;
1716         case KVM_CAP_PV_MMU:    /* obsolete */
1717                 r = 0;
1718                 break;
1719         case KVM_CAP_IOMMU:
1720                 r = iommu_found();
1721                 break;
1722         case KVM_CAP_MCE:
1723                 r = KVM_MAX_MCE_BANKS;
1724                 break;
1725         case KVM_CAP_XCRS:
1726                 r = cpu_has_xsave;
1727                 break;
1728         default:
1729                 r = 0;
1730                 break;
1731         }
1732         return r;
1734 }
1735
1736 long kvm_arch_dev_ioctl(struct file *filp,
1737                         unsigned int ioctl, unsigned long arg)
1738 {
1739         void __user *argp = (void __user *)arg;
1740         long r;
1741
1742         switch (ioctl) {
1743         case KVM_GET_MSR_INDEX_LIST: {
1744                 struct kvm_msr_list __user *user_msr_list = argp;
1745                 struct kvm_msr_list msr_list;
1746                 unsigned n;
1747
1748                 r = -EFAULT;
1749                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1750                         goto out;
1751                 n = msr_list.nmsrs;
1752                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1753                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1754                         goto out;
1755                 r = -E2BIG;
1756                 if (n < msr_list.nmsrs)
1757                         goto out;
1758                 r = -EFAULT;
1759                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1760                                  num_msrs_to_save * sizeof(u32)))
1761                         goto out;
1762                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
1763                                  &emulated_msrs,
1764                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1765                         goto out;
1766                 r = 0;
1767                 break;
1768         }
1769         case KVM_GET_SUPPORTED_CPUID: {
1770                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1771                 struct kvm_cpuid2 cpuid;
1772
1773                 r = -EFAULT;
1774                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1775                         goto out;
1776                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
1777                                                       cpuid_arg->entries);
1778                 if (r)
1779                         goto out;
1780
1781                 r = -EFAULT;
1782                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1783                         goto out;
1784                 r = 0;
1785                 break;
1786         }
1787         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1788                 u64 mce_cap;
1789
1790                 mce_cap = KVM_MCE_CAP_SUPPORTED;
1791                 r = -EFAULT;
1792                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1793                         goto out;
1794                 r = 0;
1795                 break;
1796         }
1797         default:
1798                 r = -EINVAL;
1799         }
1800 out:
1801         return r;
1802 }
1803
1804 static void wbinvd_ipi(void *garbage)
1805 {
1806         wbinvd();
1807 }
1808
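/*
 * With an assigned device behind a non-coherent IOMMU mapping, a guest
 * WBINVD must reach real hardware: device DMA may bypass the CPU caches,
 * so the cache flush cannot simply be dropped.
 */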
1809 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
1810 {
1811         return vcpu->kvm->arch.iommu_domain &&
1812                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
1813 }
1814
1815 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1816 {
1817         /* WBINVD may be executed by the guest; make sure it takes effect */
1818         if (need_emulate_wbinvd(vcpu)) {
1819                 if (kvm_x86_ops->has_wbinvd_exit())
1820                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
1821                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
1822                         smp_call_function_single(vcpu->cpu,
1823                                         wbinvd_ipi, NULL, 1);
1824         }
1825
1826         kvm_x86_ops->vcpu_load(vcpu, cpu);
1827         if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1828                 unsigned long khz = cpufreq_quick_get(cpu);
1829                 if (!khz)
1830                         khz = tsc_khz;
1831                 per_cpu(cpu_tsc_khz, cpu) = khz;
1832         }
1833         kvm_request_guest_time_update(vcpu);
1834 }
1835
1836 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1837 {
1838         kvm_x86_ops->vcpu_put(vcpu);
1839         kvm_put_guest_fpu(vcpu);
1840 }
1841
1842 static int is_efer_nx(void)
1843 {
1844         unsigned long long efer = 0;
1845
1846         rdmsrl_safe(MSR_EFER, &efer);
1847         return efer & EFER_NX;
1848 }
1849
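/*
 * If the host runs with EFER.NX clear, mask the NX bit (CPUID 0x80000001
 * EDX bit 20) out of the guest's CPUID so the guest does not rely on a
 * feature the host has disabled.
 */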
1850 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1851 {
1852         int i;
1853         struct kvm_cpuid_entry2 *e, *entry;
1854
1855         entry = NULL;
1856         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1857                 e = &vcpu->arch.cpuid_entries[i];
1858                 if (e->function == 0x80000001) {
1859                         entry = e;
1860                         break;
1861                 }
1862         }
1863         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
1864                 entry->edx &= ~(1 << 20);
1865                 printk(KERN_INFO "kvm: guest NX capability removed\n");
1866         }
1867 }
1868
1869 /*
 * Legacy KVM_SET_CPUID: old userspace passes struct kvm_cpuid_entry, which
 * is converted here to the kvm_cpuid_entry2 format used internally.
 */
1870 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1871                                     struct kvm_cpuid *cpuid,
1872                                     struct kvm_cpuid_entry __user *entries)
1873 {
1874         int r, i;
1875         struct kvm_cpuid_entry *cpuid_entries;
1876
1877         r = -E2BIG;
1878         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1879                 goto out;
1880         r = -ENOMEM;
1881         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1882         if (!cpuid_entries)
1883                 goto out;
1884         r = -EFAULT;
1885         if (copy_from_user(cpuid_entries, entries,
1886                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1887                 goto out_free;
1888         for (i = 0; i < cpuid->nent; i++) {
1889                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1890                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1891                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1892                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1893                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1894                 vcpu->arch.cpuid_entries[i].index = 0;
1895                 vcpu->arch.cpuid_entries[i].flags = 0;
1896                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1897                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1898                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
1899         }
1900         vcpu->arch.cpuid_nent = cpuid->nent;
1901         cpuid_fix_nx_cap(vcpu);
1902         r = 0;
1903         kvm_apic_set_version(vcpu);
1904         kvm_x86_ops->cpuid_update(vcpu);
1905         update_cpuid(vcpu);
1906
1907 out_free:
1908         vfree(cpuid_entries);
1909 out:
1910         return r;
1911 }
1912
1913 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1914                                      struct kvm_cpuid2 *cpuid,
1915                                      struct kvm_cpuid_entry2 __user *entries)
1916 {
1917         int r;
1918
1919         r = -E2BIG;
1920         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1921                 goto out;
1922         r = -EFAULT;
1923         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
1924                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
1925                 goto out;
1926         vcpu->arch.cpuid_nent = cpuid->nent;
1927         kvm_apic_set_version(vcpu);
1928         kvm_x86_ops->cpuid_update(vcpu);
1929         update_cpuid(vcpu);
1930         return 0;
1931
1932 out:
1933         return r;
1934 }
1935
1936 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1937                                      struct kvm_cpuid2 *cpuid,
1938                                      struct kvm_cpuid_entry2 __user *entries)
1939 {
1940         int r;
1941
1942         r = -E2BIG;
1943         if (cpuid->nent < vcpu->arch.cpuid_nent)
1944                 goto out;
1945         r = -EFAULT;
1946         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
1947                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
1948                 goto out;
1949         return 0;
1950
1951 out:
1952         cpuid->nent = vcpu->arch.cpuid_nent;
1953         return r;
1954 }
1955
1956 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1957                            u32 index)
1958 {
1959         entry->function = function;
1960         entry->index = index;
1961         cpuid_count(entry->function, entry->index,
1962                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1963         entry->flags = 0;
1964 }
1965
1966 #define F(x) bit(X86_FEATURE_##x)
1967
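/*
 * Fill in one CPUID leaf (plus extra entries for indexed or stateful leaves)
 * from the host's CPUID, masked down to the feature bits KVM can virtualize;
 * vendor code gets a final say via ->set_supported_cpuid().
 */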
1968 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1969                          u32 index, int *nent, int maxnent)
1970 {
1971         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1972 #ifdef CONFIG_X86_64
1973         unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
1974                                 ? F(GBPAGES) : 0;
1975         unsigned f_lm = F(LM);
1976 #else
1977         unsigned f_gbpages = 0;
1978         unsigned f_lm = 0;
1979 #endif
1980         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
1981
1982         /* cpuid 1.edx */
1983         const u32 kvm_supported_word0_x86_features =
1984                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1985                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1986                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1987                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1988                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1989                 0 /* Reserved, DS, ACPI */ | F(MMX) |
1990                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1991                 0 /* HTT, TM, Reserved, PBE */;
1992         /* cpuid 0x80000001.edx */
1993         const u32 kvm_supported_word1_x86_features =
1994                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1995                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1996                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1997                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1998                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1999                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
2000                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
2001                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
2002         /* cpuid 1.ecx */
2003         const u32 kvm_supported_word4_x86_features =
2004                 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
2005                 0 /* DS-CPL, VMX, SMX, EST */ |
2006                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
2007                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
2008                 0 /* Reserved, DCA */ | F(XMM4_1) |
2009                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
2010                 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
2011         /* cpuid 0x80000001.ecx */
2012         const u32 kvm_supported_word6_x86_features =
2013                 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
2014                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2015                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
2016                 0 /* SKINIT */ | 0 /* WDT */;
2017
2018         /* all calls to cpuid_count() should be made on the same cpu */
2019         get_cpu();
2020         do_cpuid_1_ent(entry, function, index);
2021         ++*nent;
2022
2023         switch (function) {
2024         case 0:
2025                 entry->eax = min(entry->eax, (u32)0xd);
2026                 break;
2027         case 1:
2028                 entry->edx &= kvm_supported_word0_x86_features;
2029                 entry->ecx &= kvm_supported_word4_x86_features;
2030                 /* we support x2apic emulation even if host does not support
2031                  * it since we emulate x2apic in software */
2032                 entry->ecx |= F(X2APIC);
2033                 break;
2034         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
2035          * may return different values. This forces us to get_cpu() before
2036          * issuing the first command, and also to emulate this annoying behavior
2037          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2038         case 2: {
2039                 int t, times = entry->eax & 0xff;
2040
2041                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2042                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2043                 for (t = 1; t < times && *nent < maxnent; ++t) {
2044                         do_cpuid_1_ent(&entry[t], function, 0);
2045                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2046                         ++*nent;
2047                 }
2048                 break;
2049         }
2050         /* functions 4 and 0xb have an additional index. */
2051         case 4: {
2052                 int i, cache_type;
2053
2054                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2055                 /* read more entries until cache_type is zero */
2056                 for (i = 1; *nent < maxnent; ++i) {
2057                         cache_type = entry[i - 1].eax & 0x1f;
2058                         if (!cache_type)
2059                                 break;
2060                         do_cpuid_1_ent(&entry[i], function, i);
2061                         entry[i].flags |=
2062                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2063                         ++*nent;
2064                 }
2065                 break;
2066         }
2067         case 0xb: {
2068                 int i, level_type;
2069
2070                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2071                 /* read more entries until level_type is zero */
2072                 for (i = 1; *nent < maxnent; ++i) {
2073                         level_type = entry[i - 1].ecx & 0xff00;
2074                         if (!level_type)
2075                                 break;
2076                         do_cpuid_1_ent(&entry[i], function, i);
2077                         entry[i].flags |=
2078                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2079                         ++*nent;
2080                 }
2081                 break;
2082         }
2083         case 0xd: {
2084                 int i;
2085
2086                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2087                 for (i = 1; *nent < maxnent; ++i) {
2088                         if (entry[i - 1].eax == 0 && i != 2)
2089                                 break;
2090                         do_cpuid_1_ent(&entry[i], function, i);
2091                         entry[i].flags |=
2092                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2093                         ++*nent;
2094                 }
2095                 break;
2096         }
2097         case KVM_CPUID_SIGNATURE: {
2098                 char signature[12] = "KVMKVMKVM\0\0";
2099                 u32 *sigptr = (u32 *)signature;
2100                 entry->eax = 0;
2101                 entry->ebx = sigptr[0];
2102                 entry->ecx = sigptr[1];
2103                 entry->edx = sigptr[2];
2104                 break;
2105         }
2106         case KVM_CPUID_FEATURES:
2107                 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2108                              (1 << KVM_FEATURE_NOP_IO_DELAY) |
2109                              (1 << KVM_FEATURE_CLOCKSOURCE2) |
2110                              (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2111                 entry->ebx = 0;
2112                 entry->ecx = 0;
2113                 entry->edx = 0;
2114                 break;
2115         case 0x80000000:
2116                 entry->eax = min(entry->eax, 0x8000001a);
2117                 break;
2118         case 0x80000001:
2119                 entry->edx &= kvm_supported_word1_x86_features;
2120                 entry->ecx &= kvm_supported_word6_x86_features;
2121                 break;
2122         }
2123
2124         kvm_x86_ops->set_supported_cpuid(function, entry);
2125
2126         put_cpu();
2127 }
2128
2129 #undef F
2130
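/*
 * KVM_GET_SUPPORTED_CPUID: enumerate the standard leaves (0..limit), the
 * extended leaves (0x80000000..limit) and the KVM paravirtual leaves into a
 * temporary array, then copy the result to userspace.  -E2BIG indicates the
 * caller's buffer is too small.
 */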
2131 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2132                                      struct kvm_cpuid_entry2 __user *entries)
2133 {
2134         struct kvm_cpuid_entry2 *cpuid_entries;
2135         int limit, nent = 0, r = -E2BIG;
2136         u32 func;
2137
2138         if (cpuid->nent < 1)
2139                 goto out;
2140         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2141                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2142         r = -ENOMEM;
2143         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2144         if (!cpuid_entries)
2145                 goto out;
2146
2147         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2148         limit = cpuid_entries[0].eax;
2149         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2150                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2151                              &nent, cpuid->nent);
2152         r = -E2BIG;
2153         if (nent >= cpuid->nent)
2154                 goto out_free;
2155
2156         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2157         limit = cpuid_entries[nent - 1].eax;
2158         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2159                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2160                              &nent, cpuid->nent);
2161
2164         r = -E2BIG;
2165         if (nent >= cpuid->nent)
2166                 goto out_free;
2167
2168         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2169                      cpuid->nent);
2170
2171         r = -E2BIG;
2172         if (nent >= cpuid->nent)
2173                 goto out_free;
2174
2175         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2176                      cpuid->nent);
2177
2178         r = -E2BIG;
2179         if (nent >= cpuid->nent)
2180                 goto out_free;
2181
2182         r = -EFAULT;
2183         if (copy_to_user(entries, cpuid_entries,
2184                          nent * sizeof(struct kvm_cpuid_entry2)))
2185                 goto out_free;
2186         cpuid->nent = nent;
2187         r = 0;
2188
2189 out_free:
2190         vfree(cpuid_entries);
2191 out:
2192         return r;
2193 }
2194
2195 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2196                                     struct kvm_lapic_state *s)
2197 {
2198         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2199
2200         return 0;
2201 }
2202
2203 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2204                                     struct kvm_lapic_state *s)
2205 {
2206         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2207         kvm_apic_post_state_restore(vcpu);
2208         update_cr8_intercept(vcpu);
2209
2210         return 0;
2211 }
2212
2213 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2214                                     struct kvm_interrupt *irq)
2215 {
2216         if (irq->irq >= 256)
2217                 return -EINVAL;
2218         if (irqchip_in_kernel(vcpu->kvm))
2219                 return -ENXIO;
2220
2221         kvm_queue_interrupt(vcpu, irq->irq, false);
2222
2223         return 0;
2224 }
2225
2226 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2227 {
2228         kvm_inject_nmi(vcpu);
2229
2230         return 0;
2231 }
2232
2233 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2234                                            struct kvm_tpr_access_ctl *tac)
2235 {
2236         if (tac->flags)
2237                 return -EINVAL;
2238         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2239         return 0;
2240 }
2241
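/*
 * KVM_X86_SETUP_MCE: validate the requested MCG_CAP (bank count and
 * capability bits) against what KVM supports, then enable everything by
 * default: MCG_CTL and each bank's MCi_CTL are set to all ones.
 */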
2242 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2243                                         u64 mcg_cap)
2244 {
2245         int r;
2246         unsigned bank_num = mcg_cap & 0xff, bank;
2247
2248         r = -EINVAL;
2249         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2250                 goto out;
2251         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2252                 goto out;
2253         r = 0;
2254         vcpu->arch.mcg_cap = mcg_cap;
2255         /* Init IA32_MCG_CTL to all 1s */
2256         if (mcg_cap & MCG_CTL_P)
2257                 vcpu->arch.mcg_ctl = ~(u64)0;
2258         /* Init IA32_MCi_CTL to all 1s */
2259         for (bank = 0; bank < bank_num; bank++)
2260                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2261 out:
2262         return r;
2263 }
2264
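/*
 * KVM_X86_SET_MCE: inject a machine-check event into the vcpu.  Events are
 * silently dropped when MCG_CTL/MCi_CTL have reporting disabled; an
 * uncorrected error raised while CR4.MCE is clear or another MCE is in
 * progress escalates to shutdown (triple fault), as on real hardware.
 */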
2265 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2266                                       struct kvm_x86_mce *mce)
2267 {
2268         u64 mcg_cap = vcpu->arch.mcg_cap;
2269         unsigned bank_num = mcg_cap & 0xff;
2270         u64 *banks = vcpu->arch.mce_banks;
2271
2272         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2273                 return -EINVAL;
2274         /*
2275          * If IA32_MCG_CTL is not all 1s, uncorrected error
2276          * reporting is disabled
2277          */
2278         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2279             vcpu->arch.mcg_ctl != ~(u64)0)
2280                 return 0;
2281         banks += 4 * mce->bank;
2282         /*
2283          * If IA32_MCi_CTL is not all 1s, uncorrected error
2284          * reporting is disabled for the bank
2285          */
2286         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2287                 return 0;
2288         if (mce->status & MCI_STATUS_UC) {
2289                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2290                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2291                         printk(KERN_DEBUG "kvm: set_mce: "
2292                                "injects mce exception while "
2293                                "previous one is in progress!\n");
2294                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2295                         return 0;
2296                 }
2297                 if (banks[1] & MCI_STATUS_VAL)
2298                         mce->status |= MCI_STATUS_OVER;
2299                 banks[2] = mce->addr;
2300                 banks[3] = mce->misc;
2301                 vcpu->arch.mcg_status = mce->mcg_status;
2302                 banks[1] = mce->status;
2303                 kvm_queue_exception(vcpu, MC_VECTOR);
2304         } else if (!(banks[1] & MCI_STATUS_VAL)
2305                    || !(banks[1] & MCI_STATUS_UC)) {
2306                 if (banks[1] & MCI_STATUS_VAL)
2307                         mce->status |= MCI_STATUS_OVER;
2308                 banks[2] = mce->addr;
2309                 banks[3] = mce->misc;
2310                 banks[1] = mce->status;
2311         } else
2312                 banks[1] |= MCI_STATUS_OVER;
2313         return 0;
2314 }
2315
2316 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2317                                                struct kvm_vcpu_events *events)
2318 {
2319         events->exception.injected =
2320                 vcpu->arch.exception.pending &&
2321                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2322         events->exception.nr = vcpu->arch.exception.nr;
2323         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2324         events->exception.error_code = vcpu->arch.exception.error_code;
2325
2326         events->interrupt.injected =
2327                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2328         events->interrupt.nr = vcpu->arch.interrupt.nr;
2329         events->interrupt.soft = 0;
2330         events->interrupt.shadow =
2331                 kvm_x86_ops->get_interrupt_shadow(vcpu,
2332                         KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2333
2334         events->nmi.injected = vcpu->arch.nmi_injected;
2335         events->nmi.pending = vcpu->arch.nmi_pending;
2336         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2337
2338         events->sipi_vector = vcpu->arch.sipi_vector;
2339
2340         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2341                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2342                          | KVM_VCPUEVENT_VALID_SHADOW);
2343 }
2344
2345 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2346                                               struct kvm_vcpu_events *events)
2347 {
2348         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2349                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2350                               | KVM_VCPUEVENT_VALID_SHADOW))
2351                 return -EINVAL;
2352
2353         vcpu->arch.exception.pending = events->exception.injected;
2354         vcpu->arch.exception.nr = events->exception.nr;
2355         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2356         vcpu->arch.exception.error_code = events->exception.error_code;
2357
2358         vcpu->arch.interrupt.pending = events->interrupt.injected;
2359         vcpu->arch.interrupt.nr = events->interrupt.nr;
2360         vcpu->arch.interrupt.soft = events->interrupt.soft;
2361         if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2362                 kvm_pic_clear_isr_ack(vcpu->kvm);
2363         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2364                 kvm_x86_ops->set_interrupt_shadow(vcpu,
2365                                                   events->interrupt.shadow);
2366
2367         vcpu->arch.nmi_injected = events->nmi.injected;
2368         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2369                 vcpu->arch.nmi_pending = events->nmi.pending;
2370         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2371
2372         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2373                 vcpu->arch.sipi_vector = events->sipi_vector;
2374
2375         return 0;
2376 }
2377
2378 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2379                                              struct kvm_debugregs *dbgregs)
2380 {
2381         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2382         dbgregs->dr6 = vcpu->arch.dr6;
2383         dbgregs->dr7 = vcpu->arch.dr7;
2384         dbgregs->flags = 0;
2385 }
2386
2387 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2388                                             struct kvm_debugregs *dbgregs)
2389 {
2390         if (dbgregs->flags)
2391                 return -EINVAL;
2392
2393         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2394         vcpu->arch.dr6 = dbgregs->dr6;
2395         vcpu->arch.dr7 = dbgregs->dr7;
2396
2397         return 0;
2398 }
2399
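/*
 * KVM_GET_XSAVE: on XSAVE-capable hosts copy the full xsave image; otherwise
 * fall back to the fxsave image and hand-craft the XSTATE_BV header field so
 * userspace sees a valid area with only FP/SSE state marked present.
 */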
2400 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2401                                          struct kvm_xsave *guest_xsave)
2402 {
2403         if (cpu_has_xsave)
2404                 memcpy(guest_xsave->region,
2405                         &vcpu->arch.guest_fpu.state->xsave,
2406                         xstate_size);
2407         else {
2408                 memcpy(guest_xsave->region,
2409                         &vcpu->arch.guest_fpu.state->fxsave,
2410                         sizeof(struct i387_fxsave_struct));
2411                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2412                         XSTATE_FPSSE;
2413         }
2414 }
2415
2416 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2417                                         struct kvm_xsave *guest_xsave)
2418 {
2419         u64 xstate_bv =
2420                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2421
2422         if (cpu_has_xsave)
2423                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2424                         guest_xsave->region, xstate_size);
2425         else {
2426                 if (xstate_bv & ~XSTATE_FPSSE)
2427                         return -EINVAL;
2428                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2429                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
2430         }
2431         return 0;
2432 }
2433
2434 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2435                                         struct kvm_xcrs *guest_xcrs)
2436 {
2437         if (!cpu_has_xsave) {
2438                 guest_xcrs->nr_xcrs = 0;
2439                 return;
2440         }
2441
2442         guest_xcrs->nr_xcrs = 1;
2443         guest_xcrs->flags = 0;
2444         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2445         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2446 }
2447
2448 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2449                                        struct kvm_xcrs *guest_xcrs)
2450 {
2451         int i, r = 0;
2452
2453         if (!cpu_has_xsave)
2454                 return -EINVAL;
2455
2456         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2457                 return -EINVAL;
2458
2459         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2460                 /* Only support XCR0 currently */
2461                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
2462                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2463                                 guest_xcrs->xcrs[i].value);
2464                         break;
2465                 }
2466         if (r)
2467                 r = -EINVAL;
2468         return r;
2469 }
2470
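/*
 * The union below lets the larger ioctl payloads (lapic state, xsave area,
 * xcrs) share a single heap allocation; whichever member was allocated is
 * released by the one kfree(u.buffer) at "out" (kfree(NULL) is a no-op).
 */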
2471 long kvm_arch_vcpu_ioctl(struct file *filp,
2472                          unsigned int ioctl, unsigned long arg)
2473 {
2474         struct kvm_vcpu *vcpu = filp->private_data;
2475         void __user *argp = (void __user *)arg;
2476         int r;
2477         union {
2478                 struct kvm_lapic_state *lapic;
2479                 struct kvm_xsave *xsave;
2480                 struct kvm_xcrs *xcrs;
2481                 void *buffer;
2482         } u;
2483
2484         u.buffer = NULL;
2485         switch (ioctl) {
2486         case KVM_GET_LAPIC: {
2487                 r = -EINVAL;
2488                 if (!vcpu->arch.apic)
2489                         goto out;
2490                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2491
2492                 r = -ENOMEM;
2493                 if (!u.lapic)
2494                         goto out;
2495                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2496                 if (r)
2497                         goto out;
2498                 r = -EFAULT;
2499                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2500                         goto out;
2501                 r = 0;
2502                 break;
2503         }
2504         case KVM_SET_LAPIC: {
2505                 r = -EINVAL;
2506                 if (!vcpu->arch.apic)
2507                         goto out;
2508                 u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2509                 r = -ENOMEM;
2510                 if (!u.lapic)
2511                         goto out;
2512                 r = -EFAULT;
2513                 if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
2514                         goto out;
2515                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2516                 if (r)
2517                         goto out;
2518                 r = 0;
2519                 break;
2520         }
2521         case KVM_INTERRUPT: {
2522                 struct kvm_interrupt irq;
2523
2524                 r = -EFAULT;
2525                 if (copy_from_user(&irq, argp, sizeof irq))
2526                         goto out;
2527                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2528                 if (r)
2529                         goto out;
2530                 r = 0;
2531                 break;
2532         }
2533         case KVM_NMI: {
2534                 r = kvm_vcpu_ioctl_nmi(vcpu);
2535                 if (r)
2536                         goto out;
2537                 r = 0;
2538                 break;
2539         }
2540         case KVM_SET_CPUID: {
2541                 struct kvm_cpuid __user *cpuid_arg = argp;
2542                 struct kvm_cpuid cpuid;
2543
2544                 r = -EFAULT;
2545                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2546                         goto out;
2547                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2548                 if (r)
2549                         goto out;
2550                 break;
2551         }
2552         case KVM_SET_CPUID2: {
2553                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2554                 struct kvm_cpuid2 cpuid;
2555
2556                 r = -EFAULT;
2557                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2558                         goto out;
2559                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2560                                               cpuid_arg->entries);
2561                 if (r)
2562                         goto out;
2563                 break;
2564         }
2565         case KVM_GET_CPUID2: {
2566                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2567                 struct kvm_cpuid2 cpuid;
2568
2569                 r = -EFAULT;
2570                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2571                         goto out;
2572                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2573                                               cpuid_arg->entries);
2574                 if (r)
2575                         goto out;
2576                 r = -EFAULT;
2577                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2578                         goto out;
2579                 r = 0;
2580                 break;
2581         }
2582         case KVM_GET_MSRS:
2583                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2584                 break;
2585         case KVM_SET_MSRS:
2586                 r = msr_io(vcpu, argp, do_set_msr, 0);
2587                 break;
2588         case KVM_TPR_ACCESS_REPORTING: {
2589                 struct kvm_tpr_access_ctl tac;
2590
2591                 r = -EFAULT;
2592                 if (copy_from_user(&tac, argp, sizeof tac))
2593                         goto out;
2594                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2595                 if (r)
2596                         goto out;
2597                 r = -EFAULT;
2598                 if (copy_to_user(argp, &tac, sizeof tac))
2599                         goto out;
2600                 r = 0;
2601                 break;
2602         }
2603         case KVM_SET_VAPIC_ADDR: {
2604                 struct kvm_vapic_addr va;
2605
2606                 r = -EINVAL;
2607                 if (!irqchip_in_kernel(vcpu->kvm))
2608                         goto out;
2609                 r = -EFAULT;
2610                 if (copy_from_user(&va, argp, sizeof va))
2611                         goto out;
2612                 r = 0;
2613                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2614                 break;
2615         }
2616         case KVM_X86_SETUP_MCE: {
2617                 u64 mcg_cap;
2618
2619                 r = -EFAULT;
2620                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2621                         goto out;
2622                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2623                 break;
2624         }
2625         case KVM_X86_SET_MCE: {
2626                 struct kvm_x86_mce mce;
2627
2628                 r = -EFAULT;
2629                 if (copy_from_user(&mce, argp, sizeof mce))
2630                         goto out;
2631                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2632                 break;
2633         }
2634         case KVM_GET_VCPU_EVENTS: {
2635                 struct kvm_vcpu_events events;
2636
2637                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2638
2639                 r = -EFAULT;
2640                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2641                         break;
2642                 r = 0;
2643                 break;
2644         }
2645         case KVM_SET_VCPU_EVENTS: {
2646                 struct kvm_vcpu_events events;
2647
2648                 r = -EFAULT;
2649                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2650                         break;
2651
2652                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2653                 break;
2654         }
2655         case KVM_GET_DEBUGREGS: {
2656                 struct kvm_debugregs dbgregs;
2657
2658                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2659
2660                 r = -EFAULT;
2661                 if (copy_to_user(argp, &dbgregs,
2662                                  sizeof(struct kvm_debugregs)))
2663                         break;
2664                 r = 0;
2665                 break;
2666         }
2667         case KVM_SET_DEBUGREGS: {
2668                 struct kvm_debugregs dbgregs;
2669
2670                 r = -EFAULT;
2671                 if (copy_from_user(&dbgregs, argp,
2672                                    sizeof(struct kvm_debugregs)))
2673                         break;
2674
2675                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2676                 break;
2677         }
2678         case KVM_GET_XSAVE: {
2679                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2680                 r = -ENOMEM;
2681                 if (!u.xsave)
2682                         break;
2683
2684                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2685
2686                 r = -EFAULT;
2687                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2688                         break;
2689                 r = 0;
2690                 break;
2691         }
2692         case KVM_SET_XSAVE: {
2693                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2694                 r = -ENOMEM;
2695                 if (!u.xsave)
2696                         break;
2697
2698                 r = -EFAULT;
2699                 if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
2700                         break;
2701
2702                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2703                 break;
2704         }
2705         case KVM_GET_XCRS: {
2706                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2707                 r = -ENOMEM;
2708                 if (!u.xcrs)
2709                         break;
2710
2711                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2712
2713                 r = -EFAULT;
2714                 if (copy_to_user(argp, u.xcrs,
2715                                  sizeof(struct kvm_xcrs)))
2716                         break;
2717                 r = 0;
2718                 break;
2719         }
2720         case KVM_SET_XCRS: {
2721                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2722                 r = -ENOMEM;
2723                 if (!u.xcrs)
2724                         break;
2725
2726                 r = -EFAULT;
2727                 if (copy_from_user(u.xcrs, argp,
2728                                    sizeof(struct kvm_xcrs)))
2729                         break;
2730
2731                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2732                 break;
2733         }
2734         default:
2735                 r = -EINVAL;
2736         }
2737 out:
2738         kfree(u.buffer);
2739         return r;
2740 }
2741
2742 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2743 {
2744         int ret;
2745
2746         if (addr > (unsigned int)(-3 * PAGE_SIZE))
2747                 return -1;
2748         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2749         return ret;
2750 }
2751
2752 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2753                                               u64 ident_addr)
2754 {
2755         kvm->arch.ept_identity_map_addr = ident_addr;
2756         return 0;
2757 }
2758
2759 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2760                                           u32 kvm_nr_mmu_pages)
2761 {
2762         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2763                 return -EINVAL;
2764
2765         mutex_lock(&kvm->slots_lock);
2766         spin_lock(&kvm->mmu_lock);
2767
2768         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2769         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2770
2771         spin_unlock(&kvm->mmu_lock);
2772         mutex_unlock(&kvm->slots_lock);
2773         return 0;
2774 }
2775
2776 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2777 {
2778         return kvm->arch.n_max_mmu_pages;
2779 }
2780
2781 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2782 {
2783         int r;
2784
2785         r = 0;
2786         switch (chip->chip_id) {
2787         case KVM_IRQCHIP_PIC_MASTER:
2788                 memcpy(&chip->chip.pic,
2789                         &pic_irqchip(kvm)->pics[0],
2790                         sizeof(struct kvm_pic_state));
2791                 break;
2792         case KVM_IRQCHIP_PIC_SLAVE:
2793                 memcpy(&chip->chip.pic,
2794                         &pic_irqchip(kvm)->pics[1],
2795                         sizeof(struct kvm_pic_state));
2796                 break;
2797         case KVM_IRQCHIP_IOAPIC:
2798                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2799                 break;
2800         default:
2801                 r = -EINVAL;
2802                 break;
2803         }
2804         return r;
2805 }
2806
2807 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2808 {
2809         int r;
2810
2811         r = 0;
2812         switch (chip->chip_id) {
2813         case KVM_IRQCHIP_PIC_MASTER:
2814                 raw_spin_lock(&pic_irqchip(kvm)->lock);
2815                 memcpy(&pic_irqchip(kvm)->pics[0],
2816                         &chip->chip.pic,
2817                         sizeof(struct kvm_pic_state));
2818                 raw_spin_unlock(&pic_irqchip(kvm)->lock);
2819                 break;
2820         case KVM_IRQCHIP_PIC_SLAVE:
2821                 raw_spin_lock(&pic_irqchip(kvm)->lock);
2822                 memcpy(&pic_irqchip(kvm)->pics[1],
2823                         &chip->chip.pic,
2824                         sizeof(struct kvm_pic_state));
2825                 raw_spin_unlock(&pic_irqchip(kvm)->lock);
2826                 break;
2827         case KVM_IRQCHIP_IOAPIC:
2828                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2829                 break;
2830         default:
2831                 r = -EINVAL;
2832                 break;
2833         }
2834         kvm_pic_update_irq(pic_irqchip(kvm));
2835         return r;
2836 }
2837
2838 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2839 {
2840         int r = 0;
2841
2842         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2843         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2844         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2845         return r;
2846 }
2847
2848 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2849 {
2850         int r = 0;
2851
2852         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2853         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2854         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2855         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2856         return r;
2857 }
2858
2859 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2860 {
2861         int r = 0;
2862
2863         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2864         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2865                 sizeof(ps->channels));
2866         ps->flags = kvm->arch.vpit->pit_state.flags;
2867         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2868         return r;
2869 }
2870
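/*
 * KVM_SET_PIT2: restore the PIT channel state and flags.  Channel 0's count
 * is reloaded, flagged as an HPET-legacy start only when this call switches
 * legacy routing on.
 */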
2871 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2872 {
2873         int r = 0, start = 0;
2874         u32 prev_legacy, cur_legacy;
2875         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2876         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2877         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2878         if (!prev_legacy && cur_legacy)
2879                 start = 1;
2880         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2881                sizeof(kvm->arch.vpit->pit_state.channels));
2882         kvm->arch.vpit->pit_state.flags = ps->flags;
2883         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2884         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2885         return r;
2886 }
2887
2888 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2889                                  struct kvm_reinject_control *control)
2890 {
2891         if (!kvm->arch.vpit)
2892                 return -ENXIO;
2893         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2894         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2895         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2896         return 0;
2897 }
2898
2899 /*
2900  * Get (and clear) the dirty memory log for a memory slot.
2901  */
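/*
 * When the slot has dirty pages, write access is removed from its sptes and
 * a fresh zeroed bitmap is swapped in under SRCU; the old bitmap is then
 * copied out to userspace, so no dirty bits are lost while logging continues.
 */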
2902 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2903                                       struct kvm_dirty_log *log)
2904 {
2905         int r, i;
2906         struct kvm_memory_slot *memslot;
2907         unsigned long n;
2908         unsigned long is_dirty = 0;
2909
2910         mutex_lock(&kvm->slots_lock);
2911
2912         r = -EINVAL;
2913         if (log->slot >= KVM_MEMORY_SLOTS)
2914                 goto out;
2915
2916         memslot = &kvm->memslots->memslots[log->slot];
2917         r = -ENOENT;
2918         if (!memslot->dirty_bitmap)
2919                 goto out;
2920
2921         n = kvm_dirty_bitmap_bytes(memslot);
2922
2923         for (i = 0; !is_dirty && i < n/sizeof(long); i++)
2924                 is_dirty = memslot->dirty_bitmap[i];
2925
2926         /* If nothing is dirty, don't bother messing with page tables. */
2927         if (is_dirty) {
2928                 struct kvm_memslots *slots, *old_slots;
2929                 unsigned long *dirty_bitmap;
2930
2931                 spin_lock(&kvm->mmu_lock);
2932                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
2933                 spin_unlock(&kvm->mmu_lock);
2934
2935                 r = -ENOMEM;
2936                 dirty_bitmap = vmalloc(n);
2937                 if (!dirty_bitmap)
2938                         goto out;
2939                 memset(dirty_bitmap, 0, n);
2940
2941                 r = -ENOMEM;
2942                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
2943                 if (!slots) {
2944                         vfree(dirty_bitmap);
2945                         goto out;
2946                 }
2947                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
2948                 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
2949
2950                 old_slots = kvm->memslots;
2951                 rcu_assign_pointer(kvm->memslots, slots);
2952                 synchronize_srcu_expedited(&kvm->srcu);
2953                 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2954                 kfree(old_slots);
2955
2956                 r = -EFAULT;
2957                 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
2958                         vfree(dirty_bitmap);
2959                         goto out;
2960                 }
2961                 vfree(dirty_bitmap);
2962         } else {
2963                 r = -EFAULT;
2964                 if (clear_user(log->dirty_bitmap, n))
2965                         goto out;
2966         }
2967
2968         r = 0;
2969 out:
2970         mutex_unlock(&kvm->slots_lock);
2971         return r;
2972 }
2973
2974 long kvm_arch_vm_ioctl(struct file *filp,
2975                        unsigned int ioctl, unsigned long arg)
2976 {
2977         struct kvm *kvm = filp->private_data;
2978         void __user *argp = (void __user *)arg;
2979         int r = -ENOTTY;
2980         /*
2981          * This union makes it completely explicit to gcc-3.x
2982          * that these two variables' stack usage should be
2983          * combined, not added together.
2984          */
2985         union {
2986                 struct kvm_pit_state ps;
2987                 struct kvm_pit_state2 ps2;
2988                 struct kvm_pit_config pit_config;
2989         } u;
2990
2991         switch (ioctl) {
2992         case KVM_SET_TSS_ADDR:
2993                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2994                 if (r < 0)
2995                         goto out;
2996                 break;
2997         case KVM_SET_IDENTITY_MAP_ADDR: {
2998                 u64 ident_addr;
2999
3000                 r = -EFAULT;
3001                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3002                         goto out;
3003                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3004                 if (r < 0)
3005                         goto out;
3006                 break;
3007         }
3008         case KVM_SET_NR_MMU_PAGES:
3009                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3010                 if (r)
3011                         goto out;
3012                 break;
3013         case KVM_GET_NR_MMU_PAGES:
3014                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3015                 break;
3016         case KVM_CREATE_IRQCHIP: {
3017                 struct kvm_pic *vpic;
3018
3019                 mutex_lock(&kvm->lock);
3020                 r = -EEXIST;
3021                 if (kvm->arch.vpic)
3022                         goto create_irqchip_unlock;
3023                 r = -ENOMEM;
3024                 vpic = kvm_create_pic(kvm);
3025                 if (vpic) {
3026                         r = kvm_ioapic_init(kvm);
3027                         if (r) {
3028                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3029                                                           &vpic->dev);
3030                                 kfree(vpic);
3031                                 goto create_irqchip_unlock;
3032                         }
3033                 } else
3034                         goto create_irqchip_unlock;
3035                 smp_wmb();
3036                 kvm->arch.vpic = vpic;
3037                 smp_wmb();
3038                 r = kvm_setup_default_irq_routing(kvm);
3039                 if (r) {
3040                         mutex_lock(&kvm->irq_lock);
3041                         kvm_ioapic_destroy(kvm);
3042                         kvm_destroy_pic(kvm);
3043                         mutex_unlock(&kvm->irq_lock);
3044                 }
3045         create_irqchip_unlock:
3046                 mutex_unlock(&kvm->lock);
3047                 break;
3048         }
3049         case KVM_CREATE_PIT:
3050                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3051                 goto create_pit;
3052         case KVM_CREATE_PIT2:
3053                 r = -EFAULT;
3054                 if (copy_from_user(&u.pit_config, argp,
3055                                    sizeof(struct kvm_pit_config)))
3056                         goto out;
3057         create_pit:
3058                 mutex_lock(&kvm->slots_lock);
3059                 r = -EEXIST;
3060                 if (kvm->arch.vpit)
3061                         goto create_pit_unlock;
3062                 r = -ENOMEM;
3063                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3064                 if (kvm->arch.vpit)
3065                         r = 0;
3066         create_pit_unlock:
3067                 mutex_unlock(&kvm->slots_lock);
3068                 break;
3069         case KVM_IRQ_LINE_STATUS:
3070         case KVM_IRQ_LINE: {
3071                 struct kvm_irq_level irq_event;
3072
3073                 r = -EFAULT;
3074                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3075                         goto out;
3076                 r = -ENXIO;
3077                 if (irqchip_in_kernel(kvm)) {
3078                         __s32 status;
3079                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3080                                         irq_event.irq, irq_event.level);
3081                         if (ioctl == KVM_IRQ_LINE_STATUS) {
3082                                 r = -EFAULT;
3083                                 irq_event.status = status;
3084                                 if (copy_to_user(argp, &irq_event,
3085                                                         sizeof irq_event))
3086                                         goto out;
3087                         }
3088                         r = 0;
3089                 }
3090                 break;
3091         }
3092         case KVM_GET_IRQCHIP: {
3093                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3094                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3095
3096                 r = -ENOMEM;
3097                 if (!chip)
3098                         goto out;
3099                 r = -EFAULT;
3100                 if (copy_from_user(chip, argp, sizeof *chip))
3101                         goto get_irqchip_out;
3102                 r = -ENXIO;
3103                 if (!irqchip_in_kernel(kvm))
3104                         goto get_irqchip_out;
3105                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3106                 if (r)
3107                         goto get_irqchip_out;
3108                 r = -EFAULT;
3109                 if (copy_to_user(argp, chip, sizeof *chip))
3110                         goto get_irqchip_out;
3111                 r = 0;
3112         get_irqchip_out:
3113                 kfree(chip);
3114                 if (r)
3115                         goto out;
3116                 break;
3117         }
3118         case KVM_SET_IRQCHIP: {
3119                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3120                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3121
3122                 r = -ENOMEM;
3123                 if (!chip)
3124                         goto out;
3125                 r = -EFAULT;
3126                 if (copy_from_user(chip, argp, sizeof *chip))
3127                         goto set_irqchip_out;
3128                 r = -ENXIO;
3129                 if (!irqchip_in_kernel(kvm))
3130                         goto set_irqchip_out;
3131                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3132                 if (r)
3133                         goto set_irqchip_out;
3134                 r = 0;
3135         set_irqchip_out:
3136                 kfree(chip);
3137                 if (r)
3138                         goto out;
3139                 break;
3140         }
3141         case KVM_GET_PIT: {
3142                 r = -EFAULT;
3143                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3144                         goto out;
3145                 r = -ENXIO;
3146                 if (!kvm->arch.vpit)
3147                         goto out;
3148                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3149                 if (r)
3150                         goto out;
3151                 r = -EFAULT;
3152                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3153                         goto out;
3154                 r = 0;
3155                 break;
3156         }
3157         case KVM_SET_PIT: {
3158                 r = -EFAULT;
3159                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3160                         goto out;
3161                 r = -ENXIO;
3162                 if (!kvm->arch.vpit)
3163                         goto out;
3164                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3165                 if (r)
3166                         goto out;
3167                 r = 0;
3168                 break;
3169         }
3170         case KVM_GET_PIT2: {
3171                 r = -ENXIO;
3172                 if (!kvm->arch.vpit)
3173                         goto out;
3174                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3175                 if (r)
3176                         goto out;
3177                 r = -EFAULT;
3178                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3179                         goto out;
3180                 r = 0;
3181                 break;
3182         }
3183         case KVM_SET_PIT2: {
3184                 r = -EFAULT;
3185                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3186                         goto out;
3187                 r = -ENXIO;
3188                 if (!kvm->arch.vpit)
3189                         goto out;
3190                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3191                 if (r)
3192                         goto out;
3193                 r = 0;
3194                 break;
3195         }
3196         case KVM_REINJECT_CONTROL: {
3197                 struct kvm_reinject_control control;
3198                 r =  -EFAULT;
3199                 if (copy_from_user(&control, argp, sizeof(control)))
3200                         goto out;
3201                 r = kvm_vm_ioctl_reinject(kvm, &control);
3202                 if (r)
3203                         goto out;
3204                 r = 0;
3205                 break;
3206         }
3207         case KVM_XEN_HVM_CONFIG: {
3208                 r = -EFAULT;
3209                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3210                                    sizeof(struct kvm_xen_hvm_config)))
3211                         goto out;
3212                 r = -EINVAL;
3213                 if (kvm->arch.xen_hvm_config.flags)
3214                         goto out;
3215                 r = 0;
3216                 break;
3217         }
3218         case KVM_SET_CLOCK: {
3219                 struct timespec now;
3220                 struct kvm_clock_data user_ns;
3221                 u64 now_ns;
3222                 s64 delta;
3223
3224                 r = -EFAULT;
3225                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3226                         goto out;
3227
3228                 r = -EINVAL;
3229                 if (user_ns.flags)
3230                         goto out;
3231
3232                 r = 0;
3233                 ktime_get_ts(&now);
3234                 now_ns = timespec_to_ns(&now);
3235                 delta = user_ns.clock - now_ns;
3236                 kvm->arch.kvmclock_offset = delta;
3237                 break;
3238         }
3239         case KVM_GET_CLOCK: {
3240                 struct timespec now;
3241                 struct kvm_clock_data user_ns;
3242                 u64 now_ns;
3243
3244                 ktime_get_ts(&now);
3245                 now_ns = timespec_to_ns(&now);
3246                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3247                 user_ns.flags = 0;
3248
3249                 r = -EFAULT;
3250                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3251                         goto out;
3252                 r = 0;
3253                 break;
3254         }
3255
3256         default:
3257                 ;
3258         }
3259 out:
3260         return r;
3261 }
3262
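/*
 * Note (added comment): probe each MSR in msrs_to_save with rdmsr_safe()
 * and compact the list in place, dropping MSRs the host cannot read.  The
 * first KVM_SAVE_MSRS_BEGIN entries are KVM-specific and are kept as-is.
 */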
3263 static void kvm_init_msr_list(void)
3264 {
3265         u32 dummy[2];
3266         unsigned i, j;
3267
3268         /* skip the first msrs in the list. KVM-specific */
3269         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3270                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3271                         continue;
3272                 if (j < i)
3273                         msrs_to_save[j] = msrs_to_save[i];
3274                 j++;
3275         }
3276         num_msrs_to_save = j;
3277 }
3278
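/*
 * Note (added comment): MMIO issued by the emulator is offered to the local
 * APIC page first and then to the devices on KVM_MMIO_BUS; a zero return
 * means the access was handled entirely in the kernel.
 */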
3279 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3280                            const void *v)
3281 {
3282         if (vcpu->arch.apic &&
3283             !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3284                 return 0;
3285
3286         return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3287 }
3288
3289 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3290 {
3291         if (vcpu->arch.apic &&
3292             !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3293                 return 0;
3294
3295         return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3296 }
3297
3298 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3299                         struct kvm_segment *var, int seg)
3300 {
3301         kvm_x86_ops->set_segment(vcpu, var, seg);
3302 }
3303
3304 void kvm_get_segment(struct kvm_vcpu *vcpu,
3305                      struct kvm_segment *var, int seg)
3306 {
3307         kvm_x86_ops->get_segment(vcpu, var, seg);
3308 }
3309
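/*
 * Note (added comment): the helpers below translate guest virtual addresses
 * to guest physical addresses for the emulator.  The access mask carries
 * PFERR_USER_MASK when the guest runs at CPL 3, plus a fetch or write bit
 * for the corresponding access type.
 */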
3310 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3311 {
3312         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3313         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3314 }
3315
3316 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3317 {
3318         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3319         access |= PFERR_FETCH_MASK;
3320         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3321 }
3322
3323 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3324 {
3325         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3326         access |= PFERR_WRITE_MASK;
3327         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3328 }
3329
3330 /* Used to access any guest's mapped memory without checking CPL */
3331 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3332 {
3333         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3334 }
3335
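/*
 * Note (added comment): walk the guest virtual range page by page,
 * translating each page with gva_to_gpa() and copying it with
 * kvm_read_guest().  An unmapped GVA returns X86EMUL_PROPAGATE_FAULT;
 * a failed host read returns X86EMUL_IO_NEEDED.
 */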
3336 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3337                                       struct kvm_vcpu *vcpu, u32 access,
3338                                       u32 *error)
3339 {
3340         void *data = val;
3341         int r = X86EMUL_CONTINUE;
3342
3343         while (bytes) {
3344                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
3345                 unsigned offset = addr & (PAGE_SIZE-1);
3346                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3347                 int ret;
3348
3349                 if (gpa == UNMAPPED_GVA) {
3350                         r = X86EMUL_PROPAGATE_FAULT;
3351                         goto out;
3352                 }
3353                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3354                 if (ret < 0) {
3355                         r = X86EMUL_IO_NEEDED;
3356                         goto out;
3357                 }
3358
3359                 bytes -= toread;
3360                 data += toread;
3361                 addr += toread;
3362         }
3363 out:
3364         return r;
3365 }
3366
3367 /* used for instruction fetching */
3368 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3369                                 struct kvm_vcpu *vcpu, u32 *error)
3370 {
3371         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3372         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3373                                           access | PFERR_FETCH_MASK, error);
3374 }
3375
3376 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3377                                struct kvm_vcpu *vcpu, u32 *error)
3378 {
3379         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3380         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3381                                           error);
3382 }
3383
3384 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3385                                struct kvm_vcpu *vcpu, u32 *error)
3386 {
3387         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3388 }
3389
3390 static int kvm_write_guest_virt_system(gva_t addr, void *val,
3391                                        unsigned int bytes,
3392                                        struct kvm_vcpu *vcpu,
3393                                        u32 *error)
3394 {
3395         void *data = val;
3396         int r = X86EMUL_CONTINUE;
3397
3398         while (bytes) {
3399                 gpa_t gpa =  vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
3400                                                        PFERR_WRITE_MASK, error);
3401                 unsigned offset = addr & (PAGE_SIZE-1);
3402                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3403                 int ret;
3404
3405                 if (gpa == UNMAPPED_GVA) {
3406                         r = X86EMUL_PROPAGATE_FAULT;
3407                         goto out;
3408                 }
3409                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3410                 if (ret < 0) {
3411                         r = X86EMUL_IO_NEEDED;
3412                         goto out;
3413                 }
3414
3415                 bytes -= towrite;
3416                 data += towrite;
3417                 addr += towrite;
3418         }
3419 out:
3420         return r;
3421 }
3422
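/*
 * Note (added comment): emulated memory read.  Data from a previously
 * completed MMIO read is returned first; otherwise the address is
 * translated and read straight from guest memory, and anything that fails
 * (or hits the APIC page) is treated as MMIO, either handled by an
 * in-kernel device or turned into a KVM_EXIT_MMIO for userspace.
 */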
3423 static int emulator_read_emulated(unsigned long addr,
3424                                   void *val,
3425                                   unsigned int bytes,
3426                                   unsigned int *error_code,
3427                                   struct kvm_vcpu *vcpu)
3428 {
3429         gpa_t                 gpa;
3430
3431         if (vcpu->mmio_read_completed) {
3432                 memcpy(val, vcpu->mmio_data, bytes);
3433                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3434                                vcpu->mmio_phys_addr, *(u64 *)val);
3435                 vcpu->mmio_read_completed = 0;
3436                 return X86EMUL_CONTINUE;
3437         }
3438
3439         gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
3440
3441         if (gpa == UNMAPPED_GVA)
3442                 return X86EMUL_PROPAGATE_FAULT;
3443
3444         /* For APIC access vmexit */
3445         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3446                 goto mmio;
3447
3448         if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
3449                                 == X86EMUL_CONTINUE)
3450                 return X86EMUL_CONTINUE;
3451
3452 mmio:
3453         /*
3454          * Is this MMIO handled locally?
3455          */
3456         if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3457                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
3458                 return X86EMUL_CONTINUE;
3459         }
3460
3461         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3462
3463         vcpu->mmio_needed = 1;
3464         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3465         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3466         vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3467         vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
3468
3469         return X86EMUL_IO_NEEDED;
3470 }
3471
3472 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3473                           const void *val, int bytes)
3474 {
3475         int ret;
3476
3477         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3478         if (ret < 0)
3479                 return 0;
3480         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
3481         return 1;
3482 }
3483
3484 static int emulator_write_emulated_onepage(unsigned long addr,
3485                                            const void *val,
3486                                            unsigned int bytes,
3487                                            unsigned int *error_code,
3488                                            struct kvm_vcpu *vcpu)
3489 {
3490         gpa_t                 gpa;
3491
3492         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
3493
3494         if (gpa == UNMAPPED_GVA)
3495                 return X86EMUL_PROPAGATE_FAULT;
3496
3497         /* For APIC access vmexit */
3498         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3499                 goto mmio;
3500
3501         if (emulator_write_phys(vcpu, gpa, val, bytes))
3502                 return X86EMUL_CONTINUE;
3503
3504 mmio:
3505         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3506         /*
3507          * Is this MMIO handled locally?
3508          */
3509         if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
3510                 return X86EMUL_CONTINUE;
3511
3512         vcpu->mmio_needed = 1;
3513         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3514         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3515         vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3516         vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
3517         memcpy(vcpu->run->mmio.data, val, bytes);
3518
3519         return X86EMUL_CONTINUE;
3520 }
3521
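/*
 * Note (added comment): writes that cross a page boundary are split into
 * two per-page writes so each piece gets its own GVA->GPA translation.
 */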
3522 int emulator_write_emulated(unsigned long addr,
3523                             const void *val,
3524                             unsigned int bytes,
3525                             unsigned int *error_code,
3526                             struct kvm_vcpu *vcpu)
3527 {
3528         /* Crossing a page boundary? */
3529         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3530                 int rc, now;
3531
3532                 now = -addr & ~PAGE_MASK;
3533                 rc = emulator_write_emulated_onepage(addr, val, now, error_code,
3534                                                      vcpu);
3535                 if (rc != X86EMUL_CONTINUE)
3536                         return rc;
3537                 addr += now;
3538                 val += now;
3539                 bytes -= now;
3540         }
3541         return emulator_write_emulated_onepage(addr, val, bytes, error_code,
3542                                                vcpu);
3543 }
3544
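/*
 * Note (added comment): CMPXCHG_TYPE performs a host cmpxchg of the given
 * width on the mapped guest page and evaluates to true when the old value
 * matched, i.e. the exchange happened.  On 32-bit hosts the 8-byte case
 * goes through cmpxchg64().
 */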
3545 #define CMPXCHG_TYPE(t, ptr, old, new) \
3546         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3547
3548 #ifdef CONFIG_X86_64
3549 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3550 #else
3551 #  define CMPXCHG64(ptr, old, new) \
3552         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3553 #endif
3554
3555 static int emulator_cmpxchg_emulated(unsigned long addr,
3556                                      const void *old,
3557                                      const void *new,
3558                                      unsigned int bytes,
3559                                      unsigned int *error_code,
3560                                      struct kvm_vcpu *vcpu)
3561 {
3562         gpa_t gpa;
3563         struct page *page;
3564         char *kaddr;
3565         bool exchanged;
3566
3567         /* A guest's cmpxchg8b has to be emulated atomically */
3568         if (bytes > 8 || (bytes & (bytes - 1)))
3569                 goto emul_write;
3570
3571         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3572
3573         if (gpa == UNMAPPED_GVA ||
3574             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3575                 goto emul_write;
3576
3577         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3578                 goto emul_write;
3579
3580         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3581         if (is_error_page(page)) {
3582                 kvm_release_page_clean(page);
3583                 goto emul_write;
3584         }
3585
3586         kaddr = kmap_atomic(page, KM_USER0);
3587         kaddr += offset_in_page(gpa);
3588         switch (bytes) {
3589         case 1:
3590                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3591                 break;
3592         case 2:
3593                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3594                 break;
3595         case 4:
3596                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3597                 break;
3598         case 8:
3599                 exchanged = CMPXCHG64(kaddr, old, new);
3600                 break;
3601         default:
3602                 BUG();
3603         }
3604         kunmap_atomic(kaddr, KM_USER0);
3605         kvm_release_page_dirty(page);
3606
3607         if (!exchanged)
3608                 return X86EMUL_CMPXCHG_FAILED;
3609
3610         kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
3611
3612         return X86EMUL_CONTINUE;
3613
3614 emul_write:
3615         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3616
3617         return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
3618 }
3619
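/*
 * Note (added comment): try to satisfy the pending port I/O from an
 * in-kernel device on KVM_PIO_BUS.  A zero return means a device claimed
 * the access; otherwise the I/O must be completed by userspace.
 */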
3620 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3621 {
3622         /* TODO: String I/O for in-kernel device */
3623         int r;
3624
3625         if (vcpu->arch.pio.in)
3626                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3627                                     vcpu->arch.pio.size, pd);
3628         else
3629                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3630                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
3631                                      pd);
3632         return r;
3633 }
3634
3635
3636 static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
3637                              unsigned int count, struct kvm_vcpu *vcpu)
3638 {
3639         if (vcpu->arch.pio.count)
3640                 goto data_avail;
3641
3642         trace_kvm_pio(1, port, size, 1);
3643
3644         vcpu->arch.pio.port = port;
3645         vcpu->arch.pio.in = 1;
3646         vcpu->arch.pio.count  = count;
3647         vcpu->arch.pio.size = size;
3648
3649         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3650         data_avail:
3651                 memcpy(val, vcpu->arch.pio_data, size * count);
3652                 vcpu->arch.pio.count = 0;
3653                 return 1;
3654         }
3655
3656         vcpu->run->exit_reason = KVM_EXIT_IO;
3657         vcpu->run->io.direction = KVM_EXIT_IO_IN;
3658         vcpu->run->io.size = size;
3659         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3660         vcpu->run->io.count = count;
3661         vcpu->run->io.port = port;
3662
3663         return 0;
3664 }
3665
3666 static int emulator_pio_out_emulated(int size, unsigned short port,
3667                               const void *val, unsigned int count,
3668                               struct kvm_vcpu *vcpu)
3669 {
3670         trace_kvm_pio(0, port, size, 1);
3671
3672         vcpu->arch.pio.port = port;
3673         vcpu->arch.pio.in = 0;
3674         vcpu->arch.pio.count = count;
3675         vcpu->arch.pio.size = size;
3676
3677         memcpy(vcpu->arch.pio_data, val, size * count);
3678
3679         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3680                 vcpu->arch.pio.count = 0;
3681                 return 1;
3682         }
3683
3684         vcpu->run->exit_reason = KVM_EXIT_IO;
3685         vcpu->run->io.direction = KVM_EXIT_IO_OUT;
3686         vcpu->run->io.size = size;
3687         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3688         vcpu->run->io.count = count;
3689         vcpu->run->io.port = port;
3690
3691         return 0;
3692 }
3693
3694 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3695 {
3696         return kvm_x86_ops->get_segment_base(vcpu, seg);
3697 }
3698
3699 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3700 {
3701         kvm_mmu_invlpg(vcpu, address);
3702         return X86EMUL_CONTINUE;
3703 }
3704
3705 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
3706 {
3707         if (!need_emulate_wbinvd(vcpu))
3708                 return X86EMUL_CONTINUE;
3709
3710         if (kvm_x86_ops->has_wbinvd_exit()) {
3711                 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
3712                                 wbinvd_ipi, NULL, 1);
3713                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
3714         }
3715         wbinvd();
3716         return X86EMUL_CONTINUE;
3717 }
3718 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
3719
3720 int emulate_clts(struct kvm_vcpu *vcpu)
3721 {
3722         kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3723         kvm_x86_ops->fpu_activate(vcpu);
3724         return X86EMUL_CONTINUE;
3725 }
3726
3727 int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
3728 {
3729         return _kvm_get_dr(vcpu, dr, dest);
3730 }
3731
3732 int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
3733 {
3734
3735         return __kvm_set_dr(vcpu, dr, value);
3736 }
3737
3738 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3739 {
3740         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3741 }
3742
3743 static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
3744 {
3745         unsigned long value;
3746
3747         switch (cr) {
3748         case 0:
3749                 value = kvm_read_cr0(vcpu);
3750                 break;
3751         case 2:
3752                 value = vcpu->arch.cr2;
3753                 break;
3754         case 3:
3755                 value = vcpu->arch.cr3;
3756                 break;
3757         case 4:
3758                 value = kvm_read_cr4(vcpu);
3759                 break;
3760         case 8:
3761                 value = kvm_get_cr8(vcpu);
3762                 break;
3763         default:
3764                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3765                 return 0;
3766         }
3767
3768         return value;
3769 }
3770
3771 static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
3772 {
3773         int res = 0;
3774
3775         switch (cr) {
3776         case 0:
3777                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
3778                 break;
3779         case 2:
3780                 vcpu->arch.cr2 = val;
3781                 break;
3782         case 3:
3783                 res = kvm_set_cr3(vcpu, val);
3784                 break;
3785         case 4:
3786                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
3787                 break;
3788         case 8:
3789                 res = __kvm_set_cr8(vcpu, val & 0xfUL);
3790                 break;
3791         default:
3792                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3793                 res = -1;
3794         }
3795
3796         return res;
3797 }
3798
3799 static int emulator_get_cpl(struct kvm_vcpu *vcpu)
3800 {
3801         return kvm_x86_ops->get_cpl(vcpu);
3802 }
3803
3804 static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
3805 {
3806         kvm_x86_ops->get_gdt(vcpu, dt);
3807 }
3808
3809 static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
3810 {
3811         kvm_x86_ops->get_idt(vcpu, dt);
3812 }
3813
3814 static unsigned long emulator_get_cached_segment_base(int seg,
3815                                                       struct kvm_vcpu *vcpu)
3816 {
3817         return get_segment_base(vcpu, seg);
3818 }
3819
3820 static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
3821                                            struct kvm_vcpu *vcpu)
3822 {
3823         struct kvm_segment var;
3824
3825         kvm_get_segment(vcpu, &var, seg);
3826
3827         if (var.unusable)
3828                 return false;
3829
3830         if (var.g)
3831                 var.limit >>= 12;
3832         set_desc_limit(desc, var.limit);
3833         set_desc_base(desc, (unsigned long)var.base);
3834         desc->type = var.type;
3835         desc->s = var.s;
3836         desc->dpl = var.dpl;
3837         desc->p = var.present;
3838         desc->avl = var.avl;
3839         desc->l = var.l;
3840         desc->d = var.db;
3841         desc->g = var.g;
3842
3843         return true;
3844 }
3845
3846 static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
3847                                            struct kvm_vcpu *vcpu)
3848 {
3849         struct kvm_segment var;
3850
3851         /* needed to preserve selector */
3852         kvm_get_segment(vcpu, &var, seg);
3853
3854         var.base = get_desc_base(desc);
3855         var.limit = get_desc_limit(desc);
3856         if (desc->g)
3857                 var.limit = (var.limit << 12) | 0xfff;
3858         var.type = desc->type;
3859         var.present = desc->p;
3860         var.dpl = desc->dpl;
3861         var.db = desc->d;
3862         var.s = desc->s;
3863         var.l = desc->l;
3864         var.g = desc->g;
3865         var.avl = desc->avl;
3866         var.present = desc->p;
3867         var.unusable = !var.present;
3868         var.padding = 0;
3869
3870         kvm_set_segment(vcpu, &var, seg);
3871         return;
3872 }
3873
3874 static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
3875 {
3876         struct kvm_segment kvm_seg;
3877
3878         kvm_get_segment(vcpu, &kvm_seg, seg);
3879         return kvm_seg.selector;
3880 }
3881
3882 static void emulator_set_segment_selector(u16 sel, int seg,
3883                                           struct kvm_vcpu *vcpu)
3884 {
3885         struct kvm_segment kvm_seg;
3886
3887         kvm_get_segment(vcpu, &kvm_seg, seg);
3888         kvm_seg.selector = sel;
3889         kvm_set_segment(vcpu, &kvm_seg, seg);
3890 }
3891
3892 static struct x86_emulate_ops emulate_ops = {
3893         .read_std            = kvm_read_guest_virt_system,
3894         .write_std           = kvm_write_guest_virt_system,
3895         .fetch               = kvm_fetch_guest_virt,
3896         .read_emulated       = emulator_read_emulated,
3897         .write_emulated      = emulator_write_emulated,
3898         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
3899         .pio_in_emulated     = emulator_pio_in_emulated,
3900         .pio_out_emulated    = emulator_pio_out_emulated,
3901         .get_cached_descriptor = emulator_get_cached_descriptor,
3902         .set_cached_descriptor = emulator_set_cached_descriptor,
3903         .get_segment_selector = emulator_get_segment_selector,
3904         .set_segment_selector = emulator_set_segment_selector,
3905         .get_cached_segment_base = emulator_get_cached_segment_base,
3906         .get_gdt             = emulator_get_gdt,
3907         .get_idt             = emulator_get_idt,
3908         .get_cr              = emulator_get_cr,
3909         .set_cr              = emulator_set_cr,
3910         .cpl                 = emulator_get_cpl,
3911         .get_dr              = emulator_get_dr,
3912         .set_dr              = emulator_set_dr,
3913         .set_msr             = kvm_set_msr,
3914         .get_msr             = kvm_get_msr,
3915 };
3916
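/*
 * Note (added comment): reading RAX, RSP and RIP through the accessors
 * pulls them into the register cache; marking every register dirty
 * afterwards keeps the cached copies authoritative for the emulator's
 * direct vcpu->arch.regs accesses.
 */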
3917 static void cache_all_regs(struct kvm_vcpu *vcpu)
3918 {
3919         kvm_register_read(vcpu, VCPU_REGS_RAX);
3920         kvm_register_read(vcpu, VCPU_REGS_RSP);
3921         kvm_register_read(vcpu, VCPU_REGS_RIP);
3922         vcpu->arch.regs_dirty = ~0;
3923 }
3924
3925 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
3926 {
3927         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
3928         /*
3929          * An sti; sti; sequence only disables interrupts for the first
3930          * instruction. So, if the last instruction, be it emulated or
3931          * not, left the system with the INT_STI flag enabled, it
3932          * means that the last instruction was an sti. We should not
3933          * leave the flag on in this case. The same goes for mov ss.
3934          */
3935         if (!(int_shadow & mask))
3936                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
3937 }
3938
3939 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
3940 {
3941         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
3942         if (ctxt->exception == PF_VECTOR)
3943                 kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
3944         else if (ctxt->error_code_valid)
3945                 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
3946         else
3947                 kvm_queue_exception(vcpu, ctxt->exception);
3948 }
3949
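/*
 * Note (added comment): snapshot EFLAGS and RIP and derive the emulation
 * mode (real, vm86 or 16/32/64-bit protected) from CR0.PE, EFLAGS.VM and
 * the CS descriptor's L and D bits before handing the context to the
 * emulator.
 */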
3950 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
3951 {
3952         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
3953         int cs_db, cs_l;
3954
3955         cache_all_regs(vcpu);
3956
3957         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3958
3959         vcpu->arch.emulate_ctxt.vcpu = vcpu;
3960         vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
3961         vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
3962         vcpu->arch.emulate_ctxt.mode =
3963                 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
3964                 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
3965                 ? X86EMUL_MODE_VM86 : cs_l
3966                 ? X86EMUL_MODE_PROT64 : cs_db
3967                 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3968         memset(c, 0, sizeof(struct decode_cache));
3969         memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
3970 }
3971
3972 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
3973 {
3974         ++vcpu->stat.insn_emulation_fail;
3975         trace_kvm_emulate_insn_failed(vcpu);
3976         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3977         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3978         vcpu->run->internal.ndata = 0;
3979         kvm_queue_exception(vcpu, UD_VECTOR);
3980         return EMULATE_FAIL;
3981 }
3982
3983 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
3984 {
3985         gpa_t gpa;
3986
3987         if (tdp_enabled)
3988                 return false;
3989
3990         /*
3991          * If emulation was due to an access to a shadowed page table
3992          * and it failed, try to unshadow the page and re-enter the
3993          * guest to let the CPU execute the instruction.
3994          */
3995         if (kvm_mmu_unprotect_page_virt(vcpu, gva))
3996                 return true;
3997
3998         gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
3999
4000         if (gpa == UNMAPPED_GVA)
4001                 return true; /* let cpu generate fault */
4002
4003         if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
4004                 return true;
4005
4006         return false;
4007 }
4008
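/*
 * Note (added comment): main emulation entry point.  Decode the instruction
 * at the current RIP (unless EMULTYPE_NO_DECODE), optionally restrict #UD
 * emulation to the VMMCALL/sysenter/sysexit/syscall opcodes, execute it,
 * and classify the result as EMULATE_DONE, EMULATE_DO_MMIO or a failure.
 */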
4009 int emulate_instruction(struct kvm_vcpu *vcpu,
4010                         unsigned long cr2,
4011                         u16 error_code,
4012                         int emulation_type)
4013 {
4014         int r;
4015         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
4016
4017         kvm_clear_exception_queue(vcpu);
4018         vcpu->arch.mmio_fault_cr2 = cr2;
4019         /*
4020          * TODO: fix emulate.c to use guest_read/write_register
4021          * instead of direct ->regs accesses; this can save hundreds of
4022          * cycles on Intel for instructions that don't read/change RSP,
4023          * for example.
4024          */
4025         cache_all_regs(vcpu);
4026
4027         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4028                 init_emulate_ctxt(vcpu);
4029                 vcpu->arch.emulate_ctxt.interruptibility = 0;
4030                 vcpu->arch.emulate_ctxt.exception = -1;
4031                 vcpu->arch.emulate_ctxt.perm_ok = false;
4032
4033                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
4034                 trace_kvm_emulate_insn_start(vcpu);
4035
4036                 /* Only allow emulation of specific instructions on #UD
4037                  * (namely VMMCALL, sysenter, sysexit, syscall) */
4038                 if (emulation_type & EMULTYPE_TRAP_UD) {
4039                         if (!c->twobyte)
4040                                 return EMULATE_FAIL;
4041                         switch (c->b) {
4042                         case 0x01: /* VMMCALL */
4043                                 if (c->modrm_mod != 3 || c->modrm_rm != 1)
4044                                         return EMULATE_FAIL;
4045                                 break;
4046                         case 0x34: /* sysenter */
4047                         case 0x35: /* sysexit */
4048                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4049                                         return EMULATE_FAIL;
4050                                 break;
4051                         case 0x05: /* syscall */
4052                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4053                                         return EMULATE_FAIL;
4054                                 break;
4055                         default:
4056                                 return EMULATE_FAIL;
4057                         }
4058
4059                         if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
4060                                 return EMULATE_FAIL;
4061                 }
4062
4063                 ++vcpu->stat.insn_emulation;
4064                 if (r)  {
4065                         if (reexecute_instruction(vcpu, cr2))
4066                                 return EMULATE_DONE;
4067                         if (emulation_type & EMULTYPE_SKIP)
4068                                 return EMULATE_FAIL;
4069                         return handle_emulation_failure(vcpu);
4070                 }
4071         }
4072
4073         if (emulation_type & EMULTYPE_SKIP) {
4074                 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
4075                 return EMULATE_DONE;
4076         }
4077
4078         /* This is needed for the vmware backdoor interface to work since
4079            it changes register values during the IO operation */
4080         memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4081
4082 restart:
4083         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
4084
4085         if (r) { /* emulation failed */
4086                 if (reexecute_instruction(vcpu, cr2))
4087                         return EMULATE_DONE;
4088
4089                 return handle_emulation_failure(vcpu);
4090         }
4091
4092         r = EMULATE_DONE;
4093
4094         if (vcpu->arch.emulate_ctxt.exception >= 0)
4095                 inject_emulated_exception(vcpu);
4096         else if (vcpu->arch.pio.count) {
4097                 if (!vcpu->arch.pio.in)
4098                         vcpu->arch.pio.count = 0;
4099                 r = EMULATE_DO_MMIO;
4100         } else if (vcpu->mmio_needed) {
4101                 if (vcpu->mmio_is_write)
4102                         vcpu->mmio_needed = 0;
4103                 r = EMULATE_DO_MMIO;
4104         } else if (vcpu->arch.emulate_ctxt.restart)
4105                 goto restart;
4106
4107         toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
4108         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4109         memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4110         kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4111
4112         return r;
4113 }
4114 EXPORT_SYMBOL_GPL(emulate_instruction);
4115
4116 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4117 {
4118         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4119         int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
4120         /* do not return to emulator after return from userspace */
4121         vcpu->arch.pio.count = 0;
4122         return ret;
4123 }
4124 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4125
4126 static void bounce_off(void *info)
4127 {
4128         /* nothing */
4129 }
4130
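/*
 * Note (added comment): on a cpufreq transition, record the new TSC
 * frequency for the affected CPU and request a kvmclock update for every
 * vcpu resident on it.  When the frequency is raised, the CPU is kicked
 * with an IPI (bounce_off does nothing; the IPI only forces a guest exit).
 */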
4131 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4132                                      void *data)
4133 {
4134         struct cpufreq_freqs *freq = data;
4135         struct kvm *kvm;
4136         struct kvm_vcpu *vcpu;
4137         int i, send_ipi = 0;
4138
4139         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4140                 return 0;
4141         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4142                 return 0;
4143         per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
4144
4145         spin_lock(&kvm_lock);
4146         list_for_each_entry(kvm, &vm_list, vm_list) {
4147                 kvm_for_each_vcpu(i, vcpu, kvm) {
4148                         if (vcpu->cpu != freq->cpu)
4149                                 continue;
4150                         if (!kvm_request_guest_time_update(vcpu))
4151                                 continue;
4152                         if (vcpu->cpu != smp_processor_id())
4153                                 send_ipi++;
4154                 }
4155         }
4156         spin_unlock(&kvm_lock);
4157
4158         if (freq->old < freq->new && send_ipi) {
4159                 /*
4160                  * We upscale the frequency.  We must make sure the guest
4161                  * doesn't see old kvmclock values while running with
4162                  * the new frequency, otherwise we risk that the guest
4163                  * sees time go backwards.
4164                  *
4165                  * In case we update the frequency for another cpu
4166                  * (which might be in guest context) send an interrupt
4167                  * to kick the cpu out of guest context.  Next time
4168                  * guest context is entered kvmclock will be updated,
4169                  * so the guest will not see stale values.
4170                  */
4171                 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
4172         }
4173         return 0;
4174 }
4175
4176 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4177         .notifier_call  = kvmclock_cpufreq_notifier
4178 };
4179
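/*
 * Note (added comment): without a constant TSC the per-cpu TSC frequency
 * must track cpufreq, so the transition notifier is registered and
 * cpu_tsc_khz is seeded from cpufreq_get(), falling back to tsc_khz.
 * With a constant TSC every CPU simply uses tsc_khz.
 */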
4180 static void kvm_timer_init(void)
4181 {
4182         int cpu;
4183
4184         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4185                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4186                                           CPUFREQ_TRANSITION_NOTIFIER);
4187                 for_each_online_cpu(cpu) {
4188                         unsigned long khz = cpufreq_get(cpu);
4189                         if (!khz)
4190                                 khz = tsc_khz;
4191                         per_cpu(cpu_tsc_khz, cpu) = khz;
4192                 }
4193         } else {
4194                 for_each_possible_cpu(cpu)
4195                         per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
4196         }
4197 }
4198
4199 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4200
4201 static int kvm_is_in_guest(void)
4202 {
4203         return percpu_read(current_vcpu) != NULL;
4204 }
4205
4206 static int kvm_is_user_mode(void)
4207 {
4208         int user_mode = 3;
4209
4210         if (percpu_read(current_vcpu))
4211                 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
4212
4213         return user_mode != 0;
4214 }
4215
4216 static unsigned long kvm_get_guest_ip(void)
4217 {
4218         unsigned long ip = 0;
4219
4220         if (percpu_read(current_vcpu))
4221                 ip = kvm_rip_read(percpu_read(current_vcpu));
4222
4223         return ip;
4224 }
4225
4226 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4227         .is_in_guest            = kvm_is_in_guest,
4228         .is_user_mode           = kvm_is_user_mode,
4229         .get_guest_ip           = kvm_get_guest_ip,
4230 };
4231
4232 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4233 {
4234         percpu_write(current_vcpu, vcpu);
4235 }
4236 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4237
4238 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4239 {
4240         percpu_write(current_vcpu, NULL);
4241 }
4242 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4243
4244 int kvm_arch_init(void *opaque)
4245 {
4246         int r;
4247         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4248
4249         if (kvm_x86_ops) {
4250                 printk(KERN_ERR "kvm: already loaded the other module\n");
4251                 r = -EEXIST;
4252                 goto out;
4253         }
4254
4255         if (!ops->cpu_has_kvm_support()) {
4256                 printk(KERN_ERR "kvm: no hardware support\n");
4257                 r = -EOPNOTSUPP;
4258                 goto out;
4259         }
4260         if (ops->disabled_by_bios()) {
4261                 printk(KERN_ERR "kvm: disabled by bios\n");
4262                 r = -EOPNOTSUPP;
4263                 goto out;
4264         }
4265
4266         r = kvm_mmu_module_init();
4267         if (r)
4268                 goto out;
4269
4270         kvm_init_msr_list();
4271
4272         kvm_x86_ops = ops;
4273         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
4274         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
4275         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4276                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
4277
4278         kvm_timer_init();
4279
4280         perf_register_guest_info_callbacks(&kvm_guest_cbs);
4281
4282         if (cpu_has_xsave)
4283                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4284
4285         return 0;
4286
4287 out:
4288         return r;
4289 }
4290
4291 void kvm_arch_exit(void)
4292 {
4293         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4294
4295         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4296                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4297                                             CPUFREQ_TRANSITION_NOTIFIER);
4298         kvm_x86_ops = NULL;
4299         kvm_mmu_module_exit();
4300 }
4301
4302 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4303 {
4304         ++vcpu->stat.halt_exits;
4305         if (irqchip_in_kernel(vcpu->kvm)) {
4306                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4307                 return 1;
4308         } else {
4309                 vcpu->run->exit_reason = KVM_EXIT_HLT;
4310                 return 0;
4311         }
4312 }
4313 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4314
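/*
 * Note (added comment): reassemble the hypercall GPA argument.  64-bit
 * guests pass the whole address in a0; 32-bit guests split it into
 * a0 (low half) and a1 (high half).
 */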
4315 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
4316                            unsigned long a1)
4317 {
4318         if (is_long_mode(vcpu))
4319                 return a0;
4320         else
4321                 return a0 | ((gpa_t)a1 << 32);
4322 }
4323
4324 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4325 {
4326         u64 param, ingpa, outgpa, ret;
4327         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4328         bool fast, longmode;
4329         int cs_db, cs_l;
4330
4331         /*
4332          * A hypercall generates a #UD from non-zero CPL or real mode,
4333          * per the Hyper-V spec.
4334          */
4335         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4336                 kvm_queue_exception(vcpu, UD_VECTOR);
4337                 return 0;
4338         }
4339
4340         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4341         longmode = is_long_mode(vcpu) && cs_l == 1;
4342
4343         if (!longmode) {
4344                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4345                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4346                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4347                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4348                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4349                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4350         }
4351 #ifdef CONFIG_X86_64
4352         else {
4353                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4354                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4355                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4356         }
4357 #endif
4358
4359         code = param & 0xffff;
4360         fast = (param >> 16) & 0x1;
4361         rep_cnt = (param >> 32) & 0xfff;
4362         rep_idx = (param >> 48) & 0xfff;
4363
4364         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4365
4366         switch (code) {
4367         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4368                 kvm_vcpu_on_spin(vcpu);
4369                 break;
4370         default:
4371                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
4372                 break;
4373         }
4374
4375         ret = res | (((u64)rep_done & 0xfff) << 32);
4376         if (longmode) {
4377                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4378         } else {
4379                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4380                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4381         }
4382
4383         return 1;
4384 }
4385
4386 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4387 {
4388         unsigned long nr, a0, a1, a2, a3, ret;
4389         int r = 1;
4390
4391         if (kvm_hv_hypercall_enabled(vcpu->kvm))
4392                 return kvm_hv_hypercall(vcpu);
4393
4394         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
4395         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
4396         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
4397         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
4398         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
4399
4400         trace_kvm_hypercall(nr, a0, a1, a2, a3);
4401
4402         if (!is_long_mode(vcpu)) {
4403                 nr &= 0xFFFFFFFF;
4404                 a0 &= 0xFFFFFFFF;
4405                 a1 &= 0xFFFFFFFF;
4406                 a2 &= 0xFFFFFFFF;
4407                 a3 &= 0xFFFFFFFF;
4408         }
4409
4410         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
4411                 ret = -KVM_EPERM;
4412                 goto out;
4413         }
4414
4415         switch (nr) {
4416         case KVM_HC_VAPIC_POLL_IRQ:
4417                 ret = 0;
4418                 break;
4419         case KVM_HC_MMU_OP:
4420                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
4421                 break;
4422         default:
4423                 ret = -KVM_ENOSYS;
4424                 break;
4425         }
4426 out:
4427         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4428         ++vcpu->stat.hypercalls;
4429         return r;
4430 }
4431 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
4432
4433 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
4434 {
4435         char instruction[3];
4436         unsigned long rip = kvm_rip_read(vcpu);
4437
4438         /*
4439          * Blow out the MMU so that no other VCPU has an active mapping,
4440          * ensuring that the updated hypercall instruction appears
4441          * atomically across all VCPUs.
4442          */
4443         kvm_mmu_zap_all(vcpu->kvm);
4444
4445         kvm_x86_ops->patch_hypercall(vcpu, instruction);
4446
4447         return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
4448 }
4449
4450 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4451 {
4452         struct desc_ptr dt = { limit, base };
4453
4454         kvm_x86_ops->set_gdt(vcpu, &dt);
4455 }
4456
4457 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4458 {
4459         struct desc_ptr dt = { limit, base };
4460
4461         kvm_x86_ops->set_idt(vcpu, &dt);
4462 }
4463
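/*
 * Note (added comment): stateful CPUID leaves (e.g. leaf 2) return
 * different data on successive reads.  KVM models this with the
 * KVM_CPUID_FLAG_STATE_READ_NEXT flag, which this helper moves to the
 * next entry sharing the same function number.
 */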
4464 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4465 {
4466         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4467         int j, nent = vcpu->arch.cpuid_nent;
4468
4469         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4470         /* when no next entry is found, the current entry[i] is reselected */
4471         for (j = i + 1; ; j = (j + 1) % nent) {
4472                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
4473                 if (ej->function == e->function) {
4474                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4475                         return j;
4476                 }
4477         }
4478         return 0; /* silence gcc, even though control never reaches here */
4479 }
4480
4481 /* find an entry with matching function, matching index (if needed), and that
4482  * should be read next (if it's stateful) */
4483 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4484         u32 function, u32 index)
4485 {
4486         if (e->function != function)
4487                 return 0;
4488         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4489                 return 0;
4490         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
4491             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
4492                 return 0;
4493         return 1;
4494 }
4495
4496 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4497                                               u32 function, u32 index)
4498 {
4499         int i;
4500         struct kvm_cpuid_entry2 *best = NULL;
4501
4502         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
4503                 struct kvm_cpuid_entry2 *e;
4504
4505                 e = &vcpu->arch.cpuid_entries[i];
4506                 if (is_matching_cpuid_entry(e, function, index)) {
4507                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4508                                 move_to_next_stateful_cpuid_entry(vcpu, i);
4509                         best = e;
4510                         break;
4511                 }
4512                 /*
4513                  * Both basic or both extended?  Remember the highest such
4514                  * entry as a fallback in case no exact match is found.
4515                  */
4515                 if (((e->function ^ function) & 0x80000000) == 0)
4516                         if (!best || e->function > best->function)
4517                                 best = e;
4518         }
4519         return best;
4520 }
4521 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
4522
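/*
 * Note (added comment): MAXPHYADDR comes from CPUID leaf 0x80000008,
 * EAX bits 7:0; when the guest's CPUID does not expose that leaf, a
 * default of 36 bits is returned.
 */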
4523 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4524 {
4525         struct kvm_cpuid_entry2 *best;
4526
4527         best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
4528         if (!best || best->eax < 0x80000008)
4529                 goto not_found;
4530         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4531         if (best)
4532                 return best->eax & 0xff;
4533 not_found:
4534         return 36;
4535 }
4536
4537 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4538 {
4539         u32 function, index;
4540         struct kvm_cpuid_entry2 *best;
4541
4542         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4543         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4544         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4545         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4546         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4547         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4548         best = kvm_find_cpuid_entry(vcpu, function, index);
4549         if (best) {
4550                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4551                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4552                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4553                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
4554         }
4555         kvm_x86_ops->skip_emulated_instruction(vcpu);
4556         trace_kvm_cpuid(function,
4557                         kvm_register_read(vcpu, VCPU_REGS_RAX),
4558                         kvm_register_read(vcpu, VCPU_REGS_RBX),
4559                         kvm_register_read(vcpu, VCPU_REGS_RCX),
4560                         kvm_register_read(vcpu, VCPU_REGS_RDX));
4561 }
4562 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
4563
4564 /*
4565  * Check if userspace requested an interrupt window, and that the
4566  * interrupt window is open.
4567  *
4568  * No need to exit to userspace if we already have an interrupt queued.
4569  */
4570 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
4571 {
4572         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
4573                 vcpu->run->request_interrupt_window &&
4574                 kvm_arch_interrupt_allowed(vcpu));
4575 }
4576
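     /*
      * Mirror interrupt-related vcpu state (IF, CR8, APIC base and
      * readiness for interrupt injection) into the kvm_run area so
      * userspace can see it after KVM_RUN returns.
      */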
4577 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
4578 {
4579         struct kvm_run *kvm_run = vcpu->run;
4580
4581         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
4582         kvm_run->cr8 = kvm_get_cr8(vcpu);
4583         kvm_run->apic_base = kvm_get_apic_base(vcpu);
4584         if (irqchip_in_kernel(vcpu->kvm))
4585                 kvm_run->ready_for_interrupt_injection = 1;
4586         else
4587                 kvm_run->ready_for_interrupt_injection =
4588                         kvm_arch_interrupt_allowed(vcpu) &&
4589                         !kvm_cpu_has_interrupt(vcpu) &&
4590                         !kvm_event_needs_reinjection(vcpu);
4591 }
4592
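     /*
      * Pin the guest page backing the virtual APIC (TPR shadow) so it can
      * be accessed while running the guest; vapic_exit() releases it and
      * marks it dirty.
      */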
4593 static void vapic_enter(struct kvm_vcpu *vcpu)
4594 {
4595         struct kvm_lapic *apic = vcpu->arch.apic;
4596         struct page *page;
4597
4598         if (!apic || !apic->vapic_addr)
4599                 return;
4600
4601         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4602
4603         vcpu->arch.apic->vapic_page = page;
4604 }
4605
4606 static void vapic_exit(struct kvm_vcpu *vcpu)
4607 {
4608         struct kvm_lapic *apic = vcpu->arch.apic;
4609         int idx;
4610
4611         if (!apic || !apic->vapic_addr)
4612                 return;
4613
4614         idx = srcu_read_lock(&vcpu->kvm->srcu);
4615         kvm_release_page_dirty(apic->vapic_page);
4616         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4617         srcu_read_unlock(&vcpu->kvm->srcu, idx);
4618 }
4619
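     /*
      * Tell the vendor code the guest's current TPR and the priority of
      * the highest pending interrupt so it can decide whether CR8/TPR
      * accesses need to be intercepted.
      */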
4620 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4621 {
4622         int max_irr, tpr;
4623
4624         if (!kvm_x86_ops->update_cr8_intercept)
4625                 return;
4626
4627         if (!vcpu->arch.apic)
4628                 return;
4629
4630         if (!vcpu->arch.apic->vapic_addr)
4631                 max_irr = kvm_lapic_find_highest_irr(vcpu);
4632         else
4633                 max_irr = -1;
4634
4635         if (max_irr != -1)
4636                 max_irr >>= 4;
4637
4638         tpr = kvm_lapic_get_cr8(vcpu);
4639
4640         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4641 }
4642
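     /*
      * Injection priority: re-deliver a pending exception first, then a
      * previously injected NMI or interrupt, and only then queue new NMIs
      * or interrupts if the guest can accept them right now.
      */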
4643 static void inject_pending_event(struct kvm_vcpu *vcpu)
4644 {
4645         /* try to reinject previous events if any */
4646         if (vcpu->arch.exception.pending) {
4647                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
4648                                         vcpu->arch.exception.has_error_code,
4649                                         vcpu->arch.exception.error_code);
4650                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4651                                           vcpu->arch.exception.has_error_code,
4652                                           vcpu->arch.exception.error_code,
4653                                           vcpu->arch.exception.reinject);
4654                 return;
4655         }
4656
4657         if (vcpu->arch.nmi_injected) {
4658                 kvm_x86_ops->set_nmi(vcpu);
4659                 return;
4660         }
4661
4662         if (vcpu->arch.interrupt.pending) {
4663                 kvm_x86_ops->set_irq(vcpu);
4664                 return;
4665         }
4666
4667         /* try to inject new event if pending */
4668         if (vcpu->arch.nmi_pending) {
4669                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4670                         vcpu->arch.nmi_pending = false;
4671                         vcpu->arch.nmi_injected = true;
4672                         kvm_x86_ops->set_nmi(vcpu);
4673                 }
4674         } else if (kvm_cpu_has_interrupt(vcpu)) {
4675                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
4676                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4677                                             false);
4678                         kvm_x86_ops->set_irq(vcpu);
4679                 }
4680         }
4681 }
4682
4683 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
4684 {
4685         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
4686                         !vcpu->guest_xcr0_loaded) {
4687                 /* kvm_set_xcr() also depends on this */
4688                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
4689                 vcpu->guest_xcr0_loaded = 1;
4690         }
4691 }
4692
4693 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
4694 {
4695         if (vcpu->guest_xcr0_loaded) {
4696                 if (vcpu->arch.xcr0 != host_xcr0)
4697                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
4698                 vcpu->guest_xcr0_loaded = 0;
4699         }
4700 }
4701
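     /*
      * One guest entry/exit cycle: handle pending requests, inject events,
      * run the guest and process the resulting exit.  Returns 1 if the
      * __vcpu_run() loop should continue, <= 0 to drop back to userspace.
      */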
4702 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4703 {
4704         int r;
4705         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
4706                 vcpu->run->request_interrupt_window;
4707
4708         if (vcpu->requests) {
4709                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
4710                         kvm_mmu_unload(vcpu);
4711                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
4712                         __kvm_migrate_timers(vcpu);
4713                 if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu))
4714                         kvm_write_guest_time(vcpu);
4715                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
4716                         kvm_mmu_sync_roots(vcpu);
4717                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
4718                         kvm_x86_ops->tlb_flush(vcpu);
4719                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
4720                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
4721                         r = 0;
4722                         goto out;
4723                 }
4724                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
4725                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4726                         r = 0;
4727                         goto out;
4728                 }
4729                 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
4730                         vcpu->fpu_active = 0;
4731                         kvm_x86_ops->fpu_deactivate(vcpu);
4732                 }
4733         }
4734
4735         r = kvm_mmu_reload(vcpu);
4736         if (unlikely(r))
4737                 goto out;
4738
4739         preempt_disable();
4740
4741         kvm_x86_ops->prepare_guest_switch(vcpu);
4742         if (vcpu->fpu_active)
4743                 kvm_load_guest_fpu(vcpu);
4744         kvm_load_guest_xcr0(vcpu);
4745
4746         atomic_set(&vcpu->guest_mode, 1);
4747         smp_wmb();
4748
4749         local_irq_disable();
4750
4751         if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
4752             || need_resched() || signal_pending(current)) {
4753                 atomic_set(&vcpu->guest_mode, 0);
4754                 smp_wmb();
4755                 local_irq_enable();
4756                 preempt_enable();
4757                 r = 1;
4758                 goto out;
4759         }
4760
4761         inject_pending_event(vcpu);
4762
4763         /* enable NMI/IRQ window open exits if needed */
4764         if (vcpu->arch.nmi_pending)
4765                 kvm_x86_ops->enable_nmi_window(vcpu);
4766         else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4767                 kvm_x86_ops->enable_irq_window(vcpu);
4768
4769         if (kvm_lapic_enabled(vcpu)) {
4770                 update_cr8_intercept(vcpu);
4771                 kvm_lapic_sync_to_vapic(vcpu);
4772         }
4773
4774         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4775
4776         kvm_guest_enter();
4777
4778         if (unlikely(vcpu->arch.switch_db_regs)) {
4779                 set_debugreg(0, 7);
4780                 set_debugreg(vcpu->arch.eff_db[0], 0);
4781                 set_debugreg(vcpu->arch.eff_db[1], 1);
4782                 set_debugreg(vcpu->arch.eff_db[2], 2);
4783                 set_debugreg(vcpu->arch.eff_db[3], 3);
4784         }
4785
4786         trace_kvm_entry(vcpu->vcpu_id);
4787         kvm_x86_ops->run(vcpu);
4788
4789         /*
4790          * If the guest has used debug registers, at least dr7
4791          * will be disabled while returning to the host.
4792          * If we don't have active breakpoints in the host, we don't
4793          * care about the messed up debug address registers. But if
4794          * we have some of them active, restore the old state.
4795          */
4796         if (hw_breakpoint_active())
4797                 hw_breakpoint_restore();
4798
4799         atomic_set(&vcpu->guest_mode, 0);
4800         smp_wmb();
4801         local_irq_enable();
4802
4803         ++vcpu->stat.exits;
4804
4805         /*
4806          * We must have an instruction between local_irq_enable() and
4807          * kvm_guest_exit(), so the timer interrupt isn't delayed by
4808          * the interrupt shadow.  The stat.exits increment will do nicely.
4809          * But we need to prevent reordering, hence this barrier():
4810          */
4811         barrier();
4812
4813         kvm_guest_exit();
4814
4815         preempt_enable();
4816
4817         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4818
4819         /*
4820          * Profile KVM exit RIPs:
4821          */
4822         if (unlikely(prof_on == KVM_PROFILING)) {
4823                 unsigned long rip = kvm_rip_read(vcpu);
4824                 profile_hit(KVM_PROFILING, (void *)rip);
4825         }
4826
4827
4828         kvm_lapic_sync_from_vapic(vcpu);
4829
4830         r = kvm_x86_ops->handle_exit(vcpu);
4831 out:
4832         return r;
4833 }
4834
4835
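     /*
      * Main vcpu loop: keep entering the guest until an exit needs to be
      * reported to userspace, a signal is pending or a requested interrupt
      * window opens, blocking whenever the vcpu is not runnable.
      */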
4836 static int __vcpu_run(struct kvm_vcpu *vcpu)
4837 {
4838         int r;
4839         struct kvm *kvm = vcpu->kvm;
4840
4841         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
4842                 pr_debug("vcpu %d received sipi with vector # %x\n",
4843                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
4844                 kvm_lapic_reset(vcpu);
4845                 r = kvm_arch_vcpu_reset(vcpu);
4846                 if (r)
4847                         return r;
4848                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4849         }
4850
4851         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4852         vapic_enter(vcpu);
4853
4854         r = 1;
4855         while (r > 0) {
4856                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
4857                         r = vcpu_enter_guest(vcpu);
4858                 else {
4859                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4860                         kvm_vcpu_block(vcpu);
4861                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4862                         if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
4863                                 switch (vcpu->arch.mp_state) {
4865                                 case KVM_MP_STATE_HALTED:
4866                                         vcpu->arch.mp_state =
4867                                                 KVM_MP_STATE_RUNNABLE;
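                                                 /* fall through */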
4868                                 case KVM_MP_STATE_RUNNABLE:
4869                                         break;
4870                                 case KVM_MP_STATE_SIPI_RECEIVED:
4871                                 default:
4872                                         r = -EINTR;
4873                                         break;
4874                                 }
4875                         }
4876                 }
4877
4878                 if (r <= 0)
4879                         break;
4880
4881                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4882                 if (kvm_cpu_has_pending_timer(vcpu))
4883                         kvm_inject_pending_timer_irqs(vcpu);
4884
4885                 if (dm_request_for_irq_injection(vcpu)) {
4886                         r = -EINTR;
4887                         vcpu->run->exit_reason = KVM_EXIT_INTR;
4888                         ++vcpu->stat.request_irq_exits;
4889                 }
4890                 if (signal_pending(current)) {
4891                         r = -EINTR;
4892                         vcpu->run->exit_reason = KVM_EXIT_INTR;
4893                         ++vcpu->stat.signal_exits;
4894                 }
4895                 if (need_resched()) {
4896                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4897                         kvm_resched(vcpu);
4898                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4899                 }
4900         }
4901
4902         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4903
4904         vapic_exit(vcpu);
4905
4906         return r;
4907 }
4908
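     /*
      * Arch back end of the KVM_RUN ioctl: finish any interrupted pio/mmio
      * emulation, enter the run loop and publish the exit state via
      * post_kvm_run_save().
      */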
4909 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4910 {
4911         int r;
4912         sigset_t sigsaved;
4913
4914         if (vcpu->sigset_active)
4915                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4916
4917         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4918                 kvm_vcpu_block(vcpu);
4919                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
4920                 r = -EAGAIN;
4921                 goto out;
4922         }
4923
4924         /* re-sync apic's tpr */
4925         if (!irqchip_in_kernel(vcpu->kvm))
4926                 kvm_set_cr8(vcpu, kvm_run->cr8);
4927
4928         if (vcpu->arch.pio.count || vcpu->mmio_needed ||
4929             vcpu->arch.emulate_ctxt.restart) {
4930                 if (vcpu->mmio_needed) {
4931                         memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4932                         vcpu->mmio_read_completed = 1;
4933                         vcpu->mmio_needed = 0;
4934                 }
4935                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4936                 r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
4937                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4938                 if (r != EMULATE_DONE) {
4939                         r = 0;
4940                         goto out;
4941                 }
4942         }
4943         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4944                 kvm_register_write(vcpu, VCPU_REGS_RAX,
4945                                      kvm_run->hypercall.ret);
4946
4947         r = __vcpu_run(vcpu);
4948
4949 out:
4950         post_kvm_run_save(vcpu);
4951         if (vcpu->sigset_active)
4952                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4953
4954         return r;
4955 }
4956
4957 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4958 {
4959         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4960         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4961         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4962         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4963         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4964         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4965         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4966         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4967 #ifdef CONFIG_X86_64
4968         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4969         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4970         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4971         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4972         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4973         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4974         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4975         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
4976 #endif
4977
4978         regs->rip = kvm_rip_read(vcpu);
4979         regs->rflags = kvm_get_rflags(vcpu);
4980
4981         return 0;
4982 }
4983
4984 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4985 {
4986         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4987         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4988         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4989         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4990         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4991         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4992         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4993         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
4994 #ifdef CONFIG_X86_64
4995         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4996         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4997         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4998         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4999         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
5000         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
5001         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
5002         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
5003 #endif
5004
5005         kvm_rip_write(vcpu, regs->rip);
5006         kvm_set_rflags(vcpu, regs->rflags);
5007
5008         vcpu->arch.exception.pending = false;
5009
5010         return 0;
5011 }
5012
5013 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5014 {
5015         struct kvm_segment cs;
5016
5017         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
5018         *db = cs.db;
5019         *l = cs.l;
5020 }
5021 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
5022
5023 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
5024                                   struct kvm_sregs *sregs)
5025 {
5026         struct desc_ptr dt;
5027
5028         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5029         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5030         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5031         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5032         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5033         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5034
5035         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5036         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5037
5038         kvm_x86_ops->get_idt(vcpu, &dt);
5039         sregs->idt.limit = dt.size;
5040         sregs->idt.base = dt.address;
5041         kvm_x86_ops->get_gdt(vcpu, &dt);
5042         sregs->gdt.limit = dt.size;
5043         sregs->gdt.base = dt.address;
5044
5045         sregs->cr0 = kvm_read_cr0(vcpu);
5046         sregs->cr2 = vcpu->arch.cr2;
5047         sregs->cr3 = vcpu->arch.cr3;
5048         sregs->cr4 = kvm_read_cr4(vcpu);
5049         sregs->cr8 = kvm_get_cr8(vcpu);
5050         sregs->efer = vcpu->arch.efer;
5051         sregs->apic_base = kvm_get_apic_base(vcpu);
5052
5053         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5054
5055         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5056                 set_bit(vcpu->arch.interrupt.nr,
5057                         (unsigned long *)sregs->interrupt_bitmap);
5058
5059         return 0;
5060 }
5061
5062 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5063                                     struct kvm_mp_state *mp_state)
5064 {
5065         mp_state->mp_state = vcpu->arch.mp_state;
5066         return 0;
5067 }
5068
5069 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5070                                     struct kvm_mp_state *mp_state)
5071 {
5072         vcpu->arch.mp_state = mp_state->mp_state;
5073         return 0;
5074 }
5075
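     /*
      * Emulate a hardware task switch with the x86 emulator and, on
      * success, commit the resulting registers, RIP and RFLAGS to the
      * vcpu.
      */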
5076 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5077                     bool has_error_code, u32 error_code)
5078 {
5079         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
5080         int ret;
5081
5082         init_emulate_ctxt(vcpu);
5083
5084         ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
5085                                    tss_selector, reason, has_error_code,
5086                                    error_code);
5087
5088         if (ret)
5089                 return EMULATE_FAIL;
5090
5091         memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
5092         kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
5093         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5094         return EMULATE_DONE;
5095 }
5096 EXPORT_SYMBOL_GPL(kvm_task_switch);
5097
5098 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5099                                   struct kvm_sregs *sregs)
5100 {
5101         int mmu_reset_needed = 0;
5102         int pending_vec, max_bits;
5103         struct desc_ptr dt;
5104
5105         dt.size = sregs->idt.limit;
5106         dt.address = sregs->idt.base;
5107         kvm_x86_ops->set_idt(vcpu, &dt);
5108         dt.size = sregs->gdt.limit;
5109         dt.address = sregs->gdt.base;
5110         kvm_x86_ops->set_gdt(vcpu, &dt);
5111
5112         vcpu->arch.cr2 = sregs->cr2;
5113         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
5114         vcpu->arch.cr3 = sregs->cr3;
5115
5116         kvm_set_cr8(vcpu, sregs->cr8);
5117
5118         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
5119         kvm_x86_ops->set_efer(vcpu, sregs->efer);
5120         kvm_set_apic_base(vcpu, sregs->apic_base);
5121
5122         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
5123         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
5124         vcpu->arch.cr0 = sregs->cr0;
5125
5126         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5127         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5128         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5129                 load_pdptrs(vcpu, vcpu->arch.cr3);
5130                 mmu_reset_needed = 1;
5131         }
5132
5133         if (mmu_reset_needed)
5134                 kvm_mmu_reset_context(vcpu);
5135
5136         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5137         pending_vec = find_first_bit(
5138                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5139         if (pending_vec < max_bits) {
5140                 kvm_queue_interrupt(vcpu, pending_vec, false);
5141                 pr_debug("Set back pending irq %d\n", pending_vec);
5142                 if (irqchip_in_kernel(vcpu->kvm))
5143                         kvm_pic_clear_isr_ack(vcpu->kvm);
5144         }
5145
5146         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5147         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5148         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5149         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5150         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5151         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5152
5153         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5154         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5155
5156         update_cr8_intercept(vcpu);
5157
5158         /* Older userspace won't unhalt the vcpu on reset. */
5159         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
5160             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
5161             !is_protmode(vcpu))
5162                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5163
5164         return 0;
5165 }
5166
5167 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5168                                         struct kvm_guest_debug *dbg)
5169 {
5170         unsigned long rflags;
5171         int i, r;
5172
5173         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5174                 r = -EBUSY;
5175                 if (vcpu->arch.exception.pending)
5176                         goto out;
5177                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5178                         kvm_queue_exception(vcpu, DB_VECTOR);
5179                 else
5180                         kvm_queue_exception(vcpu, BP_VECTOR);
5181         }
5182
5183         /*
5184          * Read rflags as long as potentially injected trace flags are still
5185          * filtered out.
5186          */
5187         rflags = kvm_get_rflags(vcpu);
5188
5189         vcpu->guest_debug = dbg->control;
5190         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5191                 vcpu->guest_debug = 0;
5192
5193         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5194                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5195                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5196                 vcpu->arch.switch_db_regs =
5197                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5198         } else {
5199                 for (i = 0; i < KVM_NR_DB_REGS; i++)
5200                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5201                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5202         }
5203
5204         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5205                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5206                         get_segment_base(vcpu, VCPU_SREG_CS);
5207
5208         /*
5209          * Trigger an rflags update that will inject or remove the trace
5210          * flags.
5211          */
5212         kvm_set_rflags(vcpu, rflags);
5213
5214         kvm_x86_ops->set_guest_debug(vcpu, dbg);
5215
5216         r = 0;
5217
5218 out:
5219
5220         return r;
5221 }
5222
5223 /*
5224  * Translate a guest virtual address to a guest physical address.
5225  */
5226 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5227                                     struct kvm_translation *tr)
5228 {
5229         unsigned long vaddr = tr->linear_address;
5230         gpa_t gpa;
5231         int idx;
5232
5233         idx = srcu_read_lock(&vcpu->kvm->srcu);
5234         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5235         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5236         tr->physical_address = gpa;
5237         tr->valid = gpa != UNMAPPED_GVA;
5238         tr->writeable = 1;
5239         tr->usermode = 0;
5240
5241         return 0;
5242 }
5243
5244 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5245 {
5246         struct i387_fxsave_struct *fxsave =
5247                         &vcpu->arch.guest_fpu.state->fxsave;
5248
5249         memcpy(fpu->fpr, fxsave->st_space, 128);
5250         fpu->fcw = fxsave->cwd;
5251         fpu->fsw = fxsave->swd;
5252         fpu->ftwx = fxsave->twd;
5253         fpu->last_opcode = fxsave->fop;
5254         fpu->last_ip = fxsave->rip;
5255         fpu->last_dp = fxsave->rdp;
5256         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5257
5258         return 0;
5259 }
5260
5261 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5262 {
5263         struct i387_fxsave_struct *fxsave =
5264                         &vcpu->arch.guest_fpu.state->fxsave;
5265
5266         memcpy(fxsave->st_space, fpu->fpr, 128);
5267         fxsave->cwd = fpu->fcw;
5268         fxsave->swd = fpu->fsw;
5269         fxsave->twd = fpu->ftwx;
5270         fxsave->fop = fpu->last_opcode;
5271         fxsave->rip = fpu->last_ip;
5272         fxsave->rdp = fpu->last_dp;
5273         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5274
5275         return 0;
5276 }
5277
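     /*
      * Allocate and initialize the guest FPU state; the guest xcr0 starts
      * out as XSTATE_FP so that it is always valid to load.
      */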
5278 int fx_init(struct kvm_vcpu *vcpu)
5279 {
5280         int err;
5281
5282         err = fpu_alloc(&vcpu->arch.guest_fpu);
5283         if (err)
5284                 return err;
5285
5286         fpu_finit(&vcpu->arch.guest_fpu);
5287
5288         /*
5289          * Ensure guest xcr0 is valid for loading
5290          */
5291         vcpu->arch.xcr0 = XSTATE_FP;
5292
5293         vcpu->arch.cr0 |= X86_CR0_ET;
5294
5295         return 0;
5296 }
5297 EXPORT_SYMBOL_GPL(fx_init);
5298
5299 static void fx_free(struct kvm_vcpu *vcpu)
5300 {
5301         fpu_free(&vcpu->arch.guest_fpu);
5302 }
5303
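     /*
      * Load the guest FPU/extended state before entering the guest; this
      * is a no-op if it is already loaded.
      */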
5304 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5305 {
5306         if (vcpu->guest_fpu_loaded)
5307                 return;
5308
5309         /*
5310          * Restore all possible states in the guest, and assume the
5311          * host may use all available xstate bits.  The guest xcr0 is
5312          * loaded later, by kvm_load_guest_xcr0().
5313          */
5314         kvm_put_guest_xcr0(vcpu);
5315         vcpu->guest_fpu_loaded = 1;
5316         unlazy_fpu(current);
5317         fpu_restore_checking(&vcpu->arch.guest_fpu);
5318         trace_kvm_fpu(1);
5319 }
5320
5321 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5322 {
5323         kvm_put_guest_xcr0(vcpu);
5324
5325         if (!vcpu->guest_fpu_loaded)
5326                 return;
5327
5328         vcpu->guest_fpu_loaded = 0;
5329         fpu_save_init(&vcpu->arch.guest_fpu);
5330         ++vcpu->stat.fpu_reload;
5331         kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
5332         trace_kvm_fpu(0);
5333 }
5334
5335 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5336 {
5337         if (vcpu->arch.time_page) {
5338                 kvm_release_page_dirty(vcpu->arch.time_page);
5339                 vcpu->arch.time_page = NULL;
5340         }
5341
5342         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
5343         fx_free(vcpu);
5344         kvm_x86_ops->vcpu_free(vcpu);
5345 }
5346
5347 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5348                                                 unsigned int id)
5349 {
5350         return kvm_x86_ops->vcpu_create(kvm, id);
5351 }
5352
5353 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5354 {
5355         int r;
5356
5357         vcpu->arch.mtrr_state.have_fixed = 1;
5358         vcpu_load(vcpu);
5359         r = kvm_arch_vcpu_reset(vcpu);
5360         if (r == 0)
5361                 r = kvm_mmu_setup(vcpu);
5362         vcpu_put(vcpu);
5363         if (r < 0)
5364                 goto free_vcpu;
5365
5366         return 0;
5367 free_vcpu:
5368         kvm_x86_ops->vcpu_free(vcpu);
5369         return r;
5370 }
5371
5372 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
5373 {
5374         vcpu_load(vcpu);
5375         kvm_mmu_unload(vcpu);
5376         vcpu_put(vcpu);
5377
5378         fx_free(vcpu);
5379         kvm_x86_ops->vcpu_free(vcpu);
5380 }
5381
5382 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5383 {
5384         vcpu->arch.nmi_pending = false;
5385         vcpu->arch.nmi_injected = false;
5386
5387         vcpu->arch.switch_db_regs = 0;
5388         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5389         vcpu->arch.dr6 = DR6_FIXED_1;
5390         vcpu->arch.dr7 = DR7_FIXED_1;
5391
5392         return kvm_x86_ops->vcpu_reset(vcpu);
5393 }
5394
5395 int kvm_arch_hardware_enable(void *garbage)
5396 {
5397         /*
5398          * Since this may be called from a hotplug notification,
5399          * we can't get the CPU frequency directly.
5400          */
5401         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5402                 int cpu = raw_smp_processor_id();
5403                 per_cpu(cpu_tsc_khz, cpu) = 0;
5404         }
5405
5406         kvm_shared_msr_cpu_online();
5407
5408         return kvm_x86_ops->hardware_enable(garbage);
5409 }
5410
5411 void kvm_arch_hardware_disable(void *garbage)
5412 {
5413         kvm_x86_ops->hardware_disable(garbage);
5414         drop_user_return_notifiers(garbage);
5415 }
5416
5417 int kvm_arch_hardware_setup(void)
5418 {
5419         return kvm_x86_ops->hardware_setup();
5420 }
5421
5422 void kvm_arch_hardware_unsetup(void)
5423 {
5424         kvm_x86_ops->hardware_unsetup();
5425 }
5426
5427 void kvm_arch_check_processor_compat(void *rtn)
5428 {
5429         kvm_x86_ops->check_processor_compatibility(rtn);
5430 }
5431
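     /*
      * Architecture-specific vcpu setup: allocate the pio bounce page, MMU
      * state, the in-kernel local APIC (when the irqchip is in the kernel)
      * and the MCE bank array.
      */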
5432 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5433 {
5434         struct page *page;
5435         struct kvm *kvm;
5436         int r;
5437
5438         BUG_ON(vcpu->kvm == NULL);
5439         kvm = vcpu->kvm;
5440
5441         vcpu->arch.emulate_ctxt.ops = &emulate_ops;
5442         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
5443         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
5444                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5445         else
5446                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
5447
5448         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5449         if (!page) {
5450                 r = -ENOMEM;
5451                 goto fail;
5452         }
5453         vcpu->arch.pio_data = page_address(page);
5454
5455         r = kvm_mmu_create(vcpu);
5456         if (r < 0)
5457                 goto fail_free_pio_data;
5458
5459         if (irqchip_in_kernel(kvm)) {
5460                 r = kvm_create_lapic(vcpu);
5461                 if (r < 0)
5462                         goto fail_mmu_destroy;
5463         }
5464
5465         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5466                                        GFP_KERNEL);
5467         if (!vcpu->arch.mce_banks) {
5468                 r = -ENOMEM;
5469                 goto fail_free_lapic;
5470         }
5471         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5472
5473         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
5474                 r = -ENOMEM;
                     goto fail_free_mce_banks;
             }
5475
5476         return 0;
5477 fail_free_mce_banks:
5478         kfree(vcpu->arch.mce_banks);
5479 fail_free_lapic:
5480         kvm_free_lapic(vcpu);
5481 fail_mmu_destroy:
5482         kvm_mmu_destroy(vcpu);
5483 fail_free_pio_data:
5484         free_page((unsigned long)vcpu->arch.pio_data);
5485 fail:
5486         return r;
5487 }
5488
5489 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5490 {
5491         int idx;
5492
5493         kfree(vcpu->arch.mce_banks);
5494         kvm_free_lapic(vcpu);
5495         idx = srcu_read_lock(&vcpu->kvm->srcu);
5496         kvm_mmu_destroy(vcpu);
5497         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5498         free_page((unsigned long)vcpu->arch.pio_data);
5499 }
5500
5501 struct kvm *kvm_arch_create_vm(void)
5502 {
5503         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5504
5505         if (!kvm)
5506                 return ERR_PTR(-ENOMEM);
5507
5508         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5509         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5510
5511         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5512         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5513
5514         spin_lock_init(&kvm->arch.tsc_write_lock);
5515
5516         return kvm;
5517 }
5518
5519 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5520 {
5521         vcpu_load(vcpu);
5522         kvm_mmu_unload(vcpu);
5523         vcpu_put(vcpu);
5524 }
5525
5526 static void kvm_free_vcpus(struct kvm *kvm)
5527 {
5528         unsigned int i;
5529         struct kvm_vcpu *vcpu;
5530
5531         /*
5532          * Unpin any mmu pages first.
5533          */
5534         kvm_for_each_vcpu(i, vcpu, kvm)
5535                 kvm_unload_vcpu_mmu(vcpu);
5536         kvm_for_each_vcpu(i, vcpu, kvm)
5537                 kvm_arch_vcpu_free(vcpu);
5538
5539         mutex_lock(&kvm->lock);
5540         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5541                 kvm->vcpus[i] = NULL;
5542
5543         atomic_set(&kvm->online_vcpus, 0);
5544         mutex_unlock(&kvm->lock);
5545 }
5546
5547 void kvm_arch_sync_events(struct kvm *kvm)
5548 {
5549         kvm_free_all_assigned_devices(kvm);
5550         kvm_free_pit(kvm);
5551 }
5552
5553 void kvm_arch_destroy_vm(struct kvm *kvm)
5554 {
5555         kvm_iommu_unmap_guest(kvm);
5556         kfree(kvm->arch.vpic);
5557         kfree(kvm->arch.vioapic);
5558         kvm_free_vcpus(kvm);
5559         kvm_free_physmem(kvm);
5560         if (kvm->arch.apic_access_page)
5561                 put_page(kvm->arch.apic_access_page);
5562         if (kvm->arch.ept_identity_pagetable)
5563                 put_page(kvm->arch.ept_identity_pagetable);
5564         cleanup_srcu_struct(&kvm->srcu);
5565         kfree(kvm);
5566 }
5567
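     /*
      * Validate and prepare a memory slot before it is installed.  For
      * legacy !user_alloc slots the backing memory is mmap()ed here on
      * behalf of userspace.
      */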
5568 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5569                                 struct kvm_memory_slot *memslot,
5570                                 struct kvm_memory_slot old,
5571                                 struct kvm_userspace_memory_region *mem,
5572                                 int user_alloc)
5573 {
5574         int npages = memslot->npages;
5575         int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
5576
5577         /* Prevent internal slot pages from being moved by fork()/COW. */
5578         if (memslot->id >= KVM_MEMORY_SLOTS)
5579                 map_flags = MAP_SHARED | MAP_ANONYMOUS;
5580
5581         /* To keep backward compatibility with older userspace,
5582          * x86 needs to handle the !user_alloc case.
5583          */
5584         if (!user_alloc) {
5585                 if (npages && !old.rmap) {
5586                         unsigned long userspace_addr;
5587
5588                         down_write(&current->mm->mmap_sem);
5589                         userspace_addr = do_mmap(NULL, 0,
5590                                                  npages * PAGE_SIZE,
5591                                                  PROT_READ | PROT_WRITE,
5592                                                  map_flags,
5593                                                  0);
5594                         up_write(&current->mm->mmap_sem);
5595
5596                         if (IS_ERR((void *)userspace_addr))
5597                                 return PTR_ERR((void *)userspace_addr);
5598
5599                         memslot->userspace_addr = userspace_addr;
5600                 }
5601         }
5602
5603
5604         return 0;
5605 }
5606
5607 void kvm_arch_commit_memory_region(struct kvm *kvm,
5608                                 struct kvm_userspace_memory_region *mem,
5609                                 struct kvm_memory_slot old,
5610                                 int user_alloc)
5611 {
5612
5613         int npages = mem->memory_size >> PAGE_SHIFT;
5614
5615         if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5616                 int ret;
5617
5618                 down_write(&current->mm->mmap_sem);
5619                 ret = do_munmap(current->mm, old.userspace_addr,
5620                                 old.npages * PAGE_SIZE);
5621                 up_write(&current->mm->mmap_sem);
5622                 if (ret < 0)
5623                         printk(KERN_WARNING
5624                                "kvm_vm_ioctl_set_memory_region: "
5625                                "failed to munmap memory\n");
5626         }
5627
5628         spin_lock(&kvm->mmu_lock);
5629         if (!kvm->arch.n_requested_mmu_pages) {
5630                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5631                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5632         }
5633
5634         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
5635         spin_unlock(&kvm->mmu_lock);
5636 }
5637
5638 void kvm_arch_flush_shadow(struct kvm *kvm)
5639 {
5640         kvm_mmu_zap_all(kvm);
5641         kvm_reload_remote_mmus(kvm);
5642 }
5643
5644 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5645 {
5646         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
5647                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5648                 || vcpu->arch.nmi_pending ||
5649                 (kvm_arch_interrupt_allowed(vcpu) &&
5650                  kvm_cpu_has_interrupt(vcpu));
5651 }
5652
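     /*
      * Wake up a blocked vcpu and, if it is currently executing guest code
      * on another CPU, send a reschedule IPI to force an exit from guest
      * mode.
      */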
5653 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5654 {
5655         int me;
5656         int cpu = vcpu->cpu;
5657
5658         if (waitqueue_active(&vcpu->wq)) {
5659                 wake_up_interruptible(&vcpu->wq);
5660                 ++vcpu->stat.halt_wakeup;
5661         }
5662
5663         me = get_cpu();
5664         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
5665                 if (atomic_xchg(&vcpu->guest_mode, 0))
5666                         smp_send_reschedule(cpu);
5667         put_cpu();
5668 }
5669
5670 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5671 {
5672         return kvm_x86_ops->interrupt_allowed(vcpu);
5673 }
5674
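     /*
      * Return true if the vcpu's current linear RIP (CS base + RIP)
      * matches @linear_rip; used to track the single-step origin.
      */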
5675 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
5676 {
5677         unsigned long current_rip = kvm_rip_read(vcpu) +
5678                 get_segment_base(vcpu, VCPU_SREG_CS);
5679
5680         return current_rip == linear_rip;
5681 }
5682 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
5683
5684 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
5685 {
5686         unsigned long rflags;
5687
5688         rflags = kvm_x86_ops->get_rflags(vcpu);
5689         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5690                 rflags &= ~X86_EFLAGS_TF;
5691         return rflags;
5692 }
5693 EXPORT_SYMBOL_GPL(kvm_get_rflags);
5694
5695 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5696 {
5697         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
5698             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
5699                 rflags |= X86_EFLAGS_TF;
5700         kvm_x86_ops->set_rflags(vcpu, rflags);
5701 }
5702 EXPORT_SYMBOL_GPL(kvm_set_rflags);
5703
5704 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
5705 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
5706 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
5707 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
5708 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
5709 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
5710 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
5711 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
5712 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
5713 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
5714 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
5715 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);