1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29
30 #include <linux/clocksource.h>
31 #include <linux/interrupt.h>
32 #include <linux/kvm.h>
33 #include <linux/fs.h>
34 #include <linux/vmalloc.h>
35 #include <linux/module.h>
36 #include <linux/mman.h>
37 #include <linux/highmem.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/cpufreq.h>
41 #include <linux/user-return-notifier.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/perf_event.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/pci.h>
48 #include <trace/events/kvm.h>
49
50 #define CREATE_TRACE_POINTS
51 #include "trace.h"
52
53 #include <asm/debugreg.h>
54 #include <asm/msr.h>
55 #include <asm/desc.h>
56 #include <asm/mtrr.h>
57 #include <asm/mce.h>
58 #include <asm/i387.h>
59 #include <asm/xcr.h>
60 #include <asm/pvclock.h>
61 #include <asm/div64.h>
62
63 #define MAX_IO_MSRS 256
64 #define KVM_MAX_MCE_BANKS 32
65 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
66
67 #define emul_to_vcpu(ctxt) \
68         container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
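/*
 * Illustrative sketch (not part of the original file): emul_to_vcpu() is the
 * usual container_of() pattern.  Given a pointer to the embedded emulation
 * context, it recovers the enclosing vcpu:
 *
 *	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 *	struct kvm_vcpu *same_vcpu = emul_to_vcpu(ctxt);   (== vcpu)
 */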
69
70 /* EFER defaults:
71  * - enable SYSCALL by default because it is emulated by KVM
72  * - enable LME and LMA by default on 64-bit KVM
73  */
74 #ifdef CONFIG_X86_64
75 static
76 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
77 #else
78 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
79 #endif
80
81 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
82 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
83
84 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
85 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
86                                     struct kvm_cpuid_entry2 __user *entries);
87 static void process_nmi(struct kvm_vcpu *vcpu);
88
89 struct kvm_x86_ops *kvm_x86_ops;
90 EXPORT_SYMBOL_GPL(kvm_x86_ops);
91
92 bool ignore_msrs = 0;
93 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
94
95 unsigned int min_timer_period_us = 500;
96 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
97
98 bool kvm_has_tsc_control;
99 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
100 u32  kvm_max_guest_tsc_khz;
101 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
102
103 #define KVM_NR_SHARED_MSRS 16
104
105 struct kvm_shared_msrs_global {
106         int nr;
107         u32 msrs[KVM_NR_SHARED_MSRS];
108 };
109
110 struct kvm_shared_msrs {
111         struct user_return_notifier urn;
112         bool registered;
113         struct kvm_shared_msr_values {
114                 u64 host;
115                 u64 curr;
116         } values[KVM_NR_SHARED_MSRS];
117 };
118
119 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
120 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
121
122 struct kvm_stats_debugfs_item debugfs_entries[] = {
123         { "pf_fixed", VCPU_STAT(pf_fixed) },
124         { "pf_guest", VCPU_STAT(pf_guest) },
125         { "tlb_flush", VCPU_STAT(tlb_flush) },
126         { "invlpg", VCPU_STAT(invlpg) },
127         { "exits", VCPU_STAT(exits) },
128         { "io_exits", VCPU_STAT(io_exits) },
129         { "mmio_exits", VCPU_STAT(mmio_exits) },
130         { "signal_exits", VCPU_STAT(signal_exits) },
131         { "irq_window", VCPU_STAT(irq_window_exits) },
132         { "nmi_window", VCPU_STAT(nmi_window_exits) },
133         { "halt_exits", VCPU_STAT(halt_exits) },
134         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
135         { "hypercalls", VCPU_STAT(hypercalls) },
136         { "request_irq", VCPU_STAT(request_irq_exits) },
137         { "irq_exits", VCPU_STAT(irq_exits) },
138         { "host_state_reload", VCPU_STAT(host_state_reload) },
139         { "efer_reload", VCPU_STAT(efer_reload) },
140         { "fpu_reload", VCPU_STAT(fpu_reload) },
141         { "insn_emulation", VCPU_STAT(insn_emulation) },
142         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
143         { "irq_injections", VCPU_STAT(irq_injections) },
144         { "nmi_injections", VCPU_STAT(nmi_injections) },
145         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
146         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
147         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
148         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
149         { "mmu_flooded", VM_STAT(mmu_flooded) },
150         { "mmu_recycled", VM_STAT(mmu_recycled) },
151         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
152         { "mmu_unsync", VM_STAT(mmu_unsync) },
153         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
154         { "largepages", VM_STAT(lpages) },
155         { NULL }
156 };
157
158 u64 __read_mostly host_xcr0;
159
160 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
161
162 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
163 {
164         int i;
165         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
166                 vcpu->arch.apf.gfns[i] = ~0;
167 }
168
169 static void kvm_on_user_return(struct user_return_notifier *urn)
170 {
171         unsigned slot;
172         struct kvm_shared_msrs *locals
173                 = container_of(urn, struct kvm_shared_msrs, urn);
174         struct kvm_shared_msr_values *values;
175
176         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
177                 values = &locals->values[slot];
178                 if (values->host != values->curr) {
179                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
180                         values->curr = values->host;
181                 }
182         }
183         locals->registered = false;
184         user_return_notifier_unregister(urn);
185 }
186
187 static void shared_msr_update(unsigned slot, u32 msr)
188 {
189         struct kvm_shared_msrs *smsr;
190         u64 value;
191
192         smsr = &__get_cpu_var(shared_msrs);
193         /* Reads only; nobody should be modifying shared_msrs_global at
194          * this point, so no lock is needed. */
195         if (slot >= shared_msrs_global.nr) {
196                 printk(KERN_ERR "kvm: invalid MSR slot!");
197                 return;
198         }
199         rdmsrl_safe(msr, &value);
200         smsr->values[slot].host = value;
201         smsr->values[slot].curr = value;
202 }
203
204 void kvm_define_shared_msr(unsigned slot, u32 msr)
205 {
206         if (slot >= shared_msrs_global.nr)
207                 shared_msrs_global.nr = slot + 1;
208         shared_msrs_global.msrs[slot] = msr;
209         /* ensure shared_msrs_global has been updated before it is read */
210         smp_wmb();
211 }
212 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
213
214 static void kvm_shared_msr_cpu_online(void)
215 {
216         unsigned i;
217
218         for (i = 0; i < shared_msrs_global.nr; ++i)
219                 shared_msr_update(i, shared_msrs_global.msrs[i]);
220 }
221
222 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
223 {
224         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
225
226         if (((value ^ smsr->values[slot].curr) & mask) == 0)
227                 return;
228         smsr->values[slot].curr = value;
229         wrmsrl(shared_msrs_global.msrs[slot], value);
230         if (!smsr->registered) {
231                 smsr->urn.on_user_return = kvm_on_user_return;
232                 user_return_notifier_register(&smsr->urn);
233                 smsr->registered = true;
234         }
235 }
236 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
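/*
 * Usage sketch (illustrative, assuming a vendor module such as vmx.c): at
 * hardware-setup time each MSR that is shared between host and guest gets a
 * fixed slot, and on the entry path the guest value is installed lazily:
 *
 *	kvm_define_shared_msr(0, MSR_SYSCALL_MASK);	// module init
 *	...
 *	kvm_set_shared_msr(0, guest_val, mask);		// vcpu entry path
 *
 * The wrmsr back to the host value happens only in kvm_on_user_return(),
 * i.e. just before returning to userspace, not on every vmexit.
 */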
237
238 static void drop_user_return_notifiers(void *ignore)
239 {
240         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
241
242         if (smsr->registered)
243                 kvm_on_user_return(&smsr->urn);
244 }
245
246 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
247 {
248         return vcpu->arch.apic_base;
249 }
253 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
254
255 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
256 {
257         /* TODO: reserve bits check */
258         if (irqchip_in_kernel(vcpu->kvm))
259                 kvm_lapic_set_base(vcpu, data);
260         else
261                 vcpu->arch.apic_base = data;
262 }
263 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
264
265 #define EXCPT_BENIGN            0
266 #define EXCPT_CONTRIBUTORY      1
267 #define EXCPT_PF                2
268
269 static int exception_class(int vector)
270 {
271         switch (vector) {
272         case PF_VECTOR:
273                 return EXCPT_PF;
274         case DE_VECTOR:
275         case TS_VECTOR:
276         case NP_VECTOR:
277         case SS_VECTOR:
278         case GP_VECTOR:
279                 return EXCPT_CONTRIBUTORY;
280         default:
281                 break;
282         }
283         return EXCPT_BENIGN;
284 }
285
286 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
287                 unsigned nr, bool has_error, u32 error_code,
288                 bool reinject)
289 {
290         u32 prev_nr;
291         int class1, class2;
292
293         kvm_make_request(KVM_REQ_EVENT, vcpu);
294
295         if (!vcpu->arch.exception.pending) {
296         queue:
297                 vcpu->arch.exception.pending = true;
298                 vcpu->arch.exception.has_error_code = has_error;
299                 vcpu->arch.exception.nr = nr;
300                 vcpu->arch.exception.error_code = error_code;
301                 vcpu->arch.exception.reinject = reinject;
302                 return;
303         }
304
305         /* an exception is already pending; see how it combines with the new one */
306         prev_nr = vcpu->arch.exception.nr;
307         if (prev_nr == DF_VECTOR) {
308                 /* triple fault -> shutdown */
309                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
310                 return;
311         }
312         class1 = exception_class(prev_nr);
313         class2 = exception_class(nr);
314         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
315                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
316                 /* generate double fault per SDM Table 5-5 */
317                 vcpu->arch.exception.pending = true;
318                 vcpu->arch.exception.has_error_code = true;
319                 vcpu->arch.exception.nr = DF_VECTOR;
320                 vcpu->arch.exception.error_code = 0;
321         } else
322                 /* replace the previous exception with the new one in the
323                    hope that instruction re-execution will regenerate the
324                    lost exception */
325                 goto queue;
326 }
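/*
 * Summary of the combination rules implemented above (SDM Table 5-5,
 * reproduced here for illustration):
 *
 *	first \ second	| benign       | contributory | page fault
 *	----------------+--------------+--------------+------------
 *	benign		| deliver 2nd  | deliver 2nd  | deliver 2nd
 *	contributory	| deliver 2nd  | #DF          | deliver 2nd
 *	page fault	| deliver 2nd  | #DF          | #DF
 *	#DF		| triple fault (shutdown)
 */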
327
328 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
329 {
330         kvm_multiple_exception(vcpu, nr, false, 0, false);
331 }
332 EXPORT_SYMBOL_GPL(kvm_queue_exception);
333
334 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
335 {
336         kvm_multiple_exception(vcpu, nr, false, 0, true);
337 }
338 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
339
340 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
341 {
342         if (err)
343                 kvm_inject_gp(vcpu, 0);
344         else
345                 kvm_x86_ops->skip_emulated_instruction(vcpu);
346 }
347 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
348
349 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
350 {
351         ++vcpu->stat.pf_guest;
352         vcpu->arch.cr2 = fault->address;
353         kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
354 }
355 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
356
357 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
358 {
359         if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
360                 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
361         else
362                 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
363 }
364
365 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
366 {
367         atomic_inc(&vcpu->arch.nmi_queued);
368         kvm_make_request(KVM_REQ_NMI, vcpu);
369 }
370 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
371
372 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
373 {
374         kvm_multiple_exception(vcpu, nr, true, error_code, false);
375 }
376 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
377
378 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
379 {
380         kvm_multiple_exception(vcpu, nr, true, error_code, true);
381 }
382 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
383
384 /*
385  * Check whether cpl <= required_cpl; if so, return true.  Otherwise queue
386  * a #GP and return false.
387  */
388 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
389 {
390         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
391                 return true;
392         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
393         return false;
394 }
395 EXPORT_SYMBOL_GPL(kvm_require_cpl);
396
397 /*
398  * This function reads from the physical memory of the currently running
399  * guest. Unlike kvm_read_guest_page, it can read from guest physical
400  * memory or from the guest's nested (guest's guest) physical memory.
401  */
402 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
403                             gfn_t ngfn, void *data, int offset, int len,
404                             u32 access)
405 {
406         gfn_t real_gfn;
407         gpa_t ngpa;
408
409         ngpa     = gfn_to_gpa(ngfn);
410         real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
411         if (real_gfn == UNMAPPED_GVA)
412                 return -EFAULT;
413
414         real_gfn = gpa_to_gfn(real_gfn);
415
416         return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
417 }
418 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
419
420 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
421                                void *data, int offset, int len, u32 access)
422 {
423         return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
424                                        data, offset, len, access);
425 }
426
427 /*
428  * Load the PAE pdptrs.  Return true if they are all valid.
429  */
430 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
431 {
432         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
433         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
434         int i;
435         int ret;
436         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
437
438         ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
439                                       offset * sizeof(u64), sizeof(pdpte),
440                                       PFERR_USER_MASK|PFERR_WRITE_MASK);
441         if (ret < 0) {
442                 ret = 0;
443                 goto out;
444         }
445         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
446                 if (is_present_gpte(pdpte[i]) &&
447                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
448                         ret = 0;
449                         goto out;
450                 }
451         }
452         ret = 1;
453
454         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
455         __set_bit(VCPU_EXREG_PDPTR,
456                   (unsigned long *)&vcpu->arch.regs_avail);
457         __set_bit(VCPU_EXREG_PDPTR,
458                   (unsigned long *)&vcpu->arch.regs_dirty);
459 out:
460
461         return ret;
462 }
463 EXPORT_SYMBOL_GPL(load_pdptrs);
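/*
 * Worked example (illustrative): in PAE mode CR3 bits 31:5 point at a
 * 32-byte-aligned table of four 8-byte PDPTEs.  For cr3 = 0x12345fe0:
 *
 *	pdpt_gfn = 0x12345fe0 >> PAGE_SHIFT    = 0x12345
 *	offset   = (0xfe0 >> 5) << 2           = 0x1fc   (in u64 entries)
 *	byte off = offset * sizeof(u64)        = 0xfe0
 *
 * i.e. the read lands exactly on the 32-byte block selected by the low CR3
 * bits, and sizeof(pdpte) = 4 * 8 = 32 bytes covers all four entries.
 */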
464
465 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
466 {
467         u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
468         bool changed = true;
469         int offset;
470         gfn_t gfn;
471         int r;
472
473         if (is_long_mode(vcpu) || !is_pae(vcpu))
474                 return false;
475
476         if (!test_bit(VCPU_EXREG_PDPTR,
477                       (unsigned long *)&vcpu->arch.regs_avail))
478                 return true;
479
480         gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
481         offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
482         r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
483                                        PFERR_USER_MASK | PFERR_WRITE_MASK);
484         if (r < 0)
485                 goto out;
486         changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
487 out:
488
489         return changed;
490 }
491
492 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
493 {
494         unsigned long old_cr0 = kvm_read_cr0(vcpu);
495         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
496                                     X86_CR0_CD | X86_CR0_NW;
497
498         cr0 |= X86_CR0_ET;
499
500 #ifdef CONFIG_X86_64
501         if (cr0 & 0xffffffff00000000UL)
502                 return 1;
503 #endif
504
505         cr0 &= ~CR0_RESERVED_BITS;
506
507         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
508                 return 1;
509
510         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
511                 return 1;
512
513         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
514 #ifdef CONFIG_X86_64
515                 if ((vcpu->arch.efer & EFER_LME)) {
516                         int cs_db, cs_l;
517
518                         if (!is_pae(vcpu))
519                                 return 1;
520                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
521                         if (cs_l)
522                                 return 1;
523                 } else
524 #endif
525                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
526                                                  kvm_read_cr3(vcpu)))
527                         return 1;
528         }
529
530         kvm_x86_ops->set_cr0(vcpu, cr0);
531
532         if ((cr0 ^ old_cr0) & X86_CR0_PG) {
533                 kvm_clear_async_pf_completion_queue(vcpu);
534                 kvm_async_pf_hash_reset(vcpu);
535         }
536
537         if ((cr0 ^ old_cr0) & update_bits)
538                 kvm_mmu_reset_context(vcpu);
539         return 0;
540 }
541 EXPORT_SYMBOL_GPL(kvm_set_cr0);
542
543 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
544 {
545         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
546 }
547 EXPORT_SYMBOL_GPL(kvm_lmsw);
548
549 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
550 {
551         u64 xcr0;
552
553         /* Only XCR_XFEATURE_ENABLED_MASK (i.e. xcr0) is supported for now */
554         if (index != XCR_XFEATURE_ENABLED_MASK)
555                 return 1;
556         xcr0 = xcr;
557         if (!(xcr0 & XSTATE_FP))
558                 return 1;
559         if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
560                 return 1;
561         if (xcr0 & ~host_xcr0)
562                 return 1;
563         vcpu->arch.xcr0 = xcr0;
564         vcpu->guest_xcr0_loaded = 0;
565         return 0;
566 }
567
568 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
569 {
570         if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
571             __kvm_set_xcr(vcpu, index, xcr)) {
572                 kvm_inject_gp(vcpu, 0);
573                 return 1;
574         }
575         return 0;
576 }
577 EXPORT_SYMBOL_GPL(kvm_set_xcr);
578
579 static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
580 {
581         struct kvm_cpuid_entry2 *best;
582
583         if (!static_cpu_has(X86_FEATURE_XSAVE))
584                 return 0;
585
586         best = kvm_find_cpuid_entry(vcpu, 1, 0);
587         return best && (best->ecx & bit(X86_FEATURE_XSAVE));
588 }
589
590 static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
591 {
592         struct kvm_cpuid_entry2 *best;
593
594         best = kvm_find_cpuid_entry(vcpu, 7, 0);
595         return best && (best->ebx & bit(X86_FEATURE_SMEP));
596 }
597
598 static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
599 {
600         struct kvm_cpuid_entry2 *best;
601
602         best = kvm_find_cpuid_entry(vcpu, 7, 0);
603         return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
604 }
605
606 static void update_cpuid(struct kvm_vcpu *vcpu)
607 {
608         struct kvm_cpuid_entry2 *best;
609         struct kvm_lapic *apic = vcpu->arch.apic;
610
611         best = kvm_find_cpuid_entry(vcpu, 1, 0);
612         if (!best)
613                 return;
614
615         /* Update OSXSAVE bit */
616         if (cpu_has_xsave && best->function == 0x1) {
617                 best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
618                 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
619                         best->ecx |= bit(X86_FEATURE_OSXSAVE);
620         }
621
622         if (apic) {
623                 if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
624                         apic->lapic_timer.timer_mode_mask = 3 << 17;
625                 else
626                         apic->lapic_timer.timer_mode_mask = 1 << 17;
627         }
628 }
629
630 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
631 {
632         unsigned long old_cr4 = kvm_read_cr4(vcpu);
633         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
634                                    X86_CR4_PAE | X86_CR4_SMEP;
635         if (cr4 & CR4_RESERVED_BITS)
636                 return 1;
637
638         if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
639                 return 1;
640
641         if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
642                 return 1;
643
644         if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
645                 return 1;
646
647         if (is_long_mode(vcpu)) {
648                 if (!(cr4 & X86_CR4_PAE))
649                         return 1;
650         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
651                    && ((cr4 ^ old_cr4) & pdptr_bits)
652                    && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
653                                    kvm_read_cr3(vcpu)))
654                 return 1;
655
656         if (kvm_x86_ops->set_cr4(vcpu, cr4))
657                 return 1;
658
659         if ((cr4 ^ old_cr4) & pdptr_bits)
660                 kvm_mmu_reset_context(vcpu);
661
662         if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
663                 update_cpuid(vcpu);
664
665         return 0;
666 }
667 EXPORT_SYMBOL_GPL(kvm_set_cr4);
668
669 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
670 {
671         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
672                 kvm_mmu_sync_roots(vcpu);
673                 kvm_mmu_flush_tlb(vcpu);
674                 return 0;
675         }
676
677         if (is_long_mode(vcpu)) {
678                 if (cr3 & CR3_L_MODE_RESERVED_BITS)
679                         return 1;
680         } else {
681                 if (is_pae(vcpu)) {
682                         if (cr3 & CR3_PAE_RESERVED_BITS)
683                                 return 1;
684                         if (is_paging(vcpu) &&
685                             !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
686                                 return 1;
687                 }
688                 /*
689                  * We don't check reserved bits in nonpae mode, because
690                  * this isn't enforced, and VMware depends on this.
691                  */
692         }
693
694         /*
695          * Does the new cr3 value map to physical memory? (Note, we
696          * catch an invalid cr3 even in real-mode, because it would
697          * cause trouble later on when we turn on paging anyway.)
698          *
699          * A real CPU would silently accept an invalid cr3 and would
700          * attempt to use it - with largely undefined (and often hard
701          * to debug) behavior on the guest side.
702          */
703         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
704                 return 1;
705         vcpu->arch.cr3 = cr3;
706         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
707         vcpu->arch.mmu.new_cr3(vcpu);
708         return 0;
709 }
710 EXPORT_SYMBOL_GPL(kvm_set_cr3);
711
712 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
713 {
714         if (cr8 & CR8_RESERVED_BITS)
715                 return 1;
716         if (irqchip_in_kernel(vcpu->kvm))
717                 kvm_lapic_set_tpr(vcpu, cr8);
718         else
719                 vcpu->arch.cr8 = cr8;
720         return 0;
721 }
722 EXPORT_SYMBOL_GPL(kvm_set_cr8);
723
724 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
725 {
726         if (irqchip_in_kernel(vcpu->kvm))
727                 return kvm_lapic_get_cr8(vcpu);
728         else
729                 return vcpu->arch.cr8;
730 }
731 EXPORT_SYMBOL_GPL(kvm_get_cr8);
732
733 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
734 {
735         switch (dr) {
736         case 0 ... 3:
737                 vcpu->arch.db[dr] = val;
738                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
739                         vcpu->arch.eff_db[dr] = val;
740                 break;
741         case 4:
742                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
743                         return 1; /* #UD */
744                 /* fall through */
745         case 6:
746                 if (val & 0xffffffff00000000ULL)
747                         return -1; /* #GP */
748                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
749                 break;
750         case 5:
751                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
752                         return 1; /* #UD */
753                 /* fall through */
754         default: /* 7 */
755                 if (val & 0xffffffff00000000ULL)
756                         return -1; /* #GP */
757                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
758                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
759                         kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
760                         vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
761                 }
762                 break;
763         }
764
765         return 0;
766 }
767
768 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
769 {
770         int res;
771
772         res = __kvm_set_dr(vcpu, dr, val);
773         if (res > 0)
774                 kvm_queue_exception(vcpu, UD_VECTOR);
775         else if (res < 0)
776                 kvm_inject_gp(vcpu, 0);
777
778         return res;
779 }
780 EXPORT_SYMBOL_GPL(kvm_set_dr);
781
782 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
783 {
784         switch (dr) {
785         case 0 ... 3:
786                 *val = vcpu->arch.db[dr];
787                 break;
788         case 4:
789                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
790                         return 1;
791                 /* fall through */
792         case 6:
793                 *val = vcpu->arch.dr6;
794                 break;
795         case 5:
796                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
797                         return 1;
798                 /* fall through */
799         default: /* 7 */
800                 *val = vcpu->arch.dr7;
801                 break;
802         }
803
804         return 0;
805 }
806
807 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
808 {
809         if (_kvm_get_dr(vcpu, dr, val)) {
810                 kvm_queue_exception(vcpu, UD_VECTOR);
811                 return 1;
812         }
813         return 0;
814 }
815 EXPORT_SYMBOL_GPL(kvm_get_dr);
816
817 /*
818  * List of MSR numbers which we expose to userspace through KVM_GET_MSRS,
819  * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
820  *
821  * This list is modified at module load time to reflect the
822  * capabilities of the host cpu. The capabilities test skips MSRs that are
823  * kvm-specific; those are placed at the beginning of the list.
824  */
825
826 #define KVM_SAVE_MSRS_BEGIN     9
827 static u32 msrs_to_save[] = {
828         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
829         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
830         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
831         HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
832         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
833         MSR_STAR,
834 #ifdef CONFIG_X86_64
835         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
836 #endif
837         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
838 };
839
840 static unsigned num_msrs_to_save;
841
842 static u32 emulated_msrs[] = {
843         MSR_IA32_TSCDEADLINE,
844         MSR_IA32_MISC_ENABLE,
845         MSR_IA32_MCG_STATUS,
846         MSR_IA32_MCG_CTL,
847 };
848
849 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
850 {
851         u64 old_efer = vcpu->arch.efer;
852
853         if (efer & efer_reserved_bits)
854                 return 1;
855
856         if (is_paging(vcpu)
857             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
858                 return 1;
859
860         if (efer & EFER_FFXSR) {
861                 struct kvm_cpuid_entry2 *feat;
862
863                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
864                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
865                         return 1;
866         }
867
868         if (efer & EFER_SVME) {
869                 struct kvm_cpuid_entry2 *feat;
870
871                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
872                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
873                         return 1;
874         }
875
876         efer &= ~EFER_LMA;
877         efer |= vcpu->arch.efer & EFER_LMA;
878
879         kvm_x86_ops->set_efer(vcpu, efer);
880
881         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
882
883         /* Update reserved bits */
884         if ((efer ^ old_efer) & EFER_NX)
885                 kvm_mmu_reset_context(vcpu);
886
887         return 0;
888 }
889
890 void kvm_enable_efer_bits(u64 mask)
891 {
892        efer_reserved_bits &= ~mask;
893 }
894 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
895
896 /*
897  * Writes the MSR value into the appropriate "register".
898  * Returns 0 on success, non-0 otherwise.
899  * Assumes vcpu_load() was already called.
900  */
901 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
902 {
903         switch (msr_index) {
904         case MSR_FS_BASE:
905         case MSR_GS_BASE:
906         case MSR_KERNEL_GS_BASE:
907         case MSR_CSTAR:
908         case MSR_LSTAR:
909                 if (is_noncanonical_address(data))
910                         return 1;
911                 break;
912         case MSR_IA32_SYSENTER_EIP:
913         case MSR_IA32_SYSENTER_ESP:
914                 /*
915                  * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
916                  * non-canonical address is written on Intel but not on
917                  * AMD (which ignores the top 32-bits, because it does
918                  * not implement 64-bit SYSENTER).
919                  *
920                  * 64-bit code should hence be able to write a non-canonical
921                  * value on AMD.  Making the address canonical ensures that
922                  * vmentry does not fail on Intel after writing a non-canonical
923                  * value, and that something deterministic happens if the guest
924                  * invokes 64-bit SYSENTER.
925                  */
926                 data = get_canonical(data);
927         }
928         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
929 }
930 EXPORT_SYMBOL_GPL(kvm_set_msr);
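/*
 * For illustration: with 48 implemented virtual-address bits, a "canonical"
 * address has bits 63:47 all equal to bit 47.  A get_canonical() helper like
 * the one used above can be written as a sign extension (sketch, assuming a
 * 48-bit virtual address width):
 *
 *	static u64 get_canonical(u64 la)
 *	{
 *		return ((int64_t)la << 16) >> 16;
 *	}
 */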
931
932 /*
933  * Adapt set_msr() to msr_io()'s calling convention
934  */
935 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
936 {
937         return kvm_set_msr(vcpu, index, *data);
938 }
939
940 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
941 {
942         int version;
943         int r;
944         struct pvclock_wall_clock wc;
945         struct timespec boot;
946
947         if (!wall_clock)
948                 return;
949
950         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
951         if (r)
952                 return;
953
954         if (version & 1)
955                 ++version;  /* first time write, random junk */
956
957         ++version;
958
959         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
960
961         /*
962          * The guest calculates current wall clock time by adding
963          * system time (updated by kvm_guest_time_update below) to the
964          * wall clock specified here.  guest system time equals host
965          * system time for us, thus we must fill in host boot time here.
966          */
967         getboottime(&boot);
968
969         wc.sec = boot.tv_sec;
970         wc.nsec = boot.tv_nsec;
971         wc.version = version;
972
973         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
974
975         version++;
976         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
977 }
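/*
 * The version field implements a lock-free seqcount: it is odd while an
 * update is in flight and even when the contents are stable.  A guest
 * reader follows the usual pattern (sketch of the pvclock read side):
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec  = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((wc->version != version) || (version & 1));
 */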
978
979 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
980 {
981         uint32_t quotient, remainder;
982
983         /* Don't try to replace this with do_div(); this one calculates
984          * "(dividend << 32) / divisor" */
985         __asm__ ( "divl %4"
986                   : "=a" (quotient), "=d" (remainder)
987                   : "0" (0), "1" (dividend), "r" (divisor) );
988         return quotient;
989 }
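/*
 * Equivalent portable expression (for illustration; the asm form avoids a
 * full 64-by-64 division on 32-bit hosts):
 *
 *	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
 */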
990
991 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
992                                s8 *pshift, u32 *pmultiplier)
993 {
994         uint64_t scaled64;
995         int32_t  shift = 0;
996         uint64_t tps64;
997         uint32_t tps32;
998
999         tps64 = base_khz * 1000LL;
1000         scaled64 = scaled_khz * 1000LL;
1001         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1002                 tps64 >>= 1;
1003                 shift--;
1004         }
1005
1006         tps32 = (uint32_t)tps64;
1007         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1008                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1009                         scaled64 >>= 1;
1010                 else
1011                         tps32 <<= 1;
1012                 shift++;
1013         }
1014
1015         *pshift = shift;
1016         *pmultiplier = div_frac(scaled64, tps32);
1017
1018         pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
1019                  __func__, base_khz, scaled_khz, shift, *pmultiplier);
1020 }
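/*
 * Worked example (illustrative): converting a 2 GHz TSC to nanoseconds,
 * i.e. kvm_get_time_scale(1000000, 2000000, &shift, &mul):
 *
 *	tps64 = 2e9, scaled64 = 1e9  ->  neither loop shifts, so shift = 0
 *	mul   = div_frac(1e9, 2e9)   =  0x80000000
 *
 * The consumer then computes ns = ((delta << shift) * mul) >> 32 = delta / 2,
 * the expected 0.5 ns per cycle at 2 GHz.
 */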
1021
1022 static inline u64 get_kernel_ns(void)
1023 {
1024         struct timespec ts;
1025
1026         WARN_ON(preemptible());
1027         ktime_get_ts(&ts);
1028         monotonic_to_bootbased(&ts);
1029         return timespec_to_ns(&ts);
1030 }
1031
1032 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1033 unsigned long max_tsc_khz;
1034
1035 static inline int kvm_tsc_changes_freq(void)
1036 {
1037         int cpu = get_cpu();
1038         int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1039                   cpufreq_quick_get(cpu) != 0;
1040         put_cpu();
1041         return ret;
1042 }
1043
1044 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
1045 {
1046         if (vcpu->arch.virtual_tsc_khz)
1047                 return vcpu->arch.virtual_tsc_khz;
1048         else
1049                 return __this_cpu_read(cpu_tsc_khz);
1050 }
1051
1052 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
1053 {
1054         u64 ret;
1055
1056         WARN_ON(preemptible());
1057         if (kvm_tsc_changes_freq())
1058                 printk_once(KERN_WARNING
1059                  "kvm: unreliable cycle conversion on adjustable rate TSC\n");
1060         ret = nsec * vcpu_tsc_khz(vcpu);
1061         do_div(ret, USEC_PER_SEC);
1062         return ret;
1063 }
1064
1065 static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
1066 {
1067         /* Compute a scale to convert nanoseconds in TSC cycles */
1068         kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
1069                            &vcpu->arch.tsc_catchup_shift,
1070                            &vcpu->arch.tsc_catchup_mult);
1071 }
1072
1073 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1074 {
1075         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
1076                                       vcpu->arch.tsc_catchup_mult,
1077                                       vcpu->arch.tsc_catchup_shift);
1078         tsc += vcpu->arch.last_tsc_write;
1079         return tsc;
1080 }
1081
1082 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
1083 {
1084         struct kvm *kvm = vcpu->kvm;
1085         u64 offset, ns, elapsed;
1086         unsigned long flags;
1087         s64 sdiff;
1088
1089         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1090         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1091         ns = get_kernel_ns();
1092         elapsed = ns - kvm->arch.last_tsc_nsec;
1093         sdiff = data - kvm->arch.last_tsc_write;
1094         if (sdiff < 0)
1095                 sdiff = -sdiff;
1096
1097         /*
1098          * Special case: a TSC write that lands within 5 seconds of a
1099          * write on another CPU is interpreted as an attempt to
1100          * synchronize.  The 5 seconds accommodates host load / swapping
1101          * as well as any reset of the TSC during the boot process.
1102          *
1103          * In that case, for a reliable TSC, we can match TSC offsets,
1104          * or make a best guess using the elapsed value.
1105          */
1106         if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
1107             elapsed < 5ULL * NSEC_PER_SEC) {
1108                 if (!check_tsc_unstable()) {
1109                         offset = kvm->arch.last_tsc_offset;
1110                         pr_debug("kvm: matched tsc offset for %llu\n", data);
1111                 } else {
1112                         u64 delta = nsec_to_cycles(vcpu, elapsed);
1113                         offset += delta;
1114                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1115                 }
1116                 ns = kvm->arch.last_tsc_nsec;
1117         }
1118         kvm->arch.last_tsc_nsec = ns;
1119         kvm->arch.last_tsc_write = data;
1120         kvm->arch.last_tsc_offset = offset;
1121         kvm_x86_ops->write_tsc_offset(vcpu, offset);
1122         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1123
1124         /* Reset of TSC must disable overshoot protection below */
1125         vcpu->arch.hv_clock.tsc_timestamp = 0;
1126         vcpu->arch.last_tsc_write = data;
1127         vcpu->arch.last_tsc_nsec = ns;
1128 }
1129 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1130
1131 static int kvm_guest_time_update(struct kvm_vcpu *v)
1132 {
1133         unsigned long flags;
1134         struct kvm_vcpu_arch *vcpu = &v->arch;
1135         unsigned long this_tsc_khz;
1136         s64 kernel_ns, max_kernel_ns;
1137         u64 tsc_timestamp;
1138
1139         /* Keep irq disabled to prevent changes to the clock */
1140         local_irq_save(flags);
1141         tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
1142         kernel_ns = get_kernel_ns();
1143         this_tsc_khz = vcpu_tsc_khz(v);
1144         if (unlikely(this_tsc_khz == 0)) {
1145                 local_irq_restore(flags);
1146                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1147                 return 1;
1148         }
1149
1150         /*
1151          * We may have to catch up the TSC to match elapsed wall clock
1152          * time for two reasons, even if kvmclock is used.
1153          *   1) CPU could have been running below the maximum TSC rate
1154          *   2) Broken TSC compensation resets the base at each VCPU
1155          *      entry to avoid unknown leaps of TSC even when running
1156          *      again on the same CPU.  This may cause apparent elapsed
1157          *      time to disappear, and the guest to stand still or run
1158          *      very slowly.
1159          */
1160         if (vcpu->tsc_catchup) {
1161                 u64 tsc = compute_guest_tsc(v, kernel_ns);
1162                 if (tsc > tsc_timestamp) {
1163                         kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
1164                         tsc_timestamp = tsc;
1165                 }
1166         }
1167
1168         local_irq_restore(flags);
1169
1170         if (!vcpu->pv_time_enabled)
1171                 return 0;
1172
1173         /*
1174          * Time as measured by the TSC may go backwards when resetting the base
1175          * tsc_timestamp.  The reason for this is that the TSC resolution is
1176          * higher than the resolution of the other clock scales.  Thus, many
1177          * possible measurments of the TSC correspond to one measurement of any
1178          * other clock, and so a spread of values is possible.  This is not a
1179          * problem for the computation of the nanosecond clock; with TSC rates
1180          * around 1GHZ, there can only be a few cycles which correspond to one
1181          * nanosecond value, and any path through this code will inevitably
1182          * take longer than that.  However, with the kernel_ns value itself,
1183          * the precision may be much lower, down to HZ granularity.  If the
1184          * first sampling of TSC against kernel_ns ends in the low part of the
1185          * range, and the second in the high end of the range, we can get:
1186          *
1187          * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1188          *
1189          * As the sampling errors potentially range in the thousands of cycles,
1190          * it is possible such a time value has already been observed by the
1191          * guest.  To protect against this, we must compute the system time as
1192          * observed by the guest and ensure the new system time is greater.
1193          */
1194         max_kernel_ns = 0;
1195         if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
1196                 max_kernel_ns = vcpu->last_guest_tsc -
1197                                 vcpu->hv_clock.tsc_timestamp;
1198                 max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1199                                     vcpu->hv_clock.tsc_to_system_mul,
1200                                     vcpu->hv_clock.tsc_shift);
1201                 max_kernel_ns += vcpu->last_kernel_ns;
1202         }
1203
1204         if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1205                 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1206                                    &vcpu->hv_clock.tsc_shift,
1207                                    &vcpu->hv_clock.tsc_to_system_mul);
1208                 vcpu->hw_tsc_khz = this_tsc_khz;
1209         }
1210
1211         if (max_kernel_ns > kernel_ns)
1212                 kernel_ns = max_kernel_ns;
1213
1214         /* With all the info we got, fill in the values */
1215         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1216         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1217         vcpu->last_kernel_ns = kernel_ns;
1218         vcpu->last_guest_tsc = tsc_timestamp;
1219         vcpu->hv_clock.flags = 0;
1220
1221         /*
1222          * The interface expects us to write an even number signaling that the
1223          * update is finished. Since the guest won't see the intermediate
1224          * state, we just increase by 2 at the end.
1225          */
1226         vcpu->hv_clock.version += 2;
1227
1228         kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1229                                 &vcpu->hv_clock,
1230                                 sizeof(vcpu->hv_clock));
1231         return 0;
1232 }
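/*
 * For reference, the guest consumes hv_clock as follows (sketch of the
 * pvclock algorithm, wrapped in the same even/odd version loop used for
 * the wall clock above):
 *
 *	delta = rdtsc() - tsc_timestamp;
 *	if (tsc_shift < 0)
 *		delta >>= -tsc_shift;
 *	else
 *		delta <<= tsc_shift;
 *	ns = system_time + ((delta * tsc_to_system_mul) >> 32);
 */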
1233
1234 static bool msr_mtrr_valid(unsigned msr)
1235 {
1236         switch (msr) {
1237         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1238         case MSR_MTRRfix64K_00000:
1239         case MSR_MTRRfix16K_80000:
1240         case MSR_MTRRfix16K_A0000:
1241         case MSR_MTRRfix4K_C0000:
1242         case MSR_MTRRfix4K_C8000:
1243         case MSR_MTRRfix4K_D0000:
1244         case MSR_MTRRfix4K_D8000:
1245         case MSR_MTRRfix4K_E0000:
1246         case MSR_MTRRfix4K_E8000:
1247         case MSR_MTRRfix4K_F0000:
1248         case MSR_MTRRfix4K_F8000:
1249         case MSR_MTRRdefType:
1250         case MSR_IA32_CR_PAT:
1251                 return true;
1252         case 0x2f8:
1253                 return true;
1254         }
1255         return false;
1256 }
1257
1258 static bool valid_pat_type(unsigned t)
1259 {
1260         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1261 }
1262
1263 static bool valid_mtrr_type(unsigned t)
1264 {
1265         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1266 }
1267
1268 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1269 {
1270         int i;
1271
1272         if (!msr_mtrr_valid(msr))
1273                 return false;
1274
1275         if (msr == MSR_IA32_CR_PAT) {
1276                 for (i = 0; i < 8; i++)
1277                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
1278                                 return false;
1279                 return true;
1280         } else if (msr == MSR_MTRRdefType) {
1281                 if (data & ~0xcff)
1282                         return false;
1283                 return valid_mtrr_type(data & 0xff);
1284         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1285                 for (i = 0; i < 8 ; i++)
1286                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1287                                 return false;
1288                 return true;
1289         }
1290
1291         /* variable MTRRs */
1292         return valid_mtrr_type(data & 0xff);
1293 }
1294
1295 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1296 {
1297         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1298
1299         if (!mtrr_valid(vcpu, msr, data))
1300                 return 1;
1301
1302         if (msr == MSR_MTRRdefType) {
1303                 vcpu->arch.mtrr_state.def_type = data;
1304                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1305         } else if (msr == MSR_MTRRfix64K_00000)
1306                 p[0] = data;
1307         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1308                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
1309         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1310                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1311         else if (msr == MSR_IA32_CR_PAT)
1312                 vcpu->arch.pat = data;
1313         else {  /* Variable MTRRs */
1314                 int idx, is_mtrr_mask;
1315                 u64 *pt;
1316
1317                 idx = (msr - 0x200) / 2;
1318                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1319                 if (!is_mtrr_mask)
1320                         pt =
1321                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1322                 else
1323                         pt =
1324                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1325                 *pt = data;
1326         }
1327
1328         kvm_mmu_reset_context(vcpu);
1329         return 0;
1330 }
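/*
 * Variable-range MTRR layout, for illustration: MSR 0x200 + 2n is
 * MTRRphysBase(n) and 0x200 + 2n + 1 is MTRRphysMask(n), so e.g.
 *
 *	msr = 0x205  ->  idx = (0x205 - 0x200) / 2 = 2, is_mtrr_mask = 1
 *
 * selects the mask register of variable range 2.
 */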
1331
1332 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1333 {
1334         u64 mcg_cap = vcpu->arch.mcg_cap;
1335         unsigned bank_num = mcg_cap & 0xff;
1336
1337         switch (msr) {
1338         case MSR_IA32_MCG_STATUS:
1339                 vcpu->arch.mcg_status = data;
1340                 break;
1341         case MSR_IA32_MCG_CTL:
1342                 if (!(mcg_cap & MCG_CTL_P))
1343                         return 1;
1344                 if (data != 0 && data != ~(u64)0)
1345                         return -1;
1346                 vcpu->arch.mcg_ctl = data;
1347                 break;
1348         default:
1349                 if (msr >= MSR_IA32_MC0_CTL &&
1350                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1351                         u32 offset = msr - MSR_IA32_MC0_CTL;
1352                         /* Only 0 or all 1s can be written to IA32_MCi_CTL.
1353                          * Some Linux kernels, though, clear bit 10 in bank 4
1354                          * to work around a BIOS/GART TBL issue on AMD K8s;
1355                          * ignore this to avoid an uncaught #GP in the guest
1356                          */
1357                         if ((offset & 0x3) == 0 &&
1358                             data != 0 && (data | (1 << 10)) != ~(u64)0)
1359                                 return -1;
1360                         vcpu->arch.mce_banks[offset] = data;
1361                         break;
1362                 }
1363                 return 1;
1364         }
1365         return 0;
1366 }
1367
1368 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1369 {
1370         struct kvm *kvm = vcpu->kvm;
1371         int lm = is_long_mode(vcpu);
1372         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1373                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1374         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1375                 : kvm->arch.xen_hvm_config.blob_size_32;
1376         u32 page_num = data & ~PAGE_MASK;
1377         u64 page_addr = data & PAGE_MASK;
1378         u8 *page;
1379         int r;
1380
1381         r = -E2BIG;
1382         if (page_num >= blob_size)
1383                 goto out;
1384         r = -ENOMEM;
1385         page = kzalloc(PAGE_SIZE, GFP_KERNEL);
1386         if (!page)
1387                 goto out;
1388         r = -EFAULT;
1389         if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
1390                 goto out_free;
1391         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1392                 goto out_free;
1393         r = 0;
1394 out_free:
1395         kfree(page);
1396 out:
1397         return r;
1398 }
1399
1400 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1401 {
1402         return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1403 }
1404
1405 static bool kvm_hv_msr_partition_wide(u32 msr)
1406 {
1407         bool r = false;
1408         switch (msr) {
1409         case HV_X64_MSR_GUEST_OS_ID:
1410         case HV_X64_MSR_HYPERCALL:
1411                 r = true;
1412                 break;
1413         }
1414
1415         return r;
1416 }
1417
1418 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1419 {
1420         struct kvm *kvm = vcpu->kvm;
1421
1422         switch (msr) {
1423         case HV_X64_MSR_GUEST_OS_ID:
1424                 kvm->arch.hv_guest_os_id = data;
1425                 /* setting guest os id to zero disables hypercall page */
1426                 if (!kvm->arch.hv_guest_os_id)
1427                         kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1428                 break;
1429         case HV_X64_MSR_HYPERCALL: {
1430                 u64 gfn;
1431                 unsigned long addr;
1432                 u8 instructions[4];
1433
1434                 /* if the guest os id is not set, the hypercall should remain disabled */
1435                 if (!kvm->arch.hv_guest_os_id)
1436                         break;
1437                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1438                         kvm->arch.hv_hypercall = data;
1439                         break;
1440                 }
1441                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1442                 addr = gfn_to_hva(kvm, gfn);
1443                 if (kvm_is_error_hva(addr))
1444                         return 1;
1445                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1446                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1447                 if (__copy_to_user((void __user *)addr, instructions, 4))
1448                         return 1;
1449                 kvm->arch.hv_hypercall = data;
1450                 break;
1451         }
1452         default:
1453                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1454                           "data 0x%llx\n", msr, data);
1455                 return 1;
1456         }
1457         return 0;
1458 }
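/*
 * After patching, the hypercall page begins with (illustrative; Intel
 * encoding shown, patch_hypercall() emits vmcall or vmmcall depending on
 * the vendor):
 *
 *	0f 01 c1	vmcall
 *	c3		ret	(the byte stored at instructions[3] above)
 */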
1459
1460 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1461 {
1462         switch (msr) {
1463         case HV_X64_MSR_APIC_ASSIST_PAGE: {
1464                 unsigned long addr;
1465
1466                 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1467                         vcpu->arch.hv_vapic = data;
1468                         break;
1469                 }
1470                 addr = gfn_to_hva(vcpu->kvm, data >>
1471                                   HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1472                 if (kvm_is_error_hva(addr))
1473                         return 1;
1474                 if (__clear_user((void __user *)addr, PAGE_SIZE))
1475                         return 1;
1476                 vcpu->arch.hv_vapic = data;
1477                 break;
1478         }
1479         case HV_X64_MSR_EOI:
1480                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1481         case HV_X64_MSR_ICR:
1482                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1483         case HV_X64_MSR_TPR:
1484                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1485         default:
1486                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1487                           "data 0x%llx\n", msr, data);
1488                 return 1;
1489         }
1490
1491         return 0;
1492 }
1493
1494 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1495 {
1496         gpa_t gpa = data & ~0x3f;
1497
1498         /* Bits 2:5 are reserved and should be zero */
1499         if (data & 0x3c)
1500                 return 1;
1501
1502         vcpu->arch.apf.msr_val = data;
1503
1504         if (!(data & KVM_ASYNC_PF_ENABLED)) {
1505                 kvm_clear_async_pf_completion_queue(vcpu);
1506                 kvm_async_pf_hash_reset(vcpu);
1507                 return 0;
1508         }
1509
1510         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
1511                                         sizeof(u32)))
1512                 return 1;
1513
1514         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
1515         kvm_async_pf_wakeup_all(vcpu);
1516         return 0;
1517 }
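/*
 * Layout of MSR_KVM_ASYNC_PF_EN, as decoded above (illustrative):
 *
 *	bit  0        KVM_ASYNC_PF_ENABLED
 *	bit  1        KVM_ASYNC_PF_SEND_ALWAYS (also deliver async page
 *	              faults while the guest runs at CPL 0)
 *	bits 2:5      reserved, must be zero (the data & 0x3c check)
 *	bits 6:63     physical address of a 32-bit per-vcpu word, 64-byte
 *	              aligned (gpa = data & ~0x3f)
 */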
1518
1519 static void kvmclock_reset(struct kvm_vcpu *vcpu)
1520 {
1521         vcpu->arch.pv_time_enabled = false;
1522 }
1523
1524 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
1525 {
1526         u64 delta;
1527
1528         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1529                 return;
1530
1531         delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
1532         vcpu->arch.st.last_steal = current->sched_info.run_delay;
1533         vcpu->arch.st.accum_steal = delta;
1534 }
1535
1536 static void record_steal_time(struct kvm_vcpu *vcpu)
1537 {
1538         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1539                 return;
1540
1541         if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1542                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
1543                 return;
1544
1545         vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
1546         vcpu->arch.st.steal.version += 2;
1547         vcpu->arch.st.accum_steal = 0;
1548
1549         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1550                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
1551 }
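/*
 * Illustrative sketch (not part of this file): how a guest would read
 * the steal time published above. The version field acts as a
 * seqcount; an odd value or a change across the read means an update
 * raced with us and the read must be retried. "st" points at the
 * shared struct kvm_steal_time.
 */
#if 0
static u64 guest_read_steal(struct kvm_steal_time *st)
{
        u32 version;
        u64 steal;

        do {
                version = st->version;
                rmb();          /* read version before payload */
                steal = st->steal;
                rmb();          /* read payload before re-checking */
        } while ((version & 1) || version != st->version);

        return steal;
}
#endif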
1552
1553 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1554 {
1555         switch (msr) {
1556         case MSR_EFER:
1557                 return set_efer(vcpu, data);
1558         case MSR_K7_HWCR:
1559                 data &= ~(u64)0x40;     /* ignore flush filter disable */
1560                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
1561                 if (data != 0) {
1562                         pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1563                                 data);
1564                         return 1;
1565                 }
1566                 break;
1567         case MSR_FAM10H_MMIO_CONF_BASE:
1568                 if (data != 0) {
1569                         pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1570                                 "0x%llx\n", data);
1571                         return 1;
1572                 }
1573                 break;
1574         case MSR_AMD64_NB_CFG:
1575                 break;
1576         case MSR_IA32_DEBUGCTLMSR:
1577                 if (!data) {
1578                         /* We support the non-activated case already */
1579                         break;
1580                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1581                         /* Values other than LBR and BTF are vendor-specific,
1582                            thus reserved and should throw a #GP */
1583                         return 1;
1584                 }
1585                 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1586                         __func__, data);
1587                 break;
1588         case MSR_IA32_UCODE_REV:
1589         case MSR_IA32_UCODE_WRITE:
1590         case MSR_VM_HSAVE_PA:
1591         case MSR_AMD64_PATCH_LOADER:
1592                 break;
1593         case 0x200 ... 0x2ff:
1594                 return set_msr_mtrr(vcpu, msr, data);
1595         case MSR_IA32_APICBASE:
1596                 kvm_set_apic_base(vcpu, data);
1597                 break;
1598         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1599                 return kvm_x2apic_msr_write(vcpu, msr, data);
1600         case MSR_IA32_TSCDEADLINE:
1601                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
1602                 break;
1603         case MSR_IA32_MISC_ENABLE:
1604                 vcpu->arch.ia32_misc_enable_msr = data;
1605                 break;
1606         case MSR_KVM_WALL_CLOCK_NEW:
1607         case MSR_KVM_WALL_CLOCK:
1608                 vcpu->kvm->arch.wall_clock = data;
1609                 kvm_write_wall_clock(vcpu->kvm, data);
1610                 break;
1611         case MSR_KVM_SYSTEM_TIME_NEW:
1612         case MSR_KVM_SYSTEM_TIME: {
1614                 kvmclock_reset(vcpu);
1615
1616                 vcpu->arch.time = data;
1617                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1618
1619                 /* check whether the enable bit is set */
1620                 if (!(data & 1))
1621                         break;
1622
1625                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1626                      &vcpu->arch.pv_time, data & ~1ULL,
1627                      sizeof(struct pvclock_vcpu_time_info)))
1628                         vcpu->arch.pv_time_enabled = false;
1629                 else
1630                         vcpu->arch.pv_time_enabled = true;
1631                 break;
1632         }
1633         case MSR_KVM_ASYNC_PF_EN:
1634                 if (kvm_pv_enable_async_pf(vcpu, data))
1635                         return 1;
1636                 break;
1637         case MSR_KVM_STEAL_TIME:
1638
1639                 if (unlikely(!sched_info_on()))
1640                         return 1;
1641
1642                 if (data & KVM_STEAL_RESERVED_MASK)
1643                         return 1;
1644
1645                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1646                                                 data & KVM_STEAL_VALID_BITS,
1647                                                 sizeof(struct kvm_steal_time)))
1648                         return 1;
1649
1650                 vcpu->arch.st.msr_val = data;
1651
1652                 if (!(data & KVM_MSR_ENABLED))
1653                         break;
1654
1655                 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1656
1657                 preempt_disable();
1658                 accumulate_steal_time(vcpu);
1659                 preempt_enable();
1660
1661                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1662
1663                 break;
1664
1665         case MSR_IA32_MCG_CTL:
1666         case MSR_IA32_MCG_STATUS:
1667         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1668                 return set_msr_mce(vcpu, msr, data);
1669
1670         /* Performance counters are not protected by a CPUID bit,
1671          * so we should check all of them in the generic path for the sake of
1672          * cross vendor migration.
1673          * Writing a zero into the event select MSRs disables them,
1674          * which we perfectly emulate ;-). Any other value should be at least
1675          * reported; some guests depend on them.
1676          */
1677         case MSR_P6_EVNTSEL0:
1678         case MSR_P6_EVNTSEL1:
1679         case MSR_K7_EVNTSEL0:
1680         case MSR_K7_EVNTSEL1:
1681         case MSR_K7_EVNTSEL2:
1682         case MSR_K7_EVNTSEL3:
1683                 if (data != 0)
1684                         pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1685                                 "0x%x data 0x%llx\n", msr, data);
1686                 break;
1687         /* at least RHEL 4 unconditionally writes to the perfctr registers,
1688          * so we ignore writes to make it happy.
1689          */
1690         case MSR_P6_PERFCTR0:
1691         case MSR_P6_PERFCTR1:
1692         case MSR_K7_PERFCTR0:
1693         case MSR_K7_PERFCTR1:
1694         case MSR_K7_PERFCTR2:
1695         case MSR_K7_PERFCTR3:
1696                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1697                         "0x%x data 0x%llx\n", msr, data);
1698                 break;
1699         case MSR_K7_CLK_CTL:
1700                 /*
1701                  * Ignore all writes to this no longer documented MSR.
1702                  * Writes are only relevant for old K7 processors,
1703                  * all pre-dating SVM, but a recommended workaround from
1704                  * AMD for these chips. It is possible to specify the
1705                  * affected processor models on the command line, hence
1706                  * the need to ignore the workaround.
1707                  */
1708                 break;
1709         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1710                 if (kvm_hv_msr_partition_wide(msr)) {
1711                         int r;
1712                         mutex_lock(&vcpu->kvm->lock);
1713                         r = set_msr_hyperv_pw(vcpu, msr, data);
1714                         mutex_unlock(&vcpu->kvm->lock);
1715                         return r;
1716                 } else
1717                         return set_msr_hyperv(vcpu, msr, data);
1718                 break;
1719         case MSR_IA32_BBL_CR_CTL3:
1720                 /* Drop writes to this legacy MSR -- see rdmsr
1721                  * counterpart for further detail.
1722                  */
1723                 pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
1724                 break;
1725         default:
1726                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1727                         return xen_hvm_config(vcpu, data);
1728                 if (!ignore_msrs) {
1729                         pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1730                                 msr, data);
1731                         return 1;
1732                 } else {
1733                         pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1734                                 msr, data);
1735                         break;
1736                 }
1737         }
1738         return 0;
1739 }
1740 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
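/*
 * Illustrative sketch (not part of this file): the guest side of the
 * MSR_KVM_SYSTEM_TIME_NEW case above. kvmclock is enabled by writing
 * the guest-physical address of a pvclock_vcpu_time_info structure
 * with bit 0 (the enable bit) set; "ti" is a hypothetical per-cpu
 * structure.
 */
#if 0
static void guest_enable_kvmclock(struct pvclock_vcpu_time_info *ti)
{
        wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(ti) | 1);
}
#endif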
1741
1742
1743 /*
1744  * Reads an msr value (of 'msr_index') into 'pdata'.
1745  * Returns 0 on success, non-0 otherwise.
1746  * Assumes vcpu_load() was already called.
1747  */
1748 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1749 {
1750         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1751 }
1752
1753 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1754 {
1755         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1756
1757         if (!msr_mtrr_valid(msr))
1758                 return 1;
1759
1760         if (msr == MSR_MTRRdefType)
1761                 *pdata = vcpu->arch.mtrr_state.def_type +
1762                          (vcpu->arch.mtrr_state.enabled << 10);
1763         else if (msr == MSR_MTRRfix64K_00000)
1764                 *pdata = p[0];
1765         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1766                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1767         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1768                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1769         else if (msr == MSR_IA32_CR_PAT)
1770                 *pdata = vcpu->arch.pat;
1771         else {  /* Variable MTRRs */
1772                 int idx, is_mtrr_mask;
1773                 u64 *pt;
1774
1775                 idx = (msr - 0x200) / 2;
1776                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1777                 if (!is_mtrr_mask)
1778                         pt =
1779                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1780                 else
1781                         pt =
1782                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1783                 *pdata = *pt;
1784         }
1785
1786         return 0;
1787 }
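/*
 * Worked example of the variable-range decode above: a read of MSR
 * 0x205 gives idx = (0x205 - 0x200) / 2 = 2 and is_mtrr_mask = 1,
 * i.e. the mask register of variable range 2 (IA32_MTRR_PHYSMASK2).
 */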
1788
1789 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1790 {
1791         u64 data;
1792         u64 mcg_cap = vcpu->arch.mcg_cap;
1793         unsigned bank_num = mcg_cap & 0xff;
1794
1795         switch (msr) {
1796         case MSR_IA32_P5_MC_ADDR:
1797         case MSR_IA32_P5_MC_TYPE:
1798                 data = 0;
1799                 break;
1800         case MSR_IA32_MCG_CAP:
1801                 data = vcpu->arch.mcg_cap;
1802                 break;
1803         case MSR_IA32_MCG_CTL:
1804                 if (!(mcg_cap & MCG_CTL_P))
1805                         return 1;
1806                 data = vcpu->arch.mcg_ctl;
1807                 break;
1808         case MSR_IA32_MCG_STATUS:
1809                 data = vcpu->arch.mcg_status;
1810                 break;
1811         default:
1812                 if (msr >= MSR_IA32_MC0_CTL &&
1813                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1814                         u32 offset = msr - MSR_IA32_MC0_CTL;
1815                         data = vcpu->arch.mce_banks[offset];
1816                         break;
1817                 }
1818                 return 1;
1819         }
1820         *pdata = data;
1821         return 0;
1822 }
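/*
 * Worked example of the bank decode above: with MSR_IA32_MC0_CTL at
 * 0x400, a read of 0x409 yields offset 9 = 4 * 2 + 1, i.e. register 1
 * (CTL=0, STATUS=1, ADDR=2, MISC=3) of bank 2 -- MC2_STATUS -- in the
 * flat mce_banks[] array.
 */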
1823
1824 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1825 {
1826         u64 data = 0;
1827         struct kvm *kvm = vcpu->kvm;
1828
1829         switch (msr) {
1830         case HV_X64_MSR_GUEST_OS_ID:
1831                 data = kvm->arch.hv_guest_os_id;
1832                 break;
1833         case HV_X64_MSR_HYPERCALL:
1834                 data = kvm->arch.hv_hypercall;
1835                 break;
1836         default:
1837                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1838                 return 1;
1839         }
1840
1841         *pdata = data;
1842         return 0;
1843 }
1844
1845 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1846 {
1847         u64 data = 0;
1848
1849         switch (msr) {
1850         case HV_X64_MSR_VP_INDEX: {
1851                 int r;
1852                 struct kvm_vcpu *v;
1853                 kvm_for_each_vcpu(r, v, vcpu->kvm)
1854                         if (v == vcpu)
1855                                 data = r;
1856                 break;
1857         }
1858         case HV_X64_MSR_EOI:
1859                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1860         case HV_X64_MSR_ICR:
1861                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1862         case HV_X64_MSR_TPR:
1863                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1864         case HV_X64_MSR_APIC_ASSIST_PAGE:
1865                 data = vcpu->arch.hv_vapic;
1866                 break;
1867         default:
1868                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1869                 return 1;
1870         }
1871         *pdata = data;
1872         return 0;
1873 }
1874
1875 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1876 {
1877         u64 data;
1878
1879         switch (msr) {
1880         case MSR_IA32_PLATFORM_ID:
1881         case MSR_IA32_EBL_CR_POWERON:
1882         case MSR_IA32_DEBUGCTLMSR:
1883         case MSR_IA32_LASTBRANCHFROMIP:
1884         case MSR_IA32_LASTBRANCHTOIP:
1885         case MSR_IA32_LASTINTFROMIP:
1886         case MSR_IA32_LASTINTTOIP:
1887         case MSR_K8_SYSCFG:
1888         case MSR_K8_TSEG_ADDR:
1889         case MSR_K8_TSEG_MASK:
1890         case MSR_K7_HWCR:
1891         case MSR_VM_HSAVE_PA:
1892         case MSR_P6_PERFCTR0:
1893         case MSR_P6_PERFCTR1:
1894         case MSR_P6_EVNTSEL0:
1895         case MSR_P6_EVNTSEL1:
1896         case MSR_K7_EVNTSEL0:
1897         case MSR_K7_PERFCTR0:
1898         case MSR_K8_INT_PENDING_MSG:
1899         case MSR_AMD64_NB_CFG:
1900         case MSR_FAM10H_MMIO_CONF_BASE:
1901                 data = 0;
1902                 break;
1903         case MSR_IA32_UCODE_REV:
1904                 data = 0x100000000ULL;
1905                 break;
1906         case MSR_MTRRcap:
1907                 data = 0x500 | KVM_NR_VAR_MTRR;
1908                 break;
1909         case 0x200 ... 0x2ff:
1910                 return get_msr_mtrr(vcpu, msr, pdata);
1911         case 0xcd: /* fsb frequency */
1912                 data = 3;
1913                 break;
1914                 /*
1915                  * MSR_EBC_FREQUENCY_ID
1916                  * Conservative value valid for even the basic CPU models.
1917                  * For models 0 and 1, 000 in bits 23:21 indicates a bus
1918                  * speed of 100MHz; for model 2, 000 in bits 18:16
1919                  * indicates 100MHz; models 3 and 4 report 266MHz. Set
1920                  * the Core Clock Frequency to System Bus Frequency Ratio
1921                  * to 1 (bits 31:24) even though it is only valid for CPU
1922                  * models > 2; guests may otherwise end up dividing or
1923                  * multiplying by zero.
1924                  */
1925         case MSR_EBC_FREQUENCY_ID:
1926                 data = 1 << 24;
1927                 break;
1928         case MSR_IA32_APICBASE:
1929                 data = kvm_get_apic_base(vcpu);
1930                 break;
1931         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1932                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1934         case MSR_IA32_TSCDEADLINE:
1935                 data = kvm_get_lapic_tscdeadline_msr(vcpu);
1936                 break;
1937         case MSR_IA32_MISC_ENABLE:
1938                 data = vcpu->arch.ia32_misc_enable_msr;
1939                 break;
1940         case MSR_IA32_PERF_STATUS:
1941                 /* TSC increment by tick */
1942                 data = 1000ULL;
1943                 /* CPU multiplier */
1944                 data |= (((uint64_t)4ULL) << 40);
1945                 break;
1946         case MSR_EFER:
1947                 data = vcpu->arch.efer;
1948                 break;
1949         case MSR_KVM_WALL_CLOCK:
1950         case MSR_KVM_WALL_CLOCK_NEW:
1951                 data = vcpu->kvm->arch.wall_clock;
1952                 break;
1953         case MSR_KVM_SYSTEM_TIME:
1954         case MSR_KVM_SYSTEM_TIME_NEW:
1955                 data = vcpu->arch.time;
1956                 break;
1957         case MSR_KVM_ASYNC_PF_EN:
1958                 data = vcpu->arch.apf.msr_val;
1959                 break;
1960         case MSR_KVM_STEAL_TIME:
1961                 data = vcpu->arch.st.msr_val;
1962                 break;
1963         case MSR_IA32_P5_MC_ADDR:
1964         case MSR_IA32_P5_MC_TYPE:
1965         case MSR_IA32_MCG_CAP:
1966         case MSR_IA32_MCG_CTL:
1967         case MSR_IA32_MCG_STATUS:
1968         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1969                 return get_msr_mce(vcpu, msr, pdata);
1970         case MSR_K7_CLK_CTL:
1971                 /*
1972                  * Provide expected ramp-up count for K7. All other
1973                  * are set to zero, indicating minimum divisors for
1974                  * every field.
1975                  *
1976                  * This prevents guest kernels on AMD host with CPU
1977                  * type 6, model 8 and higher from exploding due to
1978                  * the rdmsr failing.
1979                  */
1980                 data = 0x20000000;
1981                 break;
1982         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1983                 if (kvm_hv_msr_partition_wide(msr)) {
1984                         int r;
1985                         mutex_lock(&vcpu->kvm->lock);
1986                         r = get_msr_hyperv_pw(vcpu, msr, pdata);
1987                         mutex_unlock(&vcpu->kvm->lock);
1988                         return r;
1989                 } else
1990                         return get_msr_hyperv(vcpu, msr, pdata);
1991                 break;
1992         case MSR_IA32_BBL_CR_CTL3:
1993                 /* This legacy MSR exists but isn't fully documented in current
1994                  * silicon.  It is however accessed by winxp in very narrow
1995                  * scenarios where it sets bit #19, itself documented as
1996                  * a "reserved" bit.  Best effort attempt to source coherent
1997                  * read data here should the balance of the register be
1998                  * interpreted by the guest:
1999                  *
2000                  * L2 cache control register 3: 64GB range, 256KB size,
2001                  * enabled, latency 0x1, configured
2002                  */
2003                 data = 0xbe702111;
2004                 break;
2005         default:
2006                 if (!ignore_msrs) {
2007                         pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
2008                         return 1;
2009                 } else {
2010                         pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
2011                         data = 0;
2012                 }
2013                 break;
2014         }
2015         *pdata = data;
2016         return 0;
2017 }
2018 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2019
2020 /*
2021  * Read or write a bunch of msrs. All parameters are kernel addresses.
2022  *
2023  * @return number of msrs set successfully.
2024  */
2025 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2026                     struct kvm_msr_entry *entries,
2027                     int (*do_msr)(struct kvm_vcpu *vcpu,
2028                                   unsigned index, u64 *data))
2029 {
2030         int i, idx;
2031
2032         idx = srcu_read_lock(&vcpu->kvm->srcu);
2033         for (i = 0; i < msrs->nmsrs; ++i)
2034                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2035                         break;
2036         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2037
2038         return i;
2039 }
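/*
 * Example of the return convention above: with nmsrs = 4 and do_msr()
 * failing on entry 2, __msr_io() returns 2. Entries 0 and 1 were
 * processed, so the caller can tell exactly where the batch stopped.
 */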
2040
2041 /*
2042  * Read or write a bunch of msrs. Parameters are user addresses.
2043  *
2044  * @return number of msrs set successfully.
2045  */
2046 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2047                   int (*do_msr)(struct kvm_vcpu *vcpu,
2048                                 unsigned index, u64 *data),
2049                   int writeback)
2050 {
2051         struct kvm_msrs msrs;
2052         struct kvm_msr_entry *entries;
2053         int r, n;
2054         unsigned size;
2055
2056         r = -EFAULT;
2057         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2058                 goto out;
2059
2060         r = -E2BIG;
2061         if (msrs.nmsrs >= MAX_IO_MSRS)
2062                 goto out;
2063
2064         r = -ENOMEM;
2065         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2066         entries = kmalloc(size, GFP_KERNEL);
2067         if (!entries)
2068                 goto out;
2069
2070         r = -EFAULT;
2071         if (copy_from_user(entries, user_msrs->entries, size))
2072                 goto out_free;
2073
2074         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2075         if (r < 0)
2076                 goto out_free;
2077
2078         r = -EFAULT;
2079         if (writeback && copy_to_user(user_msrs->entries, entries, size))
2080                 goto out_free;
2081
2082         r = n;
2083
2084 out_free:
2085         kfree(entries);
2086 out:
2087         return r;
2088 }
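/*
 * Illustrative sketch (not part of this file): userspace reading one
 * MSR through the KVM_GET_MSRS ioctl serviced by msr_io() above
 * (assumes <linux/kvm.h> and <sys/ioctl.h>). Error handling is
 * omitted and "vcpu_fd" is a hypothetical vcpu file descriptor.
 */
#if 0
static u64 user_read_msr(int vcpu_fd, u32 index)
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } req = {
                .hdr.nmsrs = 1,
                .entry.index = index,
        };

        ioctl(vcpu_fd, KVM_GET_MSRS, &req);
        return req.entry.data;
}
#endif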
2089
2090 int kvm_dev_ioctl_check_extension(long ext)
2091 {
2092         int r;
2093
2094         switch (ext) {
2095         case KVM_CAP_IRQCHIP:
2096         case KVM_CAP_HLT:
2097         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2098         case KVM_CAP_SET_TSS_ADDR:
2099         case KVM_CAP_EXT_CPUID:
2100         case KVM_CAP_CLOCKSOURCE:
2101         case KVM_CAP_PIT:
2102         case KVM_CAP_NOP_IO_DELAY:
2103         case KVM_CAP_MP_STATE:
2104         case KVM_CAP_SYNC_MMU:
2105         case KVM_CAP_USER_NMI:
2106         case KVM_CAP_REINJECT_CONTROL:
2107         case KVM_CAP_IRQ_INJECT_STATUS:
2108         case KVM_CAP_ASSIGN_DEV_IRQ:
2109         case KVM_CAP_IRQFD:
2110         case KVM_CAP_IOEVENTFD:
2111         case KVM_CAP_PIT2:
2112         case KVM_CAP_PIT_STATE2:
2113         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2114         case KVM_CAP_XEN_HVM:
2115         case KVM_CAP_ADJUST_CLOCK:
2116         case KVM_CAP_VCPU_EVENTS:
2117         case KVM_CAP_HYPERV:
2118         case KVM_CAP_HYPERV_VAPIC:
2119         case KVM_CAP_HYPERV_SPIN:
2120         case KVM_CAP_PCI_SEGMENT:
2121         case KVM_CAP_DEBUGREGS:
2122         case KVM_CAP_X86_ROBUST_SINGLESTEP:
2123         case KVM_CAP_XSAVE:
2124         case KVM_CAP_ASYNC_PF:
2125         case KVM_CAP_GET_TSC_KHZ:
2126                 r = 1;
2127                 break;
2128         case KVM_CAP_COALESCED_MMIO:
2129                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2130                 break;
2131         case KVM_CAP_VAPIC:
2132                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2133                 break;
2134         case KVM_CAP_NR_VCPUS:
2135                 r = KVM_SOFT_MAX_VCPUS;
2136                 break;
2137         case KVM_CAP_MAX_VCPUS:
2138                 r = KVM_MAX_VCPUS;
2139                 break;
2140         case KVM_CAP_NR_MEMSLOTS:
2141                 r = KVM_MEMORY_SLOTS;
2142                 break;
2143         case KVM_CAP_PV_MMU:    /* obsolete */
2144                 r = 0;
2145                 break;
2146         case KVM_CAP_IOMMU:
2147                 r = iommu_present(&pci_bus_type);
2148                 break;
2149         case KVM_CAP_MCE:
2150                 r = KVM_MAX_MCE_BANKS;
2151                 break;
2152         case KVM_CAP_XCRS:
2153                 r = cpu_has_xsave;
2154                 break;
2155         case KVM_CAP_TSC_CONTROL:
2156                 r = kvm_has_tsc_control;
2157                 break;
2158         case KVM_CAP_TSC_DEADLINE_TIMER:
2159                 r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
2160                 break;
2161         default:
2162                 r = 0;
2163                 break;
2164         }
2165         return r;
2166
2167 }
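/*
 * Illustrative sketch (not part of this file): userspace probes the
 * capabilities enumerated above with KVM_CHECK_EXTENSION on the
 * /dev/kvm fd; the ioctl return value is the "r" computed above, and
 * 0 means unsupported. "kvm_fd" is hypothetical.
 */
#if 0
static bool user_has_cap(int kvm_fd, long cap)
{
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap) > 0;
}
#endif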
2168
2169 long kvm_arch_dev_ioctl(struct file *filp,
2170                         unsigned int ioctl, unsigned long arg)
2171 {
2172         void __user *argp = (void __user *)arg;
2173         long r;
2174
2175         switch (ioctl) {
2176         case KVM_GET_MSR_INDEX_LIST: {
2177                 struct kvm_msr_list __user *user_msr_list = argp;
2178                 struct kvm_msr_list msr_list;
2179                 unsigned n;
2180
2181                 r = -EFAULT;
2182                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2183                         goto out;
2184                 n = msr_list.nmsrs;
2185                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2186                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2187                         goto out;
2188                 r = -E2BIG;
2189                 if (n < msr_list.nmsrs)
2190                         goto out;
2191                 r = -EFAULT;
2192                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2193                                  num_msrs_to_save * sizeof(u32)))
2194                         goto out;
2195                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2196                                  &emulated_msrs,
2197                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2198                         goto out;
2199                 r = 0;
2200                 break;
2201         }
2202         case KVM_GET_SUPPORTED_CPUID: {
2203                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2204                 struct kvm_cpuid2 cpuid;
2205
2206                 r = -EFAULT;
2207                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2208                         goto out;
2209                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
2210                                                       cpuid_arg->entries);
2211                 if (r)
2212                         goto out;
2213
2214                 r = -EFAULT;
2215                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2216                         goto out;
2217                 r = 0;
2218                 break;
2219         }
2220         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2221                 u64 mce_cap;
2222
2223                 mce_cap = KVM_MCE_CAP_SUPPORTED;
2224                 r = -EFAULT;
2225                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2226                         goto out;
2227                 r = 0;
2228                 break;
2229         }
2230         default:
2231                 r = -EINVAL;
2232         }
2233 out:
2234         return r;
2235 }
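/*
 * Illustrative sketch (not part of this file): KVM_GET_MSR_INDEX_LIST
 * is typically issued twice. The first call, with nmsrs = 0, fails
 * with E2BIG but has the required count written back; the second call
 * fetches the indices. Error handling is omitted and "xmalloc" is a
 * hypothetical allocator.
 */
#if 0
static struct kvm_msr_list *user_msr_list(int kvm_fd)
{
        struct kvm_msr_list probe = { .nmsrs = 0 };
        struct kvm_msr_list *list;

        ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe); /* -E2BIG, sets nmsrs */
        list = xmalloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
        list->nmsrs = probe.nmsrs;
        ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
        return list;
}
#endif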
2236
2237 static void wbinvd_ipi(void *garbage)
2238 {
2239         wbinvd();
2240 }
2241
2242 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2243 {
2244         return vcpu->kvm->arch.iommu_domain &&
2245                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
2246 }
2247
2248 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2249 {
2250         /* WBINVD may be executed by the guest; address that here */
2251         if (need_emulate_wbinvd(vcpu)) {
2252                 if (kvm_x86_ops->has_wbinvd_exit())
2253                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2254                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2255                         smp_call_function_single(vcpu->cpu,
2256                                         wbinvd_ipi, NULL, 1);
2257         }
2258
2259         kvm_x86_ops->vcpu_load(vcpu, cpu);
2260         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2261                 /* Make sure TSC doesn't go backwards */
2262                 s64 tsc_delta;
2263                 u64 tsc;
2264
2265                 tsc = kvm_x86_ops->read_l1_tsc(vcpu);
2266                 tsc_delta = !vcpu->arch.last_guest_tsc ? 0 :
2267                              tsc - vcpu->arch.last_guest_tsc;
2268
2269                 if (tsc_delta < 0)
2270                         mark_tsc_unstable("KVM discovered backwards TSC");
2271                 if (check_tsc_unstable()) {
2272                         kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
2273                         vcpu->arch.tsc_catchup = 1;
2274                 }
2275                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2276                 if (vcpu->cpu != cpu)
2277                         kvm_migrate_timers(vcpu);
2278                 vcpu->cpu = cpu;
2279         }
2280
2281         accumulate_steal_time(vcpu);
2282         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2283 }
2284
2285 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2286 {
2287         kvm_x86_ops->vcpu_put(vcpu);
2288         kvm_put_guest_fpu(vcpu);
2289         vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
2290 }
2291
2292 static int is_efer_nx(void)
2293 {
2294         unsigned long long efer = 0;
2295
2296         rdmsrl_safe(MSR_EFER, &efer);
2297         return efer & EFER_NX;
2298 }
2299
2300 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2301 {
2302         int i;
2303         struct kvm_cpuid_entry2 *e, *entry;
2304
2305         entry = NULL;
2306         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2307                 e = &vcpu->arch.cpuid_entries[i];
2308                 if (e->function == 0x80000001) {
2309                         entry = e;
2310                         break;
2311                 }
2312         }
2313         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
2314                 entry->edx &= ~(1 << 20);
2315                 printk(KERN_INFO "kvm: guest NX capability removed\n");
2316         }
2317 }
2318
2319 /* used when old userspace passes the legacy struct kvm_cpuid to a newer kernel module */
2320 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2321                                     struct kvm_cpuid *cpuid,
2322                                     struct kvm_cpuid_entry __user *entries)
2323 {
2324         int r, i;
2325         struct kvm_cpuid_entry *cpuid_entries;
2326
2327         r = -E2BIG;
2328         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2329                 goto out;
2330         r = -ENOMEM;
2331         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
2332         if (!cpuid_entries)
2333                 goto out;
2334         r = -EFAULT;
2335         if (copy_from_user(cpuid_entries, entries,
2336                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2337                 goto out_free;
2338         for (i = 0; i < cpuid->nent; i++) {
2339                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
2340                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
2341                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
2342                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
2343                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
2344                 vcpu->arch.cpuid_entries[i].index = 0;
2345                 vcpu->arch.cpuid_entries[i].flags = 0;
2346                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
2347                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
2348                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
2349         }
2350         vcpu->arch.cpuid_nent = cpuid->nent;
2351         cpuid_fix_nx_cap(vcpu);
2352         r = 0;
2353         kvm_apic_set_version(vcpu);
2354         kvm_x86_ops->cpuid_update(vcpu);
2355         update_cpuid(vcpu);
2356
2357 out_free:
2358         vfree(cpuid_entries);
2359 out:
2360         return r;
2361 }
2362
2363 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
2364                                      struct kvm_cpuid2 *cpuid,
2365                                      struct kvm_cpuid_entry2 __user *entries)
2366 {
2367         int r;
2368
2369         r = -E2BIG;
2370         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2371                 goto out;
2372         r = -EFAULT;
2373         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
2374                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
2375                 goto out;
2376         vcpu->arch.cpuid_nent = cpuid->nent;
2377         kvm_apic_set_version(vcpu);
2378         kvm_x86_ops->cpuid_update(vcpu);
2379         update_cpuid(vcpu);
2380         return 0;
2381
2382 out:
2383         return r;
2384 }
2385
2386 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
2387                                      struct kvm_cpuid2 *cpuid,
2388                                      struct kvm_cpuid_entry2 __user *entries)
2389 {
2390         int r;
2391
2392         r = -E2BIG;
2393         if (cpuid->nent < vcpu->arch.cpuid_nent)
2394                 goto out;
2395         r = -EFAULT;
2396         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
2397                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
2398                 goto out;
2399         return 0;
2400
2401 out:
2402         cpuid->nent = vcpu->arch.cpuid_nent;
2403         return r;
2404 }
2405
2406 static void cpuid_mask(u32 *word, int wordnum)
2407 {
2408         *word &= boot_cpu_data.x86_capability[wordnum];
2409 }
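/*
 * Example of the masking above: capability word 0 is the CPUID.1:EDX
 * feature block, so cpuid_mask(&entry->edx, 0) clears any feature bit
 * that the host CPU itself does not advertise, preventing the guest
 * from seeing features the host cannot back.
 */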
2410
2411 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2412                            u32 index)
2413 {
2414         entry->function = function;
2415         entry->index = index;
2416         cpuid_count(entry->function, entry->index,
2417                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
2418         entry->flags = 0;
2419 }
2420
2421 static bool supported_xcr0_bit(unsigned bit)
2422 {
2423         u64 mask = ((u64)1 << bit);
2424
2425         return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
2426 }
2427
2428 #define F(x) bit(X86_FEATURE_##x)
2429
2430 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2431                          u32 index, int *nent, int maxnent)
2432 {
2433         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
2434 #ifdef CONFIG_X86_64
2435         unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
2436                                 ? F(GBPAGES) : 0;
2437         unsigned f_lm = F(LM);
2438 #else
2439         unsigned f_gbpages = 0;
2440         unsigned f_lm = 0;
2441 #endif
2442         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
2443
2444         /* cpuid 1.edx */
2445         const u32 kvm_supported_word0_x86_features =
2446                 F(FPU) | F(VME) | F(DE) | F(PSE) |
2447                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2448                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
2449                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2450                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
2451                 0 /* Reserved, DS, ACPI */ | F(MMX) |
2452                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
2453                 0 /* HTT, TM, Reserved, PBE */;
2454         /* cpuid 0x80000001.edx */
2455         const u32 kvm_supported_word1_x86_features =
2456                 F(FPU) | F(VME) | F(DE) | F(PSE) |
2457                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2458                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
2459                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2460                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
2461                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
2462                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
2463                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
2464         /* cpuid 1.ecx */
2465         const u32 kvm_supported_word4_x86_features =
2466                 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
2467                 0 /* DS-CPL, VMX, SMX, EST */ |
2468                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
2469                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
2470                 0 /* Reserved, DCA */ | F(XMM4_1) |
2471                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
2472                 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
2473                 F(F16C) | F(RDRAND);
2474         /* cpuid 0x80000001.ecx */
2475         const u32 kvm_supported_word6_x86_features =
2476                 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
2477                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2478                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
2479                 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
2480
2481         /* cpuid 0xC0000001.edx */
2482         const u32 kvm_supported_word5_x86_features =
2483                 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
2484                 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
2485                 F(PMM) | F(PMM_EN);
2486
2487         /* cpuid 7.0.ebx */
2488         const u32 kvm_supported_word9_x86_features =
2489                 F(SMEP) | F(FSGSBASE) | F(ERMS);
2490
2491         /* all calls to cpuid_count() should be made on the same cpu */
2492         get_cpu();
2493         do_cpuid_1_ent(entry, function, index);
2494         ++*nent;
2495
2496         switch (function) {
2497         case 0:
2498                 entry->eax = min(entry->eax, (u32)0xd);
2499                 break;
2500         case 1:
2501                 entry->edx &= kvm_supported_word0_x86_features;
2502                 cpuid_mask(&entry->edx, 0);
2503                 entry->ecx &= kvm_supported_word4_x86_features;
2504                 cpuid_mask(&entry->ecx, 4);
2505                 /* we support x2apic emulation even if host does not support
2506                  * it since we emulate x2apic in software */
2507                 entry->ecx |= F(X2APIC);
2508                 break;
2509         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
2510          * may return different values. This forces us to get_cpu() before
2511          * issuing the first command, and also to emulate this annoying behavior
2512          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2513         case 2: {
2514                 int t, times = entry->eax & 0xff;
2515
2516                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2517                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2518                 for (t = 1; t < times && *nent < maxnent; ++t) {
2519                         do_cpuid_1_ent(&entry[t], function, 0);
2520                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2521                         ++*nent;
2522                 }
2523                 break;
2524         }
2525         /* function 4 has additional index. */
2526         case 4: {
2527                 int i, cache_type;
2528
2529                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2530                 /* read more entries until cache_type is zero */
2531                 for (i = 1; *nent < maxnent; ++i) {
2532                         cache_type = entry[i - 1].eax & 0x1f;
2533                         if (!cache_type)
2534                                 break;
2535                         do_cpuid_1_ent(&entry[i], function, i);
2536                         entry[i].flags |=
2537                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2538                         ++*nent;
2539                 }
2540                 break;
2541         }
2542         case 7: {
2543                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2544                 /* Mask ebx against host capability word 9 */
2545                 if (index == 0) {
2546                         entry->ebx &= kvm_supported_word9_x86_features;
2547                         cpuid_mask(&entry->ebx, 9);
2548                 } else
2549                         entry->ebx = 0;
2550                 entry->eax = 0;
2551                 entry->ecx = 0;
2552                 entry->edx = 0;
2553                 break;
2554         }
2555         case 9:
2556                 break;
2557         /* function 0xb has additional index. */
2558         case 0xb: {
2559                 int i, level_type;
2560
2561                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2562                 /* read more entries until level_type is zero */
2563                 for (i = 1; *nent < maxnent; ++i) {
2564                         level_type = entry[i - 1].ecx & 0xff00;
2565                         if (!level_type)
2566                                 break;
2567                         do_cpuid_1_ent(&entry[i], function, i);
2568                         entry[i].flags |=
2569                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2570                         ++*nent;
2571                 }
2572                 break;
2573         }
2574         case 0xd: {
2575                 int idx, i;
2576
2577                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2578                 for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
2579                         do_cpuid_1_ent(&entry[i], function, idx);
2580                         if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
2581                                 continue;
2582                         entry[i].flags |=
2583                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2584                         ++*nent;
2585                         ++i;
2586                 }
2587                 break;
2588         }
2589         case KVM_CPUID_SIGNATURE: {
2590                 char signature[12] = "KVMKVMKVM\0\0";
2591                 u32 *sigptr = (u32 *)signature;
2592                 entry->eax = 0;
2593                 entry->ebx = sigptr[0];
2594                 entry->ecx = sigptr[1];
2595                 entry->edx = sigptr[2];
2596                 break;
2597         }
2598         case KVM_CPUID_FEATURES:
2599                 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2600                              (1 << KVM_FEATURE_NOP_IO_DELAY) |
2601                              (1 << KVM_FEATURE_CLOCKSOURCE2) |
2602                              (1 << KVM_FEATURE_ASYNC_PF) |
2603                              (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2604
2605                 if (sched_info_on())
2606                         entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
2607
2608                 entry->ebx = 0;
2609                 entry->ecx = 0;
2610                 entry->edx = 0;
2611                 break;
2612         case 0x80000000:
2613                 entry->eax = min(entry->eax, 0x8000001a);
2614                 break;
2615         case 0x80000001:
2616                 entry->edx &= kvm_supported_word1_x86_features;
2617                 cpuid_mask(&entry->edx, 1);
2618                 entry->ecx &= kvm_supported_word6_x86_features;
2619                 cpuid_mask(&entry->ecx, 6);
2620                 break;
2621         case 0x80000008: {
2622                 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
2623                 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
2624                 unsigned phys_as = entry->eax & 0xff;
2625
2626                 if (!g_phys_as)
2627                         g_phys_as = phys_as;
2628                 entry->eax = g_phys_as | (virt_as << 8);
2629                 entry->ebx = entry->edx = 0;
2630                 break;
2631         }
2632         case 0x80000019:
2633                 entry->ecx = entry->edx = 0;
2634                 break;
2635         case 0x8000001a:
2636                 break;
2637         case 0x8000001d:
2638                 break;
2639         /* Add support for Centaur's CPUID instruction */
2640         case 0xC0000000:
2641                 /* Just support up to 0xC0000004 now */
2642                 entry->eax = min(entry->eax, 0xC0000004);
2643                 break;
2644         case 0xC0000001:
2645                 entry->edx &= kvm_supported_word5_x86_features;
2646                 cpuid_mask(&entry->edx, 5);
2647                 break;
2648         case 3: /* Processor serial number */
2649         case 5: /* MONITOR/MWAIT */
2650         case 6: /* Thermal management */
2651         case 0xA: /* Architectural Performance Monitoring */
2652         case 0x80000007: /* Advanced power management */
2653         case 0xC0000002:
2654         case 0xC0000003:
2655         case 0xC0000004:
2656         default:
2657                 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
2658                 break;
2659         }
2660
2661         kvm_x86_ops->set_supported_cpuid(function, entry);
2662
2663         put_cpu();
2664 }
2665
2666 #undef F
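/*
 * Illustrative sketch (not part of this file): a guest detects KVM by
 * issuing CPUID at KVM_CPUID_SIGNATURE and comparing ebx/ecx/edx with
 * the "KVMKVMKVM\0\0\0" signature filled in above.
 */
#if 0
static bool guest_on_kvm(void)
{
        unsigned int eax, ebx, ecx, edx;
        char sig[13];

        cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
        memcpy(sig + 0, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        sig[12] = '\0';

        return !strcmp(sig, "KVMKVMKVM");
}
#endif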
2667
2668 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2669                                      struct kvm_cpuid_entry2 __user *entries)
2670 {
2671         struct kvm_cpuid_entry2 *cpuid_entries;
2672         int limit, nent = 0, r = -E2BIG;
2673         u32 func;
2674
2675         if (cpuid->nent < 1)
2676                 goto out;
2677         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2678                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2679         r = -ENOMEM;
2680         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2681         if (!cpuid_entries)
2682                 goto out;
2683
2684         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2685         limit = cpuid_entries[0].eax;
2686         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2687                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2688                              &nent, cpuid->nent);
2689         r = -E2BIG;
2690         if (nent >= cpuid->nent)
2691                 goto out_free;
2692
2693         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2694         limit = cpuid_entries[nent - 1].eax;
2695         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2696                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2697                              &nent, cpuid->nent);
2698
2701         r = -E2BIG;
2702         if (nent >= cpuid->nent)
2703                 goto out_free;
2704
2705         /* Add support for Centaur's CPUID instruction. */
2706         if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
2707                 do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
2708                                 &nent, cpuid->nent);
2709
2710                 r = -E2BIG;
2711                 if (nent >= cpuid->nent)
2712                         goto out_free;
2713
2714                 limit = cpuid_entries[nent - 1].eax;
2715                 for (func = 0xC0000001;
2716                         func <= limit && nent < cpuid->nent; ++func)
2717                         do_cpuid_ent(&cpuid_entries[nent], func, 0,
2718                                         &nent, cpuid->nent);
2719
2720                 r = -E2BIG;
2721                 if (nent >= cpuid->nent)
2722                         goto out_free;
2723         }
2724
2725         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2726                      cpuid->nent);
2727
2728         r = -E2BIG;
2729         if (nent >= cpuid->nent)
2730                 goto out_free;
2731
2732         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2733                      cpuid->nent);
2734
2735         r = -E2BIG;
2736         if (nent >= cpuid->nent)
2737                 goto out_free;
2738
2739         r = -EFAULT;
2740         if (copy_to_user(entries, cpuid_entries,
2741                          nent * sizeof(struct kvm_cpuid_entry2)))
2742                 goto out_free;
2743         cpuid->nent = nent;
2744         r = 0;
2745
2746 out_free:
2747         vfree(cpuid_entries);
2748 out:
2749         return r;
2750 }
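/*
 * Illustrative sketch (not part of this file): since the function
 * above returns -E2BIG when the buffer is too small, userspace
 * usually retries KVM_GET_SUPPORTED_CPUID with a growing buffer.
 * "xrealloc" is a hypothetical allocator; error handling is omitted.
 */
#if 0
static struct kvm_cpuid2 *user_supported_cpuid(int kvm_fd)
{
        struct kvm_cpuid2 *cpuid = NULL;
        int nent = 64;

        do {
                cpuid = xrealloc(cpuid, sizeof(*cpuid) +
                                 nent * sizeof(struct kvm_cpuid_entry2));
                cpuid->nent = nent;
                nent *= 2;
        } while (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) < 0 &&
                 errno == E2BIG);

        return cpuid;
}
#endif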
2751
2752 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2753                                     struct kvm_lapic_state *s)
2754 {
2755         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2756
2757         return 0;
2758 }
2759
2760 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2761                                     struct kvm_lapic_state *s)
2762 {
2763         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2764         kvm_apic_post_state_restore(vcpu);
2765         update_cr8_intercept(vcpu);
2766
2767         return 0;
2768 }
2769
2770 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2771                                     struct kvm_interrupt *irq)
2772 {
2773         if (irq->irq >= 256)
2774                 return -EINVAL;
2775         if (irqchip_in_kernel(vcpu->kvm))
2776                 return -ENXIO;
2777
2778         kvm_queue_interrupt(vcpu, irq->irq, false);
2779         kvm_make_request(KVM_REQ_EVENT, vcpu);
2780
2781         return 0;
2782 }
2783
2784 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2785 {
2786         kvm_inject_nmi(vcpu);
2787
2788         return 0;
2789 }
2790
2791 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2792                                            struct kvm_tpr_access_ctl *tac)
2793 {
2794         if (tac->flags)
2795                 return -EINVAL;
2796         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2797         return 0;
2798 }
2799
2800 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2801                                         u64 mcg_cap)
2802 {
2803         int r;
2804         unsigned bank_num = mcg_cap & 0xff, bank;
2805
2806         r = -EINVAL;
2807         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2808                 goto out;
2809         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2810                 goto out;
2811         r = 0;
2812         vcpu->arch.mcg_cap = mcg_cap;
2813         /* Init IA32_MCG_CTL to all 1s */
2814         if (mcg_cap & MCG_CTL_P)
2815                 vcpu->arch.mcg_ctl = ~(u64)0;
2816         /* Init IA32_MCi_CTL to all 1s */
2817         for (bank = 0; bank < bank_num; bank++)
2818                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2819 out:
2820         return r;
2821 }
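/*
 * Worked example of the mcg_cap validation above: the low byte is the
 * bank count and must be 1..KVM_MAX_MCE_BANKS-1; apart from the count
 * fields, only bits in KVM_MCE_CAP_SUPPORTED may be set. For
 * instance, mcg_cap = MCG_CTL_P | MCG_SER_P | 10 requests ten banks
 * with the global-control and software-recovery capabilities.
 */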
2822
2823 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2824                                       struct kvm_x86_mce *mce)
2825 {
2826         u64 mcg_cap = vcpu->arch.mcg_cap;
2827         unsigned bank_num = mcg_cap & 0xff;
2828         u64 *banks = vcpu->arch.mce_banks;
2829
2830         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2831                 return -EINVAL;
2832         /*
2833          * if IA32_MCG_CTL is not all 1s, the uncorrected error
2834          * reporting is disabled
2835          */
2836         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2837             vcpu->arch.mcg_ctl != ~(u64)0)
2838                 return 0;
2839         banks += 4 * mce->bank;
2840         /*
2841          * if IA32_MCi_CTL is not all 1s, the uncorrected error
2842          * reporting is disabled for the bank
2843          */
2844         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2845                 return 0;
2846         if (mce->status & MCI_STATUS_UC) {
2847                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2848                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2849                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2850                         return 0;
2851                 }
2852                 if (banks[1] & MCI_STATUS_VAL)
2853                         mce->status |= MCI_STATUS_OVER;
2854                 banks[2] = mce->addr;
2855                 banks[3] = mce->misc;
2856                 vcpu->arch.mcg_status = mce->mcg_status;
2857                 banks[1] = mce->status;
2858                 kvm_queue_exception(vcpu, MC_VECTOR);
2859         } else if (!(banks[1] & MCI_STATUS_VAL)
2860                    || !(banks[1] & MCI_STATUS_UC)) {
2861                 if (banks[1] & MCI_STATUS_VAL)
2862                         mce->status |= MCI_STATUS_OVER;
2863                 banks[2] = mce->addr;
2864                 banks[3] = mce->misc;
2865                 banks[1] = mce->status;
2866         } else
2867                 banks[1] |= MCI_STATUS_OVER;
2868         return 0;
2869 }
2870
2871 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2872                                                struct kvm_vcpu_events *events)
2873 {
2874         process_nmi(vcpu);
2875         events->exception.injected =
2876                 vcpu->arch.exception.pending &&
2877                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2878         events->exception.nr = vcpu->arch.exception.nr;
2879         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2880         events->exception.pad = 0;
2881         events->exception.error_code = vcpu->arch.exception.error_code;
2882
2883         events->interrupt.injected =
2884                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2885         events->interrupt.nr = vcpu->arch.interrupt.nr;
2886         events->interrupt.soft = 0;
2887         events->interrupt.shadow =
2888                 kvm_x86_ops->get_interrupt_shadow(vcpu,
2889                         KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2890
2891         events->nmi.injected = vcpu->arch.nmi_injected;
2892         events->nmi.pending = vcpu->arch.nmi_pending != 0;
2893         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2894         events->nmi.pad = 0;
2895
2896         events->sipi_vector = vcpu->arch.sipi_vector;
2897
2898         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2899                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2900                          | KVM_VCPUEVENT_VALID_SHADOW);
2901         memset(&events->reserved, 0, sizeof(events->reserved));
2902 }
2903
2904 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2905                                               struct kvm_vcpu_events *events)
2906 {
2907         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2908                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2909                               | KVM_VCPUEVENT_VALID_SHADOW))
2910                 return -EINVAL;
2911
2912         process_nmi(vcpu);
2913         vcpu->arch.exception.pending = events->exception.injected;
2914         vcpu->arch.exception.nr = events->exception.nr;
2915         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2916         vcpu->arch.exception.error_code = events->exception.error_code;
2917
2918         vcpu->arch.interrupt.pending = events->interrupt.injected;
2919         vcpu->arch.interrupt.nr = events->interrupt.nr;
2920         vcpu->arch.interrupt.soft = events->interrupt.soft;
2921         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2922                 kvm_x86_ops->set_interrupt_shadow(vcpu,
2923                                                   events->interrupt.shadow);
2924
2925         vcpu->arch.nmi_injected = events->nmi.injected;
2926         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2927                 vcpu->arch.nmi_pending = events->nmi.pending;
2928         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2929
2930         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2931                 vcpu->arch.sipi_vector = events->sipi_vector;
2932
2933         kvm_make_request(KVM_REQ_EVENT, vcpu);
2934
2935         return 0;
2936 }
2937
2938 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2939                                              struct kvm_debugregs *dbgregs)
2940 {
2941         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2942         dbgregs->dr6 = vcpu->arch.dr6;
2943         dbgregs->dr7 = vcpu->arch.dr7;
2944         dbgregs->flags = 0;
2945         memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2946 }
2947
2948 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2949                                             struct kvm_debugregs *dbgregs)
2950 {
2951         if (dbgregs->flags)
2952                 return -EINVAL;
2953
2954         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2955         vcpu->arch.dr6 = dbgregs->dr6;
2956         vcpu->arch.dr7 = dbgregs->dr7;
2957
2958         return 0;
2959 }
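
/*
 * Illustrative userspace sketch (same hypothetical "vcpu_fd" as above):
 * dbgregs.flags must be zero or the set ioctl fails with -EINVAL.
 *
 *      struct kvm_debugregs dbg;
 *
 *      ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg);
 *      dbg.dr7 |= 0x1;                         (local-enable breakpoint 0)
 *      ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
 */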
2960
2961 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2962                                          struct kvm_xsave *guest_xsave)
2963 {
2964         if (cpu_has_xsave) {
2965                 memcpy(guest_xsave->region,
2966                         &vcpu->arch.guest_fpu.state->xsave,
2967                         xstate_size);
2968         } else {
2969                 memcpy(guest_xsave->region,
2970                         &vcpu->arch.guest_fpu.state->fxsave,
2971                         sizeof(struct i387_fxsave_struct));
2972                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2973                         XSTATE_FPSSE;
2974         }
2975 }
2976
2977 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2978                                         struct kvm_xsave *guest_xsave)
2979 {
2980         u64 xstate_bv =
2981                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2982
2983         if (cpu_has_xsave) {
2984                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2985                         guest_xsave->region, xstate_size);
2986         } else {
2987                 if (xstate_bv & ~XSTATE_FPSSE)
2988                         return -EINVAL;
2989                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2990                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
2991         }
2992         return 0;
2993 }
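
/*
 * Layout note for the two xsave helpers above: guest_xsave->region is an
 * array of u32, so the 64-bit XSTATE_BV header field at byte offset
 * XSAVE_HDR_OFFSET is addressed as region[XSAVE_HDR_OFFSET / sizeof(u32)].
 * Hosts without XSAVE carry only legacy FP/SSE state, hence the xstate_bv
 * check against XSTATE_FPSSE on the set side.
 */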
2994
2995 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2996                                         struct kvm_xcrs *guest_xcrs)
2997 {
2998         if (!cpu_has_xsave) {
2999                 guest_xcrs->nr_xcrs = 0;
3000                 return;
3001         }
3002
3003         guest_xcrs->nr_xcrs = 1;
3004         guest_xcrs->flags = 0;
3005         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3006         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3007 }
3008
3009 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3010                                        struct kvm_xcrs *guest_xcrs)
3011 {
3012         int i, r = 0;
3013
3014         if (!cpu_has_xsave)
3015                 return -EINVAL;
3016
3017         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3018                 return -EINVAL;
3019
3020         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3021                 /* Only support XCR0 currently */
3022                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3023                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3024                                 guest_xcrs->xcrs[i].value);
3025                         break;
3026                 }
3027         if (r)
3028                 r = -EINVAL;
3029         return r;
3030 }
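
/*
 * Illustrative userspace sketch (assumes "vcpu_fd"): only XCR0 is
 * supported, and a value refused by __kvm_set_xcr surfaces as -EINVAL.
 *
 *      struct kvm_xcrs xcrs = { 0 };
 *
 *      xcrs.nr_xcrs = 1;
 *      xcrs.xcrs[0].xcr = 0;                   (XCR_XFEATURE_ENABLED_MASK)
 *      xcrs.xcrs[0].value = 0x7;               (x87 | SSE | AVX)
 *      ioctl(vcpu_fd, KVM_SET_XCRS, &xcrs);
 */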
3031
3032 long kvm_arch_vcpu_ioctl(struct file *filp,
3033                          unsigned int ioctl, unsigned long arg)
3034 {
3035         struct kvm_vcpu *vcpu = filp->private_data;
3036         void __user *argp = (void __user *)arg;
3037         int r;
3038         union {
3039                 struct kvm_lapic_state *lapic;
3040                 struct kvm_xsave *xsave;
3041                 struct kvm_xcrs *xcrs;
3042                 void *buffer;
3043         } u;
3044
3045         u.buffer = NULL;
3046         switch (ioctl) {
3047         case KVM_GET_LAPIC: {
3048                 r = -EINVAL;
3049                 if (!vcpu->arch.apic)
3050                         goto out;
3051                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3052
3053                 r = -ENOMEM;
3054                 if (!u.lapic)
3055                         goto out;
3056                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3057                 if (r)
3058                         goto out;
3059                 r = -EFAULT;
3060                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3061                         goto out;
3062                 r = 0;
3063                 break;
3064         }
3065         case KVM_SET_LAPIC: {
3066                 r = -EINVAL;
3067                 if (!vcpu->arch.apic)
3068                         goto out;
3069                 u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3070                 r = -ENOMEM;
3071                 if (!u.lapic)
3072                         goto out;
3073                 r = -EFAULT;
3074                 if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
3075                         goto out;
3076                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3077                 if (r)
3078                         goto out;
3079                 r = 0;
3080                 break;
3081         }
3082         case KVM_INTERRUPT: {
3083                 struct kvm_interrupt irq;
3084
3085                 r = -EFAULT;
3086                 if (copy_from_user(&irq, argp, sizeof irq))
3087                         goto out;
3088                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3089                 if (r)
3090                         goto out;
3091                 r = 0;
3092                 break;
3093         }
3094         case KVM_NMI: {
3095                 r = kvm_vcpu_ioctl_nmi(vcpu);
3096                 if (r)
3097                         goto out;
3098                 r = 0;
3099                 break;
3100         }
3101         case KVM_SET_CPUID: {
3102                 struct kvm_cpuid __user *cpuid_arg = argp;
3103                 struct kvm_cpuid cpuid;
3104
3105                 r = -EFAULT;
3106                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3107                         goto out;
3108                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3109                 if (r)
3110                         goto out;
3111                 break;
3112         }
3113         case KVM_SET_CPUID2: {
3114                 struct kvm_cpuid2 __user *cpuid_arg = argp;
3115                 struct kvm_cpuid2 cpuid;
3116
3117                 r = -EFAULT;
3118                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3119                         goto out;
3120                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3121                                               cpuid_arg->entries);
3122                 if (r)
3123                         goto out;
3124                 break;
3125         }
3126         case KVM_GET_CPUID2: {
3127                 struct kvm_cpuid2 __user *cpuid_arg = argp;
3128                 struct kvm_cpuid2 cpuid;
3129
3130                 r = -EFAULT;
3131                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3132                         goto out;
3133                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3134                                               cpuid_arg->entries);
3135                 if (r)
3136                         goto out;
3137                 r = -EFAULT;
3138                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3139                         goto out;
3140                 r = 0;
3141                 break;
3142         }
3143         case KVM_GET_MSRS:
3144                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
3145                 break;
3146         case KVM_SET_MSRS:
3147                 r = msr_io(vcpu, argp, do_set_msr, 0);
3148                 break;
3149         case KVM_TPR_ACCESS_REPORTING: {
3150                 struct kvm_tpr_access_ctl tac;
3151
3152                 r = -EFAULT;
3153                 if (copy_from_user(&tac, argp, sizeof tac))
3154                         goto out;
3155                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3156                 if (r)
3157                         goto out;
3158                 r = -EFAULT;
3159                 if (copy_to_user(argp, &tac, sizeof tac))
3160                         goto out;
3161                 r = 0;
3162                 break;
3163         }
3164         case KVM_SET_VAPIC_ADDR: {
3165                 struct kvm_vapic_addr va;
3166
3167                 r = -EINVAL;
3168                 if (!irqchip_in_kernel(vcpu->kvm))
3169                         goto out;
3170                 r = -EFAULT;
3171                 if (copy_from_user(&va, argp, sizeof va))
3172                         goto out;
3173                 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3174                 break;
3175         }
3176         case KVM_X86_SETUP_MCE: {
3177                 u64 mcg_cap;
3178
3179                 r = -EFAULT;
3180                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3181                         goto out;
3182                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3183                 break;
3184         }
3185         case KVM_X86_SET_MCE: {
3186                 struct kvm_x86_mce mce;
3187
3188                 r = -EFAULT;
3189                 if (copy_from_user(&mce, argp, sizeof mce))
3190                         goto out;
3191                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3192                 break;
3193         }
3194         case KVM_GET_VCPU_EVENTS: {
3195                 struct kvm_vcpu_events events;
3196
3197                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3198
3199                 r = -EFAULT;
3200                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3201                         break;
3202                 r = 0;
3203                 break;
3204         }
3205         case KVM_SET_VCPU_EVENTS: {
3206                 struct kvm_vcpu_events events;
3207
3208                 r = -EFAULT;
3209                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3210                         break;
3211
3212                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3213                 break;
3214         }
3215         case KVM_GET_DEBUGREGS: {
3216                 struct kvm_debugregs dbgregs;
3217
3218                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3219
3220                 r = -EFAULT;
3221                 if (copy_to_user(argp, &dbgregs,
3222                                  sizeof(struct kvm_debugregs)))
3223                         break;
3224                 r = 0;
3225                 break;
3226         }
3227         case KVM_SET_DEBUGREGS: {
3228                 struct kvm_debugregs dbgregs;
3229
3230                 r = -EFAULT;
3231                 if (copy_from_user(&dbgregs, argp,
3232                                    sizeof(struct kvm_debugregs)))
3233                         break;
3234
3235                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3236                 break;
3237         }
3238         case KVM_GET_XSAVE: {
3239                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3240                 r = -ENOMEM;
3241                 if (!u.xsave)
3242                         break;
3243
3244                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3245
3246                 r = -EFAULT;
3247                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3248                         break;
3249                 r = 0;
3250                 break;
3251         }
3252         case KVM_SET_XSAVE: {
3253                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3254                 r = -ENOMEM;
3255                 if (!u.xsave)
3256                         break;
3257
3258                 r = -EFAULT;
3259                 if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
3260                         break;
3261
3262                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3263                 break;
3264         }
3265         case KVM_GET_XCRS: {
3266                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3267                 r = -ENOMEM;
3268                 if (!u.xcrs)
3269                         break;
3270
3271                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3272
3273                 r = -EFAULT;
3274                 if (copy_to_user(argp, u.xcrs,
3275                                  sizeof(struct kvm_xcrs)))
3276                         break;
3277                 r = 0;
3278                 break;
3279         }
3280         case KVM_SET_XCRS: {
3281                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3282                 r = -ENOMEM;
3283                 if (!u.xcrs)
3284                         break;
3285
3286                 r = -EFAULT;
3287                 if (copy_from_user(u.xcrs, argp,
3288                                    sizeof(struct kvm_xcrs)))
3289                         break;
3290
3291                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3292                 break;
3293         }
3294         case KVM_SET_TSC_KHZ: {
3295                 u32 user_tsc_khz;
3296
3297                 r = -EINVAL;
3298                 if (!kvm_has_tsc_control)
3299                         break;
3300
3301                 user_tsc_khz = (u32)arg;
3302
3303                 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3304                         goto out;
3305
3306                 kvm_x86_ops->set_tsc_khz(vcpu, user_tsc_khz);
3307
3308                 r = 0;
3309                 goto out;
3310         }
3311         case KVM_GET_TSC_KHZ: {
3312                 r = -EIO;
3313                 if (check_tsc_unstable())
3314                         goto out;
3315
3316                 r = vcpu_tsc_khz(vcpu);
3317
3318                 goto out;
3319         }
3320         default:
3321                 r = -EINVAL;
3322         }
3323 out:
3324         kfree(u.buffer);
3325         return r;
3326 }
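
/*
 * Illustrative userspace sketch (assumes "vcpu_fd"): unlike most vcpu
 * ioctls, KVM_SET_TSC_KHZ and KVM_GET_TSC_KHZ pass the frequency in the
 * ioctl argument itself rather than through a pointer, as the "(u32)arg"
 * cast above shows.
 *
 *      if (ioctl(vcpu_fd, KVM_SET_TSC_KHZ, 1500000) == 0)
 *              printf("guest TSC: %ld kHz\n",
 *                     (long)ioctl(vcpu_fd, KVM_GET_TSC_KHZ, 0));
 */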
3327
3328 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3329 {
3330         int ret;
3331
3332         if (addr > (unsigned int)(-3 * PAGE_SIZE))
3333                 return -EINVAL;
3334         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3335         return ret;
3336 }
3337
3338 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3339                                               u64 ident_addr)
3340 {
3341         kvm->arch.ept_identity_map_addr = ident_addr;
3342         return 0;
3343 }
3344
3345 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3346                                           u32 kvm_nr_mmu_pages)
3347 {
3348         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3349                 return -EINVAL;
3350
3351         mutex_lock(&kvm->slots_lock);
3352         spin_lock(&kvm->mmu_lock);
3353
3354         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3355         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3356
3357         spin_unlock(&kvm->mmu_lock);
3358         mutex_unlock(&kvm->slots_lock);
3359         return 0;
3360 }
3361
3362 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3363 {
3364         return kvm->arch.n_max_mmu_pages;
3365 }
3366
3367 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3368 {
3369         int r;
3370
3371         r = 0;
3372         switch (chip->chip_id) {
3373         case KVM_IRQCHIP_PIC_MASTER:
3374                 memcpy(&chip->chip.pic,
3375                         &pic_irqchip(kvm)->pics[0],
3376                         sizeof(struct kvm_pic_state));
3377                 break;
3378         case KVM_IRQCHIP_PIC_SLAVE:
3379                 memcpy(&chip->chip.pic,
3380                         &pic_irqchip(kvm)->pics[1],
3381                         sizeof(struct kvm_pic_state));
3382                 break;
3383         case KVM_IRQCHIP_IOAPIC:
3384                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3385                 break;
3386         default:
3387                 r = -EINVAL;
3388                 break;
3389         }
3390         return r;
3391 }
3392
3393 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3394 {
3395         int r;
3396
3397         r = 0;
3398         switch (chip->chip_id) {
3399         case KVM_IRQCHIP_PIC_MASTER:
3400                 spin_lock(&pic_irqchip(kvm)->lock);
3401                 memcpy(&pic_irqchip(kvm)->pics[0],
3402                         &chip->chip.pic,
3403                         sizeof(struct kvm_pic_state));
3404                 spin_unlock(&pic_irqchip(kvm)->lock);
3405                 break;
3406         case KVM_IRQCHIP_PIC_SLAVE:
3407                 spin_lock(&pic_irqchip(kvm)->lock);
3408                 memcpy(&pic_irqchip(kvm)->pics[1],
3409                         &chip->chip.pic,
3410                         sizeof(struct kvm_pic_state));
3411                 spin_unlock(&pic_irqchip(kvm)->lock);
3412                 break;
3413         case KVM_IRQCHIP_IOAPIC:
3414                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3415                 break;
3416         default:
3417                 r = -EINVAL;
3418                 break;
3419         }
3420         kvm_pic_update_irq(pic_irqchip(kvm));
3421         return r;
3422 }
3423
3424 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3425 {
3426         int r = 0;
3427
3428         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3429         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
3430         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3431         return r;
3432 }
3433
3434 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3435 {
3436         int r = 0;
3437         int i;
3438         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3439         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3440         for (i = 0; i < 3; i++)
3441                 kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
3442         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3443         return r;
3444 }
3445
3446 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3447 {
3448         int r = 0;
3449
3450         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3451         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3452                 sizeof(ps->channels));
3453         ps->flags = kvm->arch.vpit->pit_state.flags;
3454         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3455         memset(&ps->reserved, 0, sizeof(ps->reserved));
3456         return r;
3457 }
3458
3459 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3460 {
3461         int r = 0, start = 0;
3462         int i;
3463         u32 prev_legacy, cur_legacy;
3464         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3465         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3466         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3467         if (!prev_legacy && cur_legacy)
3468                 start = 1;
3469         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3470                sizeof(kvm->arch.vpit->pit_state.channels));
3471         kvm->arch.vpit->pit_state.flags = ps->flags;
3472         for (i = 0; i < 3; i++)
3473                 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count,
3474                                    start && i == 0);
3475         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3476         return r;
3477 }
3478
3479 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3480                                  struct kvm_reinject_control *control)
3481 {
3482         if (!kvm->arch.vpit)
3483                 return -ENXIO;
3484         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3485         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
3486         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3487         return 0;
3488 }
3489
3490 /*
3491  * Get (and clear) the dirty memory log for a memory slot.
3492  */
3493 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3494                                       struct kvm_dirty_log *log)
3495 {
3496         int r, i;
3497         struct kvm_memory_slot *memslot;
3498         unsigned long n;
3499         unsigned long is_dirty = 0;
3500
3501         mutex_lock(&kvm->slots_lock);
3502
3503         r = -EINVAL;
3504         if (log->slot >= KVM_MEMORY_SLOTS)
3505                 goto out;
3506
3507         memslot = &kvm->memslots->memslots[log->slot];
3508         r = -ENOENT;
3509         if (!memslot->dirty_bitmap)
3510                 goto out;
3511
3512         n = kvm_dirty_bitmap_bytes(memslot);
3513
3514         for (i = 0; !is_dirty && i < n/sizeof(long); i++)
3515                 is_dirty = memslot->dirty_bitmap[i];
3516
3517         /* If nothing is dirty, don't bother messing with page tables. */
3518         if (is_dirty) {
3519                 struct kvm_memslots *slots, *old_slots;
3520                 unsigned long *dirty_bitmap;
3521
3522                 dirty_bitmap = memslot->dirty_bitmap_head;
3523                 if (memslot->dirty_bitmap == dirty_bitmap)
3524                         dirty_bitmap += n / sizeof(long);
3525                 memset(dirty_bitmap, 0, n);
3526
3527                 r = -ENOMEM;
3528                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
3529                 if (!slots)
3530                         goto out;
3531                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
3532                 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
3533                 slots->generation++;
3534
3535                 old_slots = kvm->memslots;
3536                 rcu_assign_pointer(kvm->memslots, slots);
3537                 synchronize_srcu_expedited(&kvm->srcu);
3538                 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
3539                 kfree(old_slots);
3540
3541                 spin_lock(&kvm->mmu_lock);
3542                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3543                 spin_unlock(&kvm->mmu_lock);
3544
3545                 r = -EFAULT;
3546                 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
3547                         goto out;
3548         } else {
3549                 r = -EFAULT;
3550                 if (clear_user(log->dirty_bitmap, n))
3551                         goto out;
3552         }
3553
3554         r = 0;
3555 out:
3556         mutex_unlock(&kvm->slots_lock);
3557         return r;
3558 }
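
/*
 * The dirty-log path above is double-buffered: dirty_bitmap_head holds
 * two consecutive n-byte bitmaps.  The inactive half is zeroed and
 * installed as the live bitmap through the RCU memslot swap, and once
 * synchronize_srcu_expedited() has drained in-flight writers, the half
 * the guest had been dirtying is what gets copied out to userspace.
 */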
3559
3560 long kvm_arch_vm_ioctl(struct file *filp,
3561                        unsigned int ioctl, unsigned long arg)
3562 {
3563         struct kvm *kvm = filp->private_data;
3564         void __user *argp = (void __user *)arg;
3565         int r = -ENOTTY;
3566         /*
3567          * This union makes it completely explicit to gcc-3.x
3568          * that these two variables' stack usage should be
3569          * combined, not added together.
3570          */
3571         union {
3572                 struct kvm_pit_state ps;
3573                 struct kvm_pit_state2 ps2;
3574                 struct kvm_pit_config pit_config;
3575         } u;
3576
3577         switch (ioctl) {
3578         case KVM_SET_TSS_ADDR:
3579                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3580                 if (r < 0)
3581                         goto out;
3582                 break;
3583         case KVM_SET_IDENTITY_MAP_ADDR: {
3584                 u64 ident_addr;
3585
3586                 r = -EFAULT;
3587                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3588                         goto out;
3589                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3590                 if (r < 0)
3591                         goto out;
3592                 break;
3593         }
3594         case KVM_SET_NR_MMU_PAGES:
3595                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3596                 if (r)
3597                         goto out;
3598                 break;
3599         case KVM_GET_NR_MMU_PAGES:
3600                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3601                 break;
3602         case KVM_CREATE_IRQCHIP: {
3603                 struct kvm_pic *vpic;
3604
3605                 mutex_lock(&kvm->lock);
3606                 r = -EEXIST;
3607                 if (kvm->arch.vpic)
3608                         goto create_irqchip_unlock;
3609                 r = -EINVAL;
3610                 if (atomic_read(&kvm->online_vcpus))
3611                         goto create_irqchip_unlock;
3612                 r = -ENOMEM;
3613                 vpic = kvm_create_pic(kvm);
3614                 if (vpic) {
3615                         r = kvm_ioapic_init(kvm);
3616                         if (r) {
3617                                 mutex_lock(&kvm->slots_lock);
3618                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3619                                                           &vpic->dev_master);
3620                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3621                                                           &vpic->dev_slave);
3622                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3623                                                           &vpic->dev_eclr);
3624                                 mutex_unlock(&kvm->slots_lock);
3625                                 kfree(vpic);
3626                                 goto create_irqchip_unlock;
3627                         }
3628                 } else
3629                         goto create_irqchip_unlock;
3630                 smp_wmb();
3631                 kvm->arch.vpic = vpic;
3632                 smp_wmb();
3633                 r = kvm_setup_default_irq_routing(kvm);
3634                 if (r) {
3635                         mutex_lock(&kvm->slots_lock);
3636                         mutex_lock(&kvm->irq_lock);
3637                         kvm_ioapic_destroy(kvm);
3638                         kvm_destroy_pic(kvm);
3639                         mutex_unlock(&kvm->irq_lock);
3640                         mutex_unlock(&kvm->slots_lock);
3641                 }
3642         create_irqchip_unlock:
3643                 mutex_unlock(&kvm->lock);
3644                 break;
3645         }
3646         case KVM_CREATE_PIT:
3647                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3648                 goto create_pit;
3649         case KVM_CREATE_PIT2:
3650                 r = -EFAULT;
3651                 if (copy_from_user(&u.pit_config, argp,
3652                                    sizeof(struct kvm_pit_config)))
3653                         goto out;
3654         create_pit:
3655                 mutex_lock(&kvm->slots_lock);
3656                 r = -EEXIST;
3657                 if (kvm->arch.vpit)
3658                         goto create_pit_unlock;
3659                 r = -ENOMEM;
3660                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3661                 if (kvm->arch.vpit)
3662                         r = 0;
3663         create_pit_unlock:
3664                 mutex_unlock(&kvm->slots_lock);
3665                 break;
3666         case KVM_IRQ_LINE_STATUS:
3667         case KVM_IRQ_LINE: {
3668                 struct kvm_irq_level irq_event;
3669
3670                 r = -EFAULT;
3671                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3672                         goto out;
3673                 r = -ENXIO;
3674                 if (irqchip_in_kernel(kvm)) {
3675                         __s32 status;
3676                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3677                                         irq_event.irq, irq_event.level);
3678                         if (ioctl == KVM_IRQ_LINE_STATUS) {
3679                                 r = -EFAULT;
3680                                 irq_event.status = status;
3681                                 if (copy_to_user(argp, &irq_event,
3682                                                         sizeof irq_event))
3683                                         goto out;
3684                         }
3685                         r = 0;
3686                 }
3687                 break;
3688         }
3689         case KVM_GET_IRQCHIP: {
3690                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3691                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3692
3693                 r = -ENOMEM;
3694                 if (!chip)
3695                         goto out;
3696                 r = -EFAULT;
3697                 if (copy_from_user(chip, argp, sizeof *chip))
3698                         goto get_irqchip_out;
3699                 r = -ENXIO;
3700                 if (!irqchip_in_kernel(kvm))
3701                         goto get_irqchip_out;
3702                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3703                 if (r)
3704                         goto get_irqchip_out;
3705                 r = -EFAULT;
3706                 if (copy_to_user(argp, chip, sizeof *chip))
3707                         goto get_irqchip_out;
3708                 r = 0;
3709         get_irqchip_out:
3710                 kfree(chip);
3711                 if (r)
3712                         goto out;
3713                 break;
3714         }
3715         case KVM_SET_IRQCHIP: {
3716                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3717                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3718
3719                 r = -ENOMEM;
3720                 if (!chip)
3721                         goto out;
3722                 r = -EFAULT;
3723                 if (copy_from_user(chip, argp, sizeof *chip))
3724                         goto set_irqchip_out;
3725                 r = -ENXIO;
3726                 if (!irqchip_in_kernel(kvm))
3727                         goto set_irqchip_out;
3728                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3729                 if (r)
3730                         goto set_irqchip_out;
3731                 r = 0;
3732         set_irqchip_out:
3733                 kfree(chip);
3734                 if (r)
3735                         goto out;
3736                 break;
3737         }
3738         case KVM_GET_PIT: {
3739                 r = -EFAULT;
3740                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3741                         goto out;
3742                 r = -ENXIO;
3743                 if (!kvm->arch.vpit)
3744                         goto out;
3745                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3746                 if (r)
3747                         goto out;
3748                 r = -EFAULT;
3749                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3750                         goto out;
3751                 r = 0;
3752                 break;
3753         }
3754         case KVM_SET_PIT: {
3755                 r = -EFAULT;
3756                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3757                         goto out;
3758                 r = -ENXIO;
3759                 if (!kvm->arch.vpit)
3760                         goto out;
3761                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3762                 if (r)
3763                         goto out;
3764                 r = 0;
3765                 break;
3766         }
3767         case KVM_GET_PIT2: {
3768                 r = -ENXIO;
3769                 if (!kvm->arch.vpit)
3770                         goto out;
3771                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3772                 if (r)
3773                         goto out;
3774                 r = -EFAULT;
3775                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3776                         goto out;
3777                 r = 0;
3778                 break;
3779         }
3780         case KVM_SET_PIT2: {
3781                 r = -EFAULT;
3782                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3783                         goto out;
3784                 r = -ENXIO;
3785                 if (!kvm->arch.vpit)
3786                         goto out;
3787                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3788                 if (r)
3789                         goto out;
3790                 r = 0;
3791                 break;
3792         }
3793         case KVM_REINJECT_CONTROL: {
3794                 struct kvm_reinject_control control;
3795                 r =  -EFAULT;
3796                 if (copy_from_user(&control, argp, sizeof(control)))
3797                         goto out;
3798                 r = kvm_vm_ioctl_reinject(kvm, &control);
3799                 if (r)
3800                         goto out;
3801                 r = 0;
3802                 break;
3803         }
3804         case KVM_XEN_HVM_CONFIG: {
3805                 r = -EFAULT;
3806                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3807                                    sizeof(struct kvm_xen_hvm_config)))
3808                         goto out;
3809                 r = -EINVAL;
3810                 if (kvm->arch.xen_hvm_config.flags)
3811                         goto out;
3812                 r = 0;
3813                 break;
3814         }
3815         case KVM_SET_CLOCK: {
3816                 struct kvm_clock_data user_ns;
3817                 u64 now_ns;
3818                 s64 delta;
3819
3820                 r = -EFAULT;
3821                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3822                         goto out;
3823
3824                 r = -EINVAL;
3825                 if (user_ns.flags)
3826                         goto out;
3827
3828                 r = 0;
3829                 local_irq_disable();
3830                 now_ns = get_kernel_ns();
3831                 delta = user_ns.clock - now_ns;
3832                 local_irq_enable();
3833                 kvm->arch.kvmclock_offset = delta;
3834                 break;
3835         }
3836         case KVM_GET_CLOCK: {
3837                 struct kvm_clock_data user_ns;
3838                 u64 now_ns;
3839
3840                 local_irq_disable();
3841                 now_ns = get_kernel_ns();
3842                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3843                 local_irq_enable();
3844                 user_ns.flags = 0;
3845                 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3846
3847                 r = -EFAULT;
3848                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3849                         goto out;
3850                 r = 0;
3851                 break;
3852         }
3853
3854         default:
3855                 break;
3856         }
3857 out:
3858         return r;
3859 }
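
/*
 * Illustrative userspace sketch (assumes a VM fd "vm_fd" from
 * KVM_CREATE_VM): per the KVM_CREATE_IRQCHIP handler above, the in-kernel
 * irqchip must be created before any vcpus exist, and the PIT is
 * typically created right after it.
 *
 *      struct kvm_pit_config pit = { .flags = 0 };
 *
 *      ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
 *      ioctl(vm_fd, KVM_CREATE_PIT2, &pit);
 */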
3860
3861 static void kvm_init_msr_list(void)
3862 {
3863         u32 dummy[2];
3864         unsigned i, j;
3865
3866         /* Keep the KVM-specific MSRs at the head of the list; probe only the rest. */
3867         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3868                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3869                         continue;
3870                 if (j < i)
3871                         msrs_to_save[j] = msrs_to_save[i];
3872                 j++;
3873         }
3874         num_msrs_to_save = j;
3875 }
3876
3877 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3878                            const void *v)
3879 {
3880         int handled = 0;
3881         int n;
3882
3883         do {
3884                 n = min(len, 8);
3885                 if (!(vcpu->arch.apic &&
3886                       !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
3887                     && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3888                         break;
3889                 handled += n;
3890                 addr += n;
3891                 len -= n;
3892                 v += n;
3893         } while (len);
3894
3895         return handled;
3896 }
3897
3898 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3899 {
3900         int handled = 0;
3901         int n;
3902
3903         do {
3904                 n = min(len, 8);
3905                 if (!(vcpu->arch.apic &&
3906                       !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
3907                     && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3908                         break;
3909                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
3910                 handled += n;
3911                 addr += n;
3912                 len -= n;
3913                 v += n;
3914         } while (len);
3915
3916         return handled;
3917 }
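
/*
 * Both MMIO helpers above walk the access in chunks of at most eight
 * bytes, offering each chunk to the in-kernel local APIC first and then
 * to the MMIO bus, and return how many bytes were actually handled so
 * the caller can hand the remainder to userspace.
 */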
3918
3919 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3920                         struct kvm_segment *var, int seg)
3921 {
3922         kvm_x86_ops->set_segment(vcpu, var, seg);
3923 }
3924
3925 void kvm_get_segment(struct kvm_vcpu *vcpu,
3926                      struct kvm_segment *var, int seg)
3927 {
3928         kvm_x86_ops->get_segment(vcpu, var, seg);
3929 }
3930
3931 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3932 {
3933         return gpa;
3934 }
3935
3936 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3937 {
3938         gpa_t t_gpa;
3939         struct x86_exception exception;
3940
3941         BUG_ON(!mmu_is_nested(vcpu));
3942
3943         /* NPT walks are always user-walks */
3944         access |= PFERR_USER_MASK;
3945         t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
3946
3947         return t_gpa;
3948 }
3949
3950 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3951                               struct x86_exception *exception)
3952 {
3953         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3954         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3955 }
3956
3957 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3958                                struct x86_exception *exception)
3959 {
3960         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3961         access |= PFERR_FETCH_MASK;
3962         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3963 }
3964
3965 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
3966                                struct x86_exception *exception)
3967 {
3968         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3969         access |= PFERR_WRITE_MASK;
3970         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3971 }
3972
3973 /* used to access guest memory regardless of CPL (no user-mode access check) */
3974 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
3975                                 struct x86_exception *exception)
3976 {
3977         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
3978 }
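
/*
 * The gva_to_gpa variants above differ only in the PFERR_* bits handed to
 * the page walker: read adds nothing beyond the CPL-derived user bit,
 * fetch adds PFERR_FETCH_MASK, write adds PFERR_WRITE_MASK, and the
 * system variant passes no access bits at all.
 */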
3979
3980 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3981                                       struct kvm_vcpu *vcpu, u32 access,
3982                                       struct x86_exception *exception)
3983 {
3984         void *data = val;
3985         int r = X86EMUL_CONTINUE;
3986
3987         while (bytes) {
3988                 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3989                                                             exception);
3990                 unsigned offset = addr & (PAGE_SIZE-1);
3991                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3992                 int ret;
3993
3994                 if (gpa == UNMAPPED_GVA)
3995                         return X86EMUL_PROPAGATE_FAULT;
3996                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3997                 if (ret < 0) {
3998                         r = X86EMUL_IO_NEEDED;
3999                         goto out;
4000                 }
4001
4002                 bytes -= toread;
4003                 data += toread;
4004                 addr += toread;
4005         }
4006 out:
4007         return r;
4008 }
4009
4010 /* used for instruction fetching */
4011 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4012                                 gva_t addr, void *val, unsigned int bytes,
4013                                 struct x86_exception *exception)
4014 {
4015         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4016         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4017
4018         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
4019                                           access | PFERR_FETCH_MASK,
4020                                           exception);
4021 }
4022
4023 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4024                                gva_t addr, void *val, unsigned int bytes,
4025                                struct x86_exception *exception)
4026 {
4027         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4028         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4029
4030         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4031                                           exception);
4032 }
4033 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4034
4035 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4036                                       gva_t addr, void *val, unsigned int bytes,
4037                                       struct x86_exception *exception)
4038 {
4039         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4040         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4041 }
4042
4043 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4044                                        gva_t addr, void *val,
4045                                        unsigned int bytes,
4046                                        struct x86_exception *exception)
4047 {
4048         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4049         void *data = val;
4050         int r = X86EMUL_CONTINUE;
4051
4052         while (bytes) {
4053                 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4054                                                             PFERR_WRITE_MASK,
4055                                                             exception);
4056                 unsigned offset = addr & (PAGE_SIZE-1);
4057                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
4058                 int ret;
4059
4060                 if (gpa == UNMAPPED_GVA)
4061                         return X86EMUL_PROPAGATE_FAULT;
4062                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
4063                 if (ret < 0) {
4064                         r = X86EMUL_IO_NEEDED;
4065                         goto out;
4066                 }
4067
4068                 bytes -= towrite;
4069                 data += towrite;
4070                 addr += towrite;
4071         }
4072 out:
4073         return r;
4074 }
4075 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
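
/*
 * Both guest-virtual helpers above linearize at page granularity: each
 * loop iteration translates addr, clamps the chunk to the end of the
 * page, and maps a failed translation to X86EMUL_PROPAGATE_FAULT or a
 * failed guest access to X86EMUL_IO_NEEDED for the emulator to handle.
 */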
4076
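/*
 * Translate a guest virtual address for an emulated memory access.
 * Returns -1 on translation fault, 1 when the address must be treated as
 * MMIO (including the APIC access page), and 0 for ordinary RAM.
 */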
4077 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4078                                 gpa_t *gpa, struct x86_exception *exception,
4079                                 bool write)
4080 {
4081         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4082
4083         if (vcpu_match_mmio_gva(vcpu, gva) &&
4084                   check_write_user_access(vcpu, write, access,
4085                   vcpu->arch.access)) {
4086                 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4087                                         (gva & (PAGE_SIZE - 1));
4088                 trace_vcpu_match_mmio(gva, *gpa, write, false);
4089                 return 1;
4090         }
4091
4092         if (write)
4093                 access |= PFERR_WRITE_MASK;
4094
4095         *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4096
4097         if (*gpa == UNMAPPED_GVA)
4098                 return -1;
4099
4100         /* For APIC access vmexit */
4101         if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4102                 return 1;
4103
4104         if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
4105                 trace_vcpu_match_mmio(gva, *gpa, write, true);
4106                 return 1;
4107         }
4108
4109         return 0;
4110 }
4111
4112 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4113                         const void *val, int bytes)
4114 {
4115         int ret;
4116
4117         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
4118         if (ret < 0)
4119                 return 0;
4120         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
4121         return 1;
4122 }
4123
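/*
 * Strategy table for emulator_read_write(): one instance handles reads,
 * one handles writes, so the page-splitting and MMIO hand-off logic is
 * shared between both directions.
 */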
4124 struct read_write_emulator_ops {
4125         int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4126                                   int bytes);
4127         int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4128                                   void *val, int bytes);
4129         int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4130                                int bytes, void *val);
4131         int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4132                                     void *val, int bytes);
4133         bool write;
4134 };
4135
4136 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4137 {
4138         if (vcpu->mmio_read_completed) {
4139                 memcpy(val, vcpu->mmio_data, bytes);
4140                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4141                                vcpu->mmio_phys_addr, *(u64 *)val);
4142                 vcpu->mmio_read_completed = 0;
4143                 return 1;
4144         }
4145
4146         return 0;
4147 }
4148
4149 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4150                         void *val, int bytes)
4151 {
4152         return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
4153 }
4154
4155 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4156                          void *val, int bytes)
4157 {
4158         return emulator_write_phys(vcpu, gpa, val, bytes);
4159 }
4160
4161 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4162 {
4163         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
4164         return vcpu_mmio_write(vcpu, gpa, bytes, val);
4165 }
4166
4167 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4168                           void *val, int bytes)
4169 {
4170         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
4171         return X86EMUL_IO_NEEDED;
4172 }
4173
4174 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4175                            void *val, int bytes)
4176 {
4177         memcpy(vcpu->mmio_data, val, bytes);
4178         memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
4179         return X86EMUL_CONTINUE;
4180 }
4181
4182 static struct read_write_emulator_ops read_emulator = {
4183         .read_write_prepare = read_prepare,
4184         .read_write_emulate = read_emulate,
4185         .read_write_mmio = vcpu_mmio_read,
4186         .read_write_exit_mmio = read_exit_mmio,
4187 };
4188
4189 static struct read_write_emulator_ops write_emulator = {
4190         .read_write_emulate = write_emulate,
4191         .read_write_mmio = write_mmio,
4192         .read_write_exit_mmio = write_exit_mmio,
4193         .write = true,
4194 };
4195
4196 static int emulator_read_write_onepage(unsigned long addr, void *val,
4197                                        unsigned int bytes,
4198                                        struct x86_exception *exception,
4199                                        struct kvm_vcpu *vcpu,
4200                                        struct read_write_emulator_ops *ops)
4201 {
4202         gpa_t gpa;
4203         int handled, ret;
4204         bool write = ops->write;
4205
4206         if (ops->read_write_prepare &&
4207                   ops->read_write_prepare(vcpu, val, bytes))
4208                 return X86EMUL_CONTINUE;
4209
4210         ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4211
4212         if (ret < 0)
4213                 return X86EMUL_PROPAGATE_FAULT;
4214
4215         /* For APIC access vmexit */
4216         if (ret)
4217                 goto mmio;
4218
4219         if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4220                 return X86EMUL_CONTINUE;
4221
4222 mmio:
4223         /*
4224          * Is this MMIO handled locally?
4225          */
4226         handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4227         if (handled == bytes)
4228                 return X86EMUL_CONTINUE;
4229
4230         gpa += handled;
4231         bytes -= handled;
4232         val += handled;
4233
4234         vcpu->mmio_needed = 1;
4235         vcpu->run->exit_reason = KVM_EXIT_MMIO;
4236         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
4237         vcpu->mmio_size = bytes;
4238         vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
4239         vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
4240         vcpu->mmio_index = 0;
4241
4242         return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4243 }
4244
4245 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
4246                         void *val, unsigned int bytes,
4247                         struct x86_exception *exception,
4248                         struct read_write_emulator_ops *ops)
4249 {
4250         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4251
4252         /* Crossing a page boundary? */
4253         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
4254                 int rc, now;
4255
4256                 now = -addr & ~PAGE_MASK;
4257                 rc = emulator_read_write_onepage(addr, val, now, exception,
4258                                                  vcpu, ops);
4259
4260                 if (rc != X86EMUL_CONTINUE)
4261                         return rc;
4262                 addr += now;
4263                 val += now;
4264                 bytes -= now;
4265         }
4266
4267         return emulator_read_write_onepage(addr, val, bytes, exception,
4268                                            vcpu, ops);
4269 }
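
/*
 * Worked example for the split above: with 4K pages, addr == 0x1ffe and
 * bytes == 4 give "now = -addr & ~PAGE_MASK" == 2, so the two bytes at
 * 0x1ffe-0x1fff are emulated first and the remaining two continue at
 * 0x2000 on the next page.
 */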
4270
4271 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
4272                                   unsigned long addr,
4273                                   void *val,
4274                                   unsigned int bytes,
4275                                   struct x86_exception *exception)
4276 {
4277         return emulator_read_write(ctxt, addr, val, bytes,
4278                                    exception, &read_emulator);
4279 }
4280
4281 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4282                             unsigned long addr,
4283                             const void *val,
4284                             unsigned int bytes,
4285                             struct x86_exception *exception)
4286 {
4287         return emulator_read_write(ctxt, addr, (void *)val, bytes,
4288                                    exception, &write_emulator);
4289 }
4290
4291 #define CMPXCHG_TYPE(t, ptr, old, new) \
4292         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
4293
4294 #ifdef CONFIG_X86_64
4295 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
4296 #else
4297 #  define CMPXCHG64(ptr, old, new) \
4298         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4299 #endif
4300
4301 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
4302                                      unsigned long addr,
4303                                      const void *old,
4304                                      const void *new,
4305                                      unsigned int bytes,
4306                                      struct x86_exception *exception)
4307 {
4308         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4309         gpa_t gpa;
4310         struct page *page;
4311         char *kaddr;
4312         bool exchanged;
4313
4314         /* a guest's cmpxchg8b has to be emulated atomically */
4315         if (bytes > 8 || (bytes & (bytes - 1)))
4316                 goto emul_write;
4317
4318         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4319
4320         if (gpa == UNMAPPED_GVA ||
4321             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4322                 goto emul_write;
4323
4324         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
4325                 goto emul_write;
4326
4327         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4328         if (is_error_page(page)) {
4329                 kvm_release_page_clean(page);
4330                 goto emul_write;
4331         }
4332
4333         kaddr = kmap_atomic(page, KM_USER0);
4334         kaddr += offset_in_page(gpa);
4335         switch (bytes) {
4336         case 1:
4337                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
4338                 break;
4339         case 2:
4340                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
4341                 break;
4342         case 4:
4343                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
4344                 break;
4345         case 8:
4346                 exchanged = CMPXCHG64(kaddr, old, new);
4347                 break;
4348         default:
4349                 BUG();
4350         }
4351         kunmap_atomic(kaddr, KM_USER0);
4352         kvm_release_page_dirty(page);
4353
4354         if (!exchanged)
4355                 return X86EMUL_CMPXCHG_FAILED;
4356
4357         kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
4358
4359         return X86EMUL_CONTINUE;
4360
4361 emul_write:
4362         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4363
4364         return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4365 }
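
/*
 * The "(bytes & (bytes - 1))" test above rejects sizes that are not a
 * power of two (e.g. 3 & 2 == 2), so only 1-, 2-, 4- and 8-byte
 * exchanges that stay within one page take the mapped-page path;
 * everything else degrades to a plain emulated write with a one-time
 * warning.
 */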
4366
4367 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4368 {
4369         /* TODO: string I/O for in-kernel devices */
4370         int r;
4371
4372         if (vcpu->arch.pio.in)
4373                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
4374                                     vcpu->arch.pio.size, pd);
4375         else
4376                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
4377                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
4378                                      pd);
4379         return r;
4380 }
4381
4382
4383 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4384                                     int size, unsigned short port, void *val,
4385                                     unsigned int count)
4386 {
4387         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4388
4389         if (vcpu->arch.pio.count)
4390                 goto data_avail;
4391
4392         trace_kvm_pio(0, port, size, count);
4393
4394         vcpu->arch.pio.port = port;
4395         vcpu->arch.pio.in = 1;
4396         vcpu->arch.pio.count  = count;
4397         vcpu->arch.pio.size = size;
4398
4399         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4400         data_avail:
4401                 memcpy(val, vcpu->arch.pio_data, size * count);
4402                 vcpu->arch.pio.count = 0;
4403                 return 1;
4404         }
4405
4406         vcpu->run->exit_reason = KVM_EXIT_IO;
4407         vcpu->run->io.direction = KVM_EXIT_IO_IN;
4408         vcpu->run->io.size = size;
4409         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4410         vcpu->run->io.count = count;
4411         vcpu->run->io.port = port;
4412
4413         return 0;
4414 }
4415
4416 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4417                                      int size, unsigned short port,
4418                                      const void *val, unsigned int count)
4419 {
4420         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4421
4422         trace_kvm_pio(1, port, size, count);
4423
4424         vcpu->arch.pio.port = port;
4425         vcpu->arch.pio.in = 0;
4426         vcpu->arch.pio.count = count;
4427         vcpu->arch.pio.size = size;
4428
4429         memcpy(vcpu->arch.pio_data, val, size * count);
4430
4431         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4432                 vcpu->arch.pio.count = 0;
4433                 return 1;
4434         }
4435
4436         vcpu->run->exit_reason = KVM_EXIT_IO;
4437         vcpu->run->io.direction = KVM_EXIT_IO_OUT;
4438         vcpu->run->io.size = size;
4439         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4440         vcpu->run->io.count = count;
4441         vcpu->run->io.port = port;
4442
4443         return 0;
4444 }
4445
4446 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4447 {
4448         return kvm_x86_ops->get_segment_base(vcpu, seg);
4449 }
4450
4451 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4452 {
4453         kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4454 }
4455
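/*
 * WBINVD emulation: when the hardware can intercept WBINVD, the cache
 * flush is limited to the physical CPUs recorded in wbinvd_dirty_mask
 * (those this vCPU has run on); without the intercept we simply execute
 * wbinvd() on the current CPU.
 */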
4456 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4457 {
4458         if (!need_emulate_wbinvd(vcpu))
4459                 return X86EMUL_CONTINUE;
4460
4461         if (kvm_x86_ops->has_wbinvd_exit()) {
4462                 int cpu = get_cpu();
4463
4464                 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4465                 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4466                                 wbinvd_ipi, NULL, 1);
4467                 put_cpu();
4468                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4469         } else
4470                 wbinvd();
4471         return X86EMUL_CONTINUE;
4472 }
4473 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4474
4475 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4476 {
4477         kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
4478 }
4479
4480 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
4481 {
4482         return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4483 }
4484
4485 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
4486 {
4488         return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4489 }
4490
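/*
 * Replace the low 32 bits of a control register value while preserving
 * the upper half; e.g. mk_cr_64(0xffffffff00000000ULL, 0x80000011)
 * yields 0xffffffff80000011.
 */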
4491 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4492 {
4493         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4494 }
4495
4496 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4497 {
4498         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4499         unsigned long value;
4500
4501         switch (cr) {
4502         case 0:
4503                 value = kvm_read_cr0(vcpu);
4504                 break;
4505         case 2:
4506                 value = vcpu->arch.cr2;
4507                 break;
4508         case 3:
4509                 value = kvm_read_cr3(vcpu);
4510                 break;
4511         case 4:
4512                 value = kvm_read_cr4(vcpu);
4513                 break;
4514         case 8:
4515                 value = kvm_get_cr8(vcpu);
4516                 break;
4517         default:
4518                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4519                 return 0;
4520         }
4521
4522         return value;
4523 }
4524
4525 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4526 {
4527         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4528         int res = 0;
4529
4530         switch (cr) {
4531         case 0:
4532                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4533                 break;
4534         case 2:
4535                 vcpu->arch.cr2 = val;
4536                 break;
4537         case 3:
4538                 res = kvm_set_cr3(vcpu, val);
4539                 break;
4540         case 4:
4541                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4542                 break;
4543         case 8:
4544                 res = kvm_set_cr8(vcpu, val);
4545                 break;
4546         default:
4547                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4548                 res = -1;
4549         }
4550
4551         return res;
4552 }
4553
4554 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4555 {
4556         return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4557 }
4558
4559 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4560 {
4561         kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4562 }
4563
4564 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4565 {
4566         kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4567 }
4568
4569 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4570 {
4571         kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4572 }
4573
4574 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4575 {
4576         kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4577 }
4578
4579 static unsigned long emulator_get_cached_segment_base(
4580         struct x86_emulate_ctxt *ctxt, int seg)
4581 {
4582         return get_segment_base(emul_to_vcpu(ctxt), seg);
4583 }
4584
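/*
 * The next two helpers convert between KVM's kvm_segment layout and the
 * architectural descriptor format; note the granularity scaling of the
 * limit (>> 12 on read, << 12 | 0xfff on write) when the G bit is set.
 */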
4585 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4586                                  struct desc_struct *desc, u32 *base3,
4587                                  int seg)
4588 {
4589         struct kvm_segment var;
4590
4591         kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4592         *selector = var.selector;
4593
4594         if (var.unusable)
4595                 return false;
4596
4597         if (var.g)
4598                 var.limit >>= 12;
4599         set_desc_limit(desc, var.limit);
4600         set_desc_base(desc, (unsigned long)var.base);
4601 #ifdef CONFIG_X86_64
4602         if (base3)
4603                 *base3 = var.base >> 32;
4604 #endif
4605         desc->type = var.type;
4606         desc->s = var.s;
4607         desc->dpl = var.dpl;
4608         desc->p = var.present;
4609         desc->avl = var.avl;
4610         desc->l = var.l;
4611         desc->d = var.db;
4612         desc->g = var.g;
4613
4614         return true;
4615 }
4616
4617 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4618                                  struct desc_struct *desc, u32 base3,
4619                                  int seg)
4620 {
4621         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4622         struct kvm_segment var;
4623
4624         var.selector = selector;
4625         var.base = get_desc_base(desc);
4626 #ifdef CONFIG_X86_64
4627         var.base |= ((u64)base3) << 32;
4628 #endif
4629         var.limit = get_desc_limit(desc);
4630         if (desc->g)
4631                 var.limit = (var.limit << 12) | 0xfff;
4632         var.type = desc->type;
4633         var.present = desc->p;
4634         var.dpl = desc->dpl;
4635         var.db = desc->d;
4636         var.s = desc->s;
4637         var.l = desc->l;
4638         var.g = desc->g;
4639         var.avl = desc->avl;
4641         var.unusable = !var.present;
4642         var.padding = 0;
4643
4644         kvm_set_segment(vcpu, &var, seg);
4646 }
4647
4648 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4649                             u32 msr_index, u64 *pdata)
4650 {
4651         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
4652 }
4653
4654 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4655                             u32 msr_index, u64 data)
4656 {
4657         return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
4658 }
4659
4660 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4661 {
4662         emul_to_vcpu(ctxt)->arch.halt_request = 1;
4663 }
4664
4665 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4666 {
4667         preempt_disable();
4668         kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4669         /*
4670          * CR0.TS may reference the host fpu state, not the guest fpu state,
4671          * so it may be clear at this point.
4672          */
4673         clts();
4674 }
4675
4676 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4677 {
4678         preempt_enable();
4679 }
4680
4681 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4682                               struct x86_instruction_info *info,
4683                               enum x86_intercept_stage stage)
4684 {
4685         return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4686 }
4687
4688 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4689                                u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4690 {
4691         struct kvm_cpuid_entry2 *cpuid = NULL;
4692
4693         if (eax && ecx)
4694                 cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
4695                                             *eax, *ecx);
4696
4697         if (cpuid) {
4698                 *eax = cpuid->eax;
4699                 *ecx = cpuid->ecx;
4700                 if (ebx)
4701                         *ebx = cpuid->ebx;
4702                 if (edx)
4703                         *edx = cpuid->edx;
4704                 return true;
4705         }
4706
4707         return false;
4708 }
4709
4710 static struct x86_emulate_ops emulate_ops = {
4711         .read_std            = kvm_read_guest_virt_system,
4712         .write_std           = kvm_write_guest_virt_system,
4713         .fetch               = kvm_fetch_guest_virt,
4714         .read_emulated       = emulator_read_emulated,
4715         .write_emulated      = emulator_write_emulated,
4716         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
4717         .invlpg              = emulator_invlpg,
4718         .pio_in_emulated     = emulator_pio_in_emulated,
4719         .pio_out_emulated    = emulator_pio_out_emulated,
4720         .get_segment         = emulator_get_segment,
4721         .set_segment         = emulator_set_segment,
4722         .get_cached_segment_base = emulator_get_cached_segment_base,
4723         .get_gdt             = emulator_get_gdt,
4724         .get_idt             = emulator_get_idt,
4725         .set_gdt             = emulator_set_gdt,
4726         .set_idt             = emulator_set_idt,
4727         .get_cr              = emulator_get_cr,
4728         .set_cr              = emulator_set_cr,
4729         .cpl                 = emulator_get_cpl,
4730         .get_dr              = emulator_get_dr,
4731         .set_dr              = emulator_set_dr,
4732         .set_msr             = emulator_set_msr,
4733         .get_msr             = emulator_get_msr,
4734         .halt                = emulator_halt,
4735         .wbinvd              = emulator_wbinvd,
4736         .fix_hypercall       = emulator_fix_hypercall,
4737         .get_fpu             = emulator_get_fpu,
4738         .put_fpu             = emulator_put_fpu,
4739         .intercept           = emulator_intercept,
4740         .get_cpuid           = emulator_get_cpuid,
4741 };
4742
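/*
 * Prime the register cache for the registers the emulator touches
 * unconditionally, then mark every register dirty, presumably so that
 * the emulator's direct ->regs accesses are written back to the vcpu
 * state on the next entry.
 */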
4743 static void cache_all_regs(struct kvm_vcpu *vcpu)
4744 {
4745         kvm_register_read(vcpu, VCPU_REGS_RAX);
4746         kvm_register_read(vcpu, VCPU_REGS_RSP);
4747         kvm_register_read(vcpu, VCPU_REGS_RIP);
4748         vcpu->arch.regs_dirty = ~0;
4749 }
4750
4751 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4752 {
4753         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
4754         /*
4755          * An "sti; sti" sequence only disables interrupts for the first
4756          * instruction, so if the last instruction, be it emulated or
4757          * not, left the system with the INT_STI flag enabled, it
4758          * means that the last instruction was an sti. We should not
4759          * leave the flag on in this case. The same goes for mov ss.
4760          */
4761         if (!(int_shadow & mask))
4762                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4763 }
4764
4765 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4766 {
4767         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4768         if (ctxt->exception.vector == PF_VECTOR)
4769                 kvm_propagate_fault(vcpu, &ctxt->exception);
4770         else if (ctxt->exception.error_code_valid)
4771                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
4772                                       ctxt->exception.error_code);
4773         else
4774                 kvm_queue_exception(vcpu, ctxt->exception.vector);
4775 }
4776
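/*
 * Reset the per-instruction decode state. The memset relies on the
 * layout of struct x86_emulate_ctxt: everything from ->twobyte up to
 * (but not including) ->regs is decode cache that can be cleared
 * wholesale.
 */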
4777 static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
4778                               const unsigned long *regs)
4779 {
4780         memset(&ctxt->twobyte, 0,
4781                (void *)&ctxt->regs - (void *)&ctxt->twobyte);
4782         memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
4783
4784         ctxt->fetch.start = 0;
4785         ctxt->fetch.end = 0;
4786         ctxt->io_read.pos = 0;
4787         ctxt->io_read.end = 0;
4788         ctxt->mem_read.pos = 0;
4789         ctxt->mem_read.end = 0;
4790 }
4791
4792 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4793 {
4794         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4795         int cs_db, cs_l;
4796
4797         /*
4798          * TODO: fix emulate.c to use guest_read/write_register
4799          * instead of direct ->regs accesses; this can save hundreds of
4800          * cycles on Intel for instructions that don't read/change RSP,
4801          * for example.
4802          */
4803         cache_all_regs(vcpu);
4804
4805         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4806
4807         ctxt->eflags = kvm_get_rflags(vcpu);
4808         ctxt->eip = kvm_rip_read(vcpu);
4809         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
4810                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
4811                      cs_l                               ? X86EMUL_MODE_PROT64 :
4812                      cs_db                              ? X86EMUL_MODE_PROT32 :
4813                                                           X86EMUL_MODE_PROT16;
4814         ctxt->guest_mode = is_guest_mode(vcpu);
4815
4816         init_decode_cache(ctxt, vcpu->arch.regs);
4817         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4818 }
4819
4820 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
4821 {
4822         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4823         int ret;
4824
4825         init_emulate_ctxt(vcpu);
4826
4827         ctxt->op_bytes = 2;
4828         ctxt->ad_bytes = 2;
4829         ctxt->_eip = ctxt->eip + inc_eip;
4830         ret = emulate_int_real(ctxt, irq);
4831
4832         if (ret != X86EMUL_CONTINUE)
4833                 return EMULATE_FAIL;
4834
4835         ctxt->eip = ctxt->_eip;
4836         memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4837         kvm_rip_write(vcpu, ctxt->eip);
4838         kvm_set_rflags(vcpu, ctxt->eflags);
4839
4840         if (irq == NMI_VECTOR)
4841                 vcpu->arch.nmi_pending = 0;
4842         else
4843                 vcpu->arch.interrupt.pending = false;
4844
4845         return EMULATE_DONE;
4846 }
4847 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4848
4849 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4850 {
4851         int r = EMULATE_DONE;
4852
4853         ++vcpu->stat.insn_emulation_fail;
4854         trace_kvm_emulate_insn_failed(vcpu);
4855         if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
4856                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4857                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4858                 vcpu->run->internal.ndata = 0;
4859                 r = EMULATE_FAIL;
4860         }
4861         kvm_queue_exception(vcpu, UD_VECTOR);
4862
4863         return r;
4864 }
4865
4866 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4867 {
4868         gpa_t gpa;
4869
4870         if (tdp_enabled)
4871                 return false;
4872
4873         /*
4874          * If emulation was due to an access to a shadowed page table
4875          * and it failed, try to unshadow the page and re-enter the
4876          * guest to let the CPU execute the instruction.
4877          */
4878         if (kvm_mmu_unprotect_page_virt(vcpu, gva))
4879                 return true;
4880
4881         gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
4882
4883         if (gpa == UNMAPPED_GVA)
4884                 return true; /* let cpu generate fault */
4885
4886         if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
4887                 return true;
4888
4889         return false;
4890 }
4891
4892 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4893                             unsigned long cr2,
4894                             int emulation_type,
4895                             void *insn,
4896                             int insn_len)
4897 {
4898         int r;
4899         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4900         bool writeback = true;
4901
4902         kvm_clear_exception_queue(vcpu);
4903
4904         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4905                 init_emulate_ctxt(vcpu);
4906                 ctxt->interruptibility = 0;
4907                 ctxt->have_exception = false;
4908                 ctxt->perm_ok = false;
4909
4910                 ctxt->only_vendor_specific_insn
4911                         = emulation_type & EMULTYPE_TRAP_UD;
4912
4913                 r = x86_decode_insn(ctxt, insn, insn_len);
4914
4915                 trace_kvm_emulate_insn_start(vcpu);
4916                 ++vcpu->stat.insn_emulation;
4917                 if (r != EMULATION_OK)  {
4918                         if (emulation_type & EMULTYPE_TRAP_UD)
4919                                 return EMULATE_FAIL;
4920                         if (reexecute_instruction(vcpu, cr2))
4921                                 return EMULATE_DONE;
4922                         if (emulation_type & EMULTYPE_SKIP)
4923                                 return EMULATE_FAIL;
4924                         return handle_emulation_failure(vcpu);
4925                 }
4926         }
4927
4928         if (emulation_type & EMULTYPE_SKIP) {
4929                 kvm_rip_write(vcpu, ctxt->_eip);
4930                 return EMULATE_DONE;
4931         }
4932
4933         /* This is needed for the vmware backdoor interface to work since
4934            it changes register values during the IO operation. */
4935         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
4936                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4937                 memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
4938         }
4939
4940 restart:
4941         r = x86_emulate_insn(ctxt);
4942
4943         if (r == EMULATION_INTERCEPTED)
4944                 return EMULATE_DONE;
4945
4946         if (r == EMULATION_FAILED) {
4947                 if (reexecute_instruction(vcpu, cr2))
4948                         return EMULATE_DONE;
4949
4950                 return handle_emulation_failure(vcpu);
4951         }
4952
4953         if (ctxt->have_exception) {
4954                 inject_emulated_exception(vcpu);
4955                 r = EMULATE_DONE;
4956         } else if (vcpu->arch.pio.count) {
4957                 if (!vcpu->arch.pio.in)
4958                         vcpu->arch.pio.count = 0;
4959                 else
4960                         writeback = false;
4961                 r = EMULATE_DO_MMIO;
4962         } else if (vcpu->mmio_needed) {
4963                 if (!vcpu->mmio_is_write)
4964                         writeback = false;
4965                 r = EMULATE_DO_MMIO;
4966         } else if (r == EMULATION_RESTART)
4967                 goto restart;
4968         else
4969                 r = EMULATE_DONE;
4970
4971         if (writeback) {
4972                 toggle_interruptibility(vcpu, ctxt->interruptibility);
4973                 kvm_set_rflags(vcpu, ctxt->eflags);
4974                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4975                 memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4976                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
4977                 kvm_rip_write(vcpu, ctxt->eip);
4978         } else
4979                 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
4980
4981         return r;
4982 }
4983 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
4984
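/*
 * Fast path for "out" with the value already in RAX: reuse the emulator
 * PIO helper but clear pio.count up front so that a round trip through
 * userspace does not bounce back into the emulator.
 */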
4985 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4986 {
4987         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4988         int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
4989                                             size, port, &val, 1);
4990         /* do not return to emulator after return from userspace */
4991         vcpu->arch.pio.count = 0;
4992         return ret;
4993 }
4994 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4995
4996 static void tsc_bad(void *info)
4997 {
4998         __this_cpu_write(cpu_tsc_khz, 0);
4999 }
5000
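/*
 * Recompute this CPU's effective TSC frequency: prefer the new cpufreq
 * value when one is passed in, else query cpufreq if the TSC scales
 * with the core clock, and finally fall back to the boot-time tsc_khz.
 */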
5001 static void tsc_khz_changed(void *data)
5002 {
5003         struct cpufreq_freqs *freq = data;
5004         unsigned long khz = 0;
5005
5006         if (data)
5007                 khz = freq->new;
5008         else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5009                 khz = cpufreq_quick_get(raw_smp_processor_id());
5010         if (!khz)
5011                 khz = tsc_khz;
5012         __this_cpu_write(cpu_tsc_khz, khz);
5013 }
5014
5015 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
5016                                      void *data)
5017 {
5018         struct cpufreq_freqs *freq = data;
5019         struct kvm *kvm;
5020         struct kvm_vcpu *vcpu;
5021         int i, send_ipi = 0;
5022
5023         /*
5024          * We allow guests to temporarily run on slowing clocks,
5025          * provided we notify them after, or to run on accelerating
5026          * clocks, provided we notify them before.  Thus time never
5027          * goes backwards.
5028          *
5029          * However, we have a problem.  We can't atomically update
5030          * the frequency of a given CPU from this function; it is
5031          * merely a notifier, which can be called from any CPU.
5032          * Changing the TSC frequency at arbitrary points in time
5033          * requires a recomputation of local variables related to
5034          * the TSC for each VCPU.  We must flag these local variables
5035          * to be updated and be sure the update takes place with the
5036          * new frequency before any guests proceed.
5037          *
5038          * Unfortunately, the combination of hotplug CPU and frequency
5039          * change creates an intractable locking scenario; the order
5040          * of when these callouts happen is undefined with respect to
5041          * CPU hotplug, and they can race with each other.  As such,
5042          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
5043          * undefined; you can actually have a CPU frequency change take
5044          * place in between the computation of X and the setting of the
5045          * variable.  To protect against this problem, all updates of
5046          * the per_cpu tsc_khz variable are done in an interrupt
5047          * protected IPI, and all callers wishing to update the value
5048          * must wait for a synchronous IPI to complete (which is trivial
5049          * if the caller is on the CPU already).  This establishes the
5050          * necessary total order on variable updates.
5051          *
5052          * Note that because a guest time update may take place
5053          * anytime after the setting of the VCPU's request bit, the
5054          * correct TSC value must be set before the request.  However,
5055          * to ensure the update actually makes it to any guest which
5056          * starts running in hardware virtualization between the set
5057          * and the acquisition of the spinlock, we must also ping the
5058          * CPU after setting the request bit.
5059          *
5060          */
5061
5062         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
5063                 return 0;
5064         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
5065                 return 0;
5066
5067         smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5068
5069         raw_spin_lock(&kvm_lock);
5070         list_for_each_entry(kvm, &vm_list, vm_list) {
5071                 kvm_for_each_vcpu(i, vcpu, kvm) {
5072                         if (vcpu->cpu != freq->cpu)
5073                                 continue;
5074                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5075                         if (vcpu->cpu != smp_processor_id())
5076                                 send_ipi = 1;
5077                 }
5078         }
5079         raw_spin_unlock(&kvm_lock);
5080
5081         if (freq->old < freq->new && send_ipi) {
5082                 /*
5083                  * We upscale the frequency.  We must make sure the guest
5084                  * doesn't see old kvmclock values while running with the
5085                  * new frequency, otherwise we risk the guest seeing time
5086                  * go backwards.
5087                  *
5088                  * If we update the frequency for another cpu (which might
5089                  * be in guest context), send an interrupt to kick the cpu
5090                  * out of guest context.  The next time guest context is
5091                  * entered, kvmclock will be updated, so the guest will
5092                  * not see stale values.
5093                  */
5094                 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5095         }
5096         return 0;
5097 }
5098
5099 static struct notifier_block kvmclock_cpufreq_notifier_block = {
5100         .notifier_call  = kvmclock_cpufreq_notifier
5101 };
5102
5103 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
5104                                         unsigned long action, void *hcpu)
5105 {
5106         unsigned int cpu = (unsigned long)hcpu;
5107
5108         switch (action) {
5109                 case CPU_ONLINE:
5110                 case CPU_DOWN_FAILED:
5111                         smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5112                         break;
5113                 case CPU_DOWN_PREPARE:
5114                         smp_call_function_single(cpu, tsc_bad, NULL, 1);
5115                         break;
5116         }
5117         return NOTIFY_OK;
5118 }
5119
5120 static struct notifier_block kvmclock_cpu_notifier_block = {
5121         .notifier_call  = kvmclock_cpu_notifier,
5122         .priority = -INT_MAX
5123 };
5124
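/*
 * Record the highest TSC frequency we may see (the maximum cpufreq
 * P-state when the TSC is not constant), register the hotplug and
 * cpufreq notifiers, and seed the per-CPU tsc_khz values.
 */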
5125 static void kvm_timer_init(void)
5126 {
5127         int cpu;
5128
5129         max_tsc_khz = tsc_khz;
5130         register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5131         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5132 #ifdef CONFIG_CPU_FREQ
5133                 struct cpufreq_policy policy;
5134                 memset(&policy, 0, sizeof(policy));
5135                 cpu = get_cpu();
5136                 cpufreq_get_policy(&policy, cpu);
5137                 if (policy.cpuinfo.max_freq)
5138                         max_tsc_khz = policy.cpuinfo.max_freq;
5139                 put_cpu();
5140 #endif
5141                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
5142                                           CPUFREQ_TRANSITION_NOTIFIER);
5143         }
5144         pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5145         for_each_online_cpu(cpu)
5146                 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5147 }
5148
5149 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
5150
5151 static int kvm_is_in_guest(void)
5152 {
5153         return percpu_read(current_vcpu) != NULL;
5154 }
5155
5156 static int kvm_is_user_mode(void)
5157 {
5158         int user_mode = 3;
5159
5160         if (percpu_read(current_vcpu))
5161                 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
5162
5163         return user_mode != 0;
5164 }
5165
5166 static unsigned long kvm_get_guest_ip(void)
5167 {
5168         unsigned long ip = 0;
5169
5170         if (percpu_read(current_vcpu))
5171                 ip = kvm_rip_read(percpu_read(current_vcpu));
5172
5173         return ip;
5174 }
5175
5176 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5177         .is_in_guest            = kvm_is_in_guest,
5178         .is_user_mode           = kvm_is_user_mode,
5179         .get_guest_ip           = kvm_get_guest_ip,
5180 };
5181
5182 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
5183 {
5184         percpu_write(current_vcpu, vcpu);
5185 }
5186 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
5187
5188 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
5189 {
5190         percpu_write(current_vcpu, NULL);
5191 }
5192 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
5193
5194 static void kvm_set_mmio_spte_mask(void)
5195 {
5196         u64 mask;
5197         int maxphyaddr = boot_cpu_data.x86_phys_bits;
5198
5199         /*
5200          * Set the reserved bits and the present bit of a paging-structure
5201          * entry to generate a page fault with PFER.RSV = 1.
5202          */
5203         mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
5204         mask |= 1ull;
5205
5206 #ifdef CONFIG_X86_64
5207         /*
5208          * If the reserved bit is not supported, clear the present bit to
5209          * disable the mmio page fault.
5210          */
5211         if (maxphyaddr == 52)
5212                 mask &= ~1ull;
5213 #endif
5214
5215         kvm_mmu_set_mmio_spte_mask(mask);
5216 }
5217
5218 int kvm_arch_init(void *opaque)
5219 {
5220         int r;
5221         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
5222
5223         if (kvm_x86_ops) {
5224                 printk(KERN_ERR "kvm: already loaded the other module\n");
5225                 r = -EEXIST;
5226                 goto out;
5227         }
5228
5229         if (!ops->cpu_has_kvm_support()) {
5230                 printk(KERN_ERR "kvm: no hardware support\n");
5231                 r = -EOPNOTSUPP;
5232                 goto out;
5233         }
5234         if (ops->disabled_by_bios()) {
5235                 printk(KERN_ERR "kvm: disabled by bios\n");
5236                 r = -EOPNOTSUPP;
5237                 goto out;
5238         }
5239
5240         r = kvm_mmu_module_init();
5241         if (r)
5242                 goto out;
5243
5244         kvm_set_mmio_spte_mask();
5245         kvm_init_msr_list();
5246
5247         kvm_x86_ops = ops;
5248         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
5249                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
5250
5251         kvm_timer_init();
5252
5253         perf_register_guest_info_callbacks(&kvm_guest_cbs);
5254
5255         if (cpu_has_xsave)
5256                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
5257
5258         return 0;
5259
5260 out:
5261         return r;
5262 }
5263
5264 void kvm_arch_exit(void)
5265 {
5266         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5267
5268         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5269                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
5270                                             CPUFREQ_TRANSITION_NOTIFIER);
5271         unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5272         kvm_x86_ops = NULL;
5273         kvm_mmu_module_exit();
5274 }
5275
5276 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
5277 {
5278         ++vcpu->stat.halt_exits;
5279         if (irqchip_in_kernel(vcpu->kvm)) {
5280                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
5281                 return 1;
5282         } else {
5283                 vcpu->run->exit_reason = KVM_EXIT_HLT;
5284                 return 0;
5285         }
5286 }
5287 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
5288
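/*
 * Build a hypercall GPA from guest registers: 64-bit guests pass the
 * whole address in a0, 32-bit guests split it with the low half in a0
 * and the high half in a1.
 */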
5289 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
5290                            unsigned long a1)
5291 {
5292         if (is_long_mode(vcpu))
5293                 return a0;
5294         else
5295                 return a0 | ((gpa_t)a1 << 32);
5296 }
5297
5298 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
5299 {
5300         u64 param, ingpa, outgpa, ret;
5301         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
5302         bool fast, longmode;
5303         int cs_db, cs_l;
5304
5305         /*
5306          * Per the Hyper-V spec, a hypercall generates a #UD from non-zero
5307          * CPL or from real mode.
5308          */
5309         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
5310                 kvm_queue_exception(vcpu, UD_VECTOR);
5311                 return 0;
5312         }
5313
5314         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5315         longmode = is_long_mode(vcpu) && cs_l == 1;
5316
5317         if (!longmode) {
5318                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
5319                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
5320                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
5321                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
5322                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
5323                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
5324         }
5325 #ifdef CONFIG_X86_64
5326         else {
5327                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
5328                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
5329                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
5330         }
5331 #endif
5332
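        /*
         * The Hyper-V hypercall input value packs the call code in bits
         * 15:0, the fast-call flag in bit 16, the rep count in bits 43:32
         * and the rep start index in bits 59:48.
         */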
5333         code = param & 0xffff;
5334         fast = (param >> 16) & 0x1;
5335         rep_cnt = (param >> 32) & 0xfff;
5336         rep_idx = (param >> 48) & 0xfff;
5337
5338         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
5339
5340         switch (code) {
5341         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
5342                 kvm_vcpu_on_spin(vcpu);
5343                 break;
5344         default:
5345                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
5346                 break;
5347         }
5348
5349         ret = res | (((u64)rep_done & 0xfff) << 32);
5350         if (longmode) {
5351                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5352         } else {
5353                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
5354                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
5355         }
5356
5357         return 1;
5358 }
5359
5360 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5361 {
5362         unsigned long nr, a0, a1, a2, a3, ret;
5363         int r = 1;
5364
5365         if (kvm_hv_hypercall_enabled(vcpu->kvm))
5366                 return kvm_hv_hypercall(vcpu);
5367
5368         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
5369         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
5370         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
5371         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
5372         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
5373
5374         trace_kvm_hypercall(nr, a0, a1, a2, a3);
5375
5376         if (!is_long_mode(vcpu)) {
5377                 nr &= 0xFFFFFFFF;
5378                 a0 &= 0xFFFFFFFF;
5379                 a1 &= 0xFFFFFFFF;
5380                 a2 &= 0xFFFFFFFF;
5381                 a3 &= 0xFFFFFFFF;
5382         }
5383
5384         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5385                 ret = -KVM_EPERM;
5386                 goto out;
5387         }
5388
5389         switch (nr) {
5390         case KVM_HC_VAPIC_POLL_IRQ:
5391                 ret = 0;
5392                 break;
5393         case KVM_HC_MMU_OP:
5394                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
5395                 break;
5396         default:
5397                 ret = -KVM_ENOSYS;
5398                 break;
5399         }
5400 out:
5401         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5402         ++vcpu->stat.hypercalls;
5403         return r;
5404 }
5405 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
5406
5407 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5408 {
5409         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5410         char instruction[3];
5411         unsigned long rip = kvm_rip_read(vcpu);
5412
5413         /*
5414          * Blow out the MMU so that no other VCPU has an active mapping,
5415          * ensuring that the updated hypercall appears atomically across
5416          * all VCPUs.
5417          */
5418         kvm_mmu_zap_all(vcpu->kvm);
5419
5420         kvm_x86_ops->patch_hypercall(vcpu, instruction);
5421
5422         return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5423 }
5424
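/*
 * Stateful CPUID leaves (e.g. leaf 2 on some CPUs) return a different
 * entry on each invocation; KVM_CPUID_FLAG_STATE_READ_NEXT marks which
 * of the duplicated entries for a function should be returned next.
 */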
5425 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
5426 {
5427         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
5428         int j, nent = vcpu->arch.cpuid_nent;
5429
5430         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
5431         /* when no next entry is found, the current entry[i] is reselected */
5432         for (j = i + 1; ; j = (j + 1) % nent) {
5433                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
5434                 if (ej->function == e->function) {
5435                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
5436                         return j;
5437                 }
5438         }
5439         return 0; /* silence gcc, even though control never reaches here */
5440 }
5441
5442 /* find an entry with matching function, matching index (if needed), and that
5443  * should be read next (if it's stateful) */
5444 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
5445         u32 function, u32 index)
5446 {
5447         if (e->function != function)
5448                 return 0;
5449         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
5450                 return 0;
5451         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
5452             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
5453                 return 0;
5454         return 1;
5455 }
5456
5457 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
5458                                               u32 function, u32 index)
5459 {
5460         int i;
5461         struct kvm_cpuid_entry2 *best = NULL;
5462
5463         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
5464                 struct kvm_cpuid_entry2 *e;
5465
5466                 e = &vcpu->arch.cpuid_entries[i];
5467                 if (is_matching_cpuid_entry(e, function, index)) {
5468                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
5469                                 move_to_next_stateful_cpuid_entry(vcpu, i);
5470                         best = e;
5471                         break;
5472                 }
5473         }
5474         return best;
5475 }
5476 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
5477
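/*
 * MAXPHYADDR is reported in bits 7:0 of CPUID.80000008H:EAX; when the
 * guest's CPUID does not reach that leaf, fall back to the legacy
 * default of 36 bits.
 */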
5478 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
5479 {
5480         struct kvm_cpuid_entry2 *best;
5481
5482         best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
5483         if (!best || best->eax < 0x80000008)
5484                 goto not_found;
5485         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
5486         if (best)
5487                 return best->eax & 0xff;
5488 not_found:
5489         return 36;
5490 }
5491
5492 /*
5493  * If no match is found, check whether we exceed the vCPU's limit
5494  * and return the content of the highest valid _standard_ leaf instead.
5495  * This is to satisfy the CPUID specification.
5496  */
5497 static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
5498                                                   u32 function, u32 index)
5499 {
5500         struct kvm_cpuid_entry2 *maxlevel;
5501
5502         maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
5503         if (!maxlevel || maxlevel->eax >= function)
5504                 return NULL;
5505         if (function & 0x80000000) {
5506                 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
5507                 if (!maxlevel)
5508                         return NULL;
5509         }
5510         return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
5511 }
5512
5513 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
5514 {
5515         u32 function, index;
5516         struct kvm_cpuid_entry2 *best;
5517
5518         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
5519         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
5520         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
5521         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
5522         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
5523         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
5524         best = kvm_find_cpuid_entry(vcpu, function, index);
5525
5526         if (!best)
5527                 best = check_cpuid_limit(vcpu, function, index);
5528
5529         if (best) {
5530                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
5531                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
5532                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
5533                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
5534         }
5535         kvm_x86_ops->skip_emulated_instruction(vcpu);
5536         trace_kvm_cpuid(function,
5537                         kvm_register_read(vcpu, VCPU_REGS_RAX),
5538                         kvm_register_read(vcpu, VCPU_REGS_RBX),
5539                         kvm_register_read(vcpu, VCPU_REGS_RCX),
5540                         kvm_register_read(vcpu, VCPU_REGS_RDX));
5541 }
5542 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
5543
5544 /*
5545  * Check if userspace requested an interrupt window, and that the
5546  * interrupt window is open.
5547  *
5548  * No need to exit to userspace if we already have an interrupt queued.
5549  */
5550 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5551 {
5552         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
5553                 vcpu->run->request_interrupt_window &&
5554                 kvm_arch_interrupt_allowed(vcpu));
5555 }
5556
5557 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5558 {
5559         struct kvm_run *kvm_run = vcpu->run;
5560
5561         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
5562         kvm_run->cr8 = kvm_get_cr8(vcpu);
5563         kvm_run->apic_base = kvm_get_apic_base(vcpu);
5564         if (irqchip_in_kernel(vcpu->kvm))
5565                 kvm_run->ready_for_interrupt_injection = 1;
5566         else
5567                 kvm_run->ready_for_interrupt_injection =
5568                         kvm_arch_interrupt_allowed(vcpu) &&
5569                         !kvm_cpu_has_interrupt(vcpu) &&
5570                         !kvm_event_needs_reinjection(vcpu);
5571 }
5572
5573 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5574 {
5575         int max_irr, tpr;
5576
5577         if (!kvm_x86_ops->update_cr8_intercept)
5578                 return;
5579
5580         if (!vcpu->arch.apic)
5581                 return;
5582
5583         if (!vcpu->arch.apic->vapic_addr)
5584                 max_irr = kvm_lapic_find_highest_irr(vcpu);
5585         else
5586                 max_irr = -1;
5587
5588         if (max_irr != -1)
5589                 max_irr >>= 4;
5590
5591         tpr = kvm_lapic_get_cr8(vcpu);
5592
5593         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
5594 }
5595
5596 static void inject_pending_event(struct kvm_vcpu *vcpu)
5597 {
5598         /* try to reinject previous events if any */
5599         if (vcpu->arch.exception.pending) {
5600                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
5601                                         vcpu->arch.exception.has_error_code,
5602                                         vcpu->arch.exception.error_code);
5603                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
5604                                           vcpu->arch.exception.has_error_code,
5605                                           vcpu->arch.exception.error_code,
5606                                           vcpu->arch.exception.reinject);
5607                 return;
5608         }
5609
5610         if (vcpu->arch.nmi_injected) {
5611                 kvm_x86_ops->set_nmi(vcpu);
5612                 return;
5613         }
5614
5615         if (vcpu->arch.interrupt.pending) {
5616                 kvm_x86_ops->set_irq(vcpu);
5617                 return;
5618         }
5619
5620         /* try to inject new event if pending */
5621         if (vcpu->arch.nmi_pending) {
5622                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
5623                         --vcpu->arch.nmi_pending;
5624                         vcpu->arch.nmi_injected = true;
5625                         kvm_x86_ops->set_nmi(vcpu);
5626                 }
5627         } else if (kvm_cpu_has_interrupt(vcpu)) {
5628                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5629                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5630                                             false);
5631                         kvm_x86_ops->set_irq(vcpu);
5632                 }
5633         }
5634 }
5635
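/*
 * Swap XCR0 between the host and guest values around guest execution.
 * The guest value is only loaded when the guest has CR4.OSXSAVE set,
 * i.e. when it can actually program XCR0; on the way out the host value
 * is restored only if it differs.
 */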
5636 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
5637 {
5638         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
5639                         !vcpu->guest_xcr0_loaded) {
5640                 /* kvm_set_xcr() also depends on this */
5641                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
5642                 vcpu->guest_xcr0_loaded = 1;
5643         }
5644 }
5645
5646 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
5647 {
5648         if (vcpu->guest_xcr0_loaded) {
5649                 if (vcpu->arch.xcr0 != host_xcr0)
5650                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
5651                 vcpu->guest_xcr0_loaded = 0;
5652         }
5653 }
5654
5655 static void process_nmi(struct kvm_vcpu *vcpu)
5656 {
5657         unsigned limit = 2;
5658
5659         /*
5660          * x86 is limited to one NMI running, and one NMI pending after it.
5661          * If an NMI is already in progress, limit further NMIs to just one.
5662          * Otherwise, allow two (and we'll inject the first one immediately).
5663          */
5664         if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
5665                 limit = 1;
5666
5667         vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
5668         vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
5669         kvm_make_request(KVM_REQ_EVENT, vcpu);
5670 }
5671
5672 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5673 {
5674         int r;
5675         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5676                 vcpu->run->request_interrupt_window;
5677
5678         if (vcpu->requests) {
5679                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
5680                         kvm_mmu_unload(vcpu);
5681                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
5682                         __kvm_migrate_timers(vcpu);
5683                 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
5684                         r = kvm_guest_time_update(vcpu);
5685                         if (unlikely(r))
5686                                 goto out;
5687                 }
5688                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
5689                         kvm_mmu_sync_roots(vcpu);
5690                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
5691                         kvm_x86_ops->tlb_flush(vcpu);
5692                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
5693                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
5694                         r = 0;
5695                         goto out;
5696                 }
5697                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
5698                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5699                         r = 0;
5700                         goto out;
5701                 }
5702                 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
5703                         vcpu->fpu_active = 0;
5704                         kvm_x86_ops->fpu_deactivate(vcpu);
5705                 }
5706                 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
5707                         /* Page is swapped out. Do synthetic halt */
5708                         vcpu->arch.apf.halted = true;
5709                         r = 1;
5710                         goto out;
5711                 }
5712                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
5713                         record_steal_time(vcpu);
5714                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
5715                         process_nmi(vcpu);
5716
5717         }
5718
5719         r = kvm_mmu_reload(vcpu);
5720         if (unlikely(r))
5721                 goto out;
5722
5723         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5724                 inject_pending_event(vcpu);
5725
5726                 /* enable NMI/IRQ window open exits if needed */
5727                 if (vcpu->arch.nmi_pending)
5728                         kvm_x86_ops->enable_nmi_window(vcpu);
5729                 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5730                         kvm_x86_ops->enable_irq_window(vcpu);
5731
5732                 if (kvm_lapic_enabled(vcpu)) {
5733                         update_cr8_intercept(vcpu);
5734                         kvm_lapic_sync_to_vapic(vcpu);
5735                 }
5736         }
5737
5738         preempt_disable();
5739
5740         kvm_x86_ops->prepare_guest_switch(vcpu);
5741         if (vcpu->fpu_active)
5742                 kvm_load_guest_fpu(vcpu);
5743         kvm_load_guest_xcr0(vcpu);
5744
5745         vcpu->mode = IN_GUEST_MODE;
5746
5747         /* We should set ->mode before checking ->requests;
5748          * see the comment in make_all_cpus_request().
5749          */
5750         smp_mb();
5751
5752         local_irq_disable();
5753
5754         if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
5755             || need_resched() || signal_pending(current)) {
5756                 vcpu->mode = OUTSIDE_GUEST_MODE;
5757                 smp_wmb();
5758                 local_irq_enable();
5759                 preempt_enable();
5760                 kvm_x86_ops->cancel_injection(vcpu);
5761                 r = 1;
5762                 goto out;
5763         }
5764
5765         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5766
5767         kvm_guest_enter();
5768
5769         if (unlikely(vcpu->arch.switch_db_regs)) {
5770                 set_debugreg(0, 7);
5771                 set_debugreg(vcpu->arch.eff_db[0], 0);
5772                 set_debugreg(vcpu->arch.eff_db[1], 1);
5773                 set_debugreg(vcpu->arch.eff_db[2], 2);
5774                 set_debugreg(vcpu->arch.eff_db[3], 3);
5775         }
5776
5777         trace_kvm_entry(vcpu->vcpu_id);
5778         kvm_x86_ops->run(vcpu);
5779
5780         /*
5781          * If the guest has used debug registers, at least dr7
5782          * will be disabled while returning to the host.
5783          * If we don't have active breakpoints in the host, we don't
5784          * care about the messed-up debug address registers. But if
5785          * we have some of them active, restore the old state.
5786          */
5787         if (hw_breakpoint_active())
5788                 hw_breakpoint_restore();
5789
5790         vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
5791
5792         vcpu->mode = OUTSIDE_GUEST_MODE;
5793         smp_wmb();
5794         local_irq_enable();
5795
5796         ++vcpu->stat.exits;
5797
5798         /*
5799          * We must have an instruction between local_irq_enable() and
5800          * kvm_guest_exit(), so the timer interrupt isn't delayed by
5801          * the interrupt shadow.  The stat.exits increment will do nicely.
5802          * But we need to prevent reordering, hence this barrier():
5803          */
5804         barrier();
5805
5806         kvm_guest_exit();
5807
5808         preempt_enable();
5809
5810         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5811
5812         /*
5813          * Profile KVM exit RIPs:
5814          */
5815         if (unlikely(prof_on == KVM_PROFILING)) {
5816                 unsigned long rip = kvm_rip_read(vcpu);
5817                 profile_hit(KVM_PROFILING, (void *)rip);
5818         }
5819
5820
5822
5823         r = kvm_x86_ops->handle_exit(vcpu);
5824 out:
5825         return r;
5826 }
5827
5829 static int __vcpu_run(struct kvm_vcpu *vcpu)
5830 {
5831         int r;
5832         struct kvm *kvm = vcpu->kvm;
5833
5834         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
5835                 pr_debug("vcpu %d received sipi with vector # %x\n",
5836                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
5837                 kvm_lapic_reset(vcpu);
5838                 r = kvm_arch_vcpu_reset(vcpu);
5839                 if (r)
5840                         return r;
5841                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5842         }
5843
5844         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5845
5846         r = 1;
5847         while (r > 0) {
5848                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
5849                     !vcpu->arch.apf.halted)
5850                         r = vcpu_enter_guest(vcpu);
5851                 else {
5852                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5853                         kvm_vcpu_block(vcpu);
5854                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5855                         if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
5856                                 switch (vcpu->arch.mp_state) {
5858                                 case KVM_MP_STATE_HALTED:
5859                                         vcpu->arch.mp_state =
5860                                                 KVM_MP_STATE_RUNNABLE;
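                                        /* fall through */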
5861                                 case KVM_MP_STATE_RUNNABLE:
5862                                         vcpu->arch.apf.halted = false;
5863                                         break;
5864                                 case KVM_MP_STATE_SIPI_RECEIVED:
5865                                 default:
5866                                         r = -EINTR;
5867                                         break;
5868                                 }
5869                         }
5870                 }
5871
5872                 if (r <= 0)
5873                         break;
5874
5875                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
5876                 if (kvm_cpu_has_pending_timer(vcpu))
5877                         kvm_inject_pending_timer_irqs(vcpu);
5878
5879                 if (dm_request_for_irq_injection(vcpu)) {
5880                         r = -EINTR;
5881                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5882                         ++vcpu->stat.request_irq_exits;
5883                 }
5884
5885                 kvm_check_async_pf_completion(vcpu);
5886
5887                 if (signal_pending(current)) {
5888                         r = -EINTR;
5889                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5890                         ++vcpu->stat.signal_exits;
5891                 }
5892                 if (need_resched()) {
5893                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5894                         kvm_resched(vcpu);
5895                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5896                 }
5897         }
5898
5899         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5900
5901         return r;
5902 }
5903
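/*
 * Complete a PIO or MMIO operation that had to exit to userspace. MMIO
 * is chunked through vcpu->mmio_data eight bytes at a time, re-exiting
 * with KVM_EXIT_MMIO until mmio_size is consumed; execution then
 * re-enters the emulator with EMULTYPE_NO_DECODE to pick up where the
 * instruction left off.
 */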
5904 static int complete_mmio(struct kvm_vcpu *vcpu)
5905 {
5906         struct kvm_run *run = vcpu->run;
5907         int r;
5908
5909         if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
5910                 return 1;
5911
5912         if (vcpu->mmio_needed) {
5913                 vcpu->mmio_needed = 0;
5914                 if (!vcpu->mmio_is_write)
5915                         memcpy(vcpu->mmio_data + vcpu->mmio_index,
5916                                run->mmio.data, 8);
5917                 vcpu->mmio_index += 8;
5918                 if (vcpu->mmio_index < vcpu->mmio_size) {
5919                         run->exit_reason = KVM_EXIT_MMIO;
5920                         run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
5921                         memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
5922                         run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
5923                         run->mmio.is_write = vcpu->mmio_is_write;
5924                         vcpu->mmio_needed = 1;
5925                         return 0;
5926                 }
5927                 if (vcpu->mmio_is_write)
5928                         return 1;
5929                 vcpu->mmio_read_completed = 1;
5930         }
5931         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5932         r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5933         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5934         if (r != EMULATE_DONE)
5935                 return 0;
5936         return 1;
5937 }
5938
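/*
 * Entry point for the KVM_RUN ioctl.  A hedged sketch of the userspace
 * side (vcpu_fd creation and mmap_size from KVM_GET_VCPU_MMAP_SIZE are
 * assumed; error handling is elided):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size,
 *	                           PROT_READ | PROT_WRITE, MAP_SHARED,
 *	                           vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) >= 0 || errno == EINTR) {
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:   ... handle port I/O ...   break;
 *		case KVM_EXIT_MMIO: ... serve run->mmio ...   break;
 *		case KVM_EXIT_INTR: ... signal, just loop ... break;
 *		}
 *	}
 */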
5939 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5940 {
5941         int r;
5942         sigset_t sigsaved;
5943
5944         if (!tsk_used_math(current) && init_fpu(current))
5945                 return -ENOMEM;
5946
5947         if (vcpu->sigset_active)
5948                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5949
5950         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
5951                 kvm_vcpu_block(vcpu);
5952                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
5953                 r = -EAGAIN;
5954                 goto out;
5955         }
5956
5957         /* re-sync apic's tpr */
5958         if (!irqchip_in_kernel(vcpu->kvm)) {
5959                 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
5960                         r = -EINVAL;
5961                         goto out;
5962                 }
5963         }
5964
5965         r = complete_mmio(vcpu);
5966         if (r <= 0)
5967                 goto out;
5968
5969         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
5970                 kvm_register_write(vcpu, VCPU_REGS_RAX,
5971                                      kvm_run->hypercall.ret);
5972
5973         r = __vcpu_run(vcpu);
5974
5975 out:
5976         post_kvm_run_save(vcpu);
5977         if (vcpu->sigset_active)
5978                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5979
5980         return r;
5981 }
5982
5983 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5984 {
5985         if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
5986                 /*
5987                  * We are here if userspace calls get_regs() in the middle of
5988                  * instruction emulation. Register state needs to be copied
5989                  * back from the emulation context to the vcpu. Userspace
5990                  * shouldn't usually do that, but some badly designed PV
5991                  * devices (the vmware backdoor interface) need this to work.
5992                  */
5993                 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5994                 memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
5995                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5996         }
5997         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5998         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5999         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
6000         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
6001         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
6002         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
6003         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
6004         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
6005 #ifdef CONFIG_X86_64
6006         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
6007         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
6008         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
6009         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
6010         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
6011         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
6012         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
6013         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
6014 #endif
6015
6016         regs->rip = kvm_rip_read(vcpu);
6017         regs->rflags = kvm_get_rflags(vcpu);
6018
6019         return 0;
6020 }
6021
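/*
 * Counterpart to KVM_GET_REGS above: KVM_SET_REGS replaces the whole
 * general-purpose register file.  A hedged userspace sketch (vcpu_fd
 * and the entry address are assumed) for pointing a fresh vcpu at its
 * entry point:
 *
 *	struct kvm_regs regs;
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip    = 0x100000;		// example guest entry point
 *	regs.rflags = 0x2;		// bit 1 is reserved, must be set
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */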
6022 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6023 {
6024         vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
6025         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6026
6027         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
6028         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
6029         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
6030         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
6031         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
6032         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
6033         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
6034         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
6035 #ifdef CONFIG_X86_64
6036         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
6037         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
6038         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
6039         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
6040         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
6041         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
6042         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
6043         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
6044 #endif
6045
6046         kvm_rip_write(vcpu, regs->rip);
6047         kvm_set_rflags(vcpu, regs->rflags);
6048
6049         vcpu->arch.exception.pending = false;
6050
6051         kvm_make_request(KVM_REQ_EVENT, vcpu);
6052
6053         return 0;
6054 }
6055
6056 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
6057 {
6058         struct kvm_segment cs;
6059
6060         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
6061         *db = cs.db;
6062         *l = cs.l;
6063 }
6064 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
6065
6066 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
6067                                   struct kvm_sregs *sregs)
6068 {
6069         struct desc_ptr dt;
6070
6071         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6072         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6073         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6074         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6075         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6076         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6077
6078         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6079         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6080
6081         kvm_x86_ops->get_idt(vcpu, &dt);
6082         sregs->idt.limit = dt.size;
6083         sregs->idt.base = dt.address;
6084         kvm_x86_ops->get_gdt(vcpu, &dt);
6085         sregs->gdt.limit = dt.size;
6086         sregs->gdt.base = dt.address;
6087
6088         sregs->cr0 = kvm_read_cr0(vcpu);
6089         sregs->cr2 = vcpu->arch.cr2;
6090         sregs->cr3 = kvm_read_cr3(vcpu);
6091         sregs->cr4 = kvm_read_cr4(vcpu);
6092         sregs->cr8 = kvm_get_cr8(vcpu);
6093         sregs->efer = vcpu->arch.efer;
6094         sregs->apic_base = kvm_get_apic_base(vcpu);
6095
6096         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
6097
6098         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
6099                 set_bit(vcpu->arch.interrupt.nr,
6100                         (unsigned long *)sregs->interrupt_bitmap);
6101
6102         return 0;
6103 }
6104
6105 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
6106                                     struct kvm_mp_state *mp_state)
6107 {
6108         mp_state->mp_state = vcpu->arch.mp_state;
6109         return 0;
6110 }
6111
6112 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
6113                                     struct kvm_mp_state *mp_state)
6114 {
6115         vcpu->arch.mp_state = mp_state->mp_state;
6116         kvm_make_request(KVM_REQ_EVENT, vcpu);
6117         return 0;
6118 }
6119
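/*
 * Emulate a hardware task switch on behalf of the vendor module (VMX
 * and SVM both funnel guest TSS switches through here).  On success the
 * register state computed by the emulator is copied back into the vcpu
 * and event re-evaluation is requested; on failure the caller reports
 * emulation failure to userspace.
 */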
6120 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
6121                     bool has_error_code, u32 error_code)
6122 {
6123         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6124         int ret;
6125
6126         init_emulate_ctxt(vcpu);
6127
6128         ret = emulator_task_switch(ctxt, tss_selector, reason,
6129                                    has_error_code, error_code);
6130
6131         if (ret)
6132                 return EMULATE_FAIL;
6133
6134         memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
6135         kvm_rip_write(vcpu, ctxt->eip);
6136         kvm_set_rflags(vcpu, ctxt->eflags);
6137         kvm_make_request(KVM_REQ_EVENT, vcpu);
6138         return EMULATE_DONE;
6139 }
6140 EXPORT_SYMBOL_GPL(kvm_task_switch);
6141
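/*
 * KVM_SET_SREGS: descriptor tables and control registers are installed
 * before the segment registers, PDPTEs are reloaded if the guest uses
 * PAE paging without long mode, and the MMU context is reset whenever
 * CR0/CR3/CR4/EFER changed in a way that affects translation.
 */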
6142 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6143                                   struct kvm_sregs *sregs)
6144 {
6145         int mmu_reset_needed = 0;
6146         int pending_vec, max_bits, idx;
6147         struct desc_ptr dt;
6148
6149         if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
6150                 return -EINVAL;
6151
6152         dt.size = sregs->idt.limit;
6153         dt.address = sregs->idt.base;
6154         kvm_x86_ops->set_idt(vcpu, &dt);
6155         dt.size = sregs->gdt.limit;
6156         dt.address = sregs->gdt.base;
6157         kvm_x86_ops->set_gdt(vcpu, &dt);
6158
6159         vcpu->arch.cr2 = sregs->cr2;
6160         mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
6161         vcpu->arch.cr3 = sregs->cr3;
6162         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
6163
6164         kvm_set_cr8(vcpu, sregs->cr8);
6165
6166         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
6167         kvm_x86_ops->set_efer(vcpu, sregs->efer);
6168         kvm_set_apic_base(vcpu, sregs->apic_base);
6169
6170         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
6171         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
6172         vcpu->arch.cr0 = sregs->cr0;
6173
6174         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
6175         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
6176         if (sregs->cr4 & X86_CR4_OSXSAVE)
6177                 update_cpuid(vcpu);
6178
6179         idx = srcu_read_lock(&vcpu->kvm->srcu);
6180         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
6181                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
6182                 mmu_reset_needed = 1;
6183         }
6184         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6185
6186         if (mmu_reset_needed)
6187                 kvm_mmu_reset_context(vcpu);
6188
6189         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
6190         pending_vec = find_first_bit(
6191                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
6192         if (pending_vec < max_bits) {
6193                 kvm_queue_interrupt(vcpu, pending_vec, false);
6194                 pr_debug("Set back pending irq %d\n", pending_vec);
6195         }
6196
6197         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6198         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6199         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6200         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6201         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6202         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6203
6204         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6205         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6206
6207         update_cr8_intercept(vcpu);
6208
6209         /* Older userspace won't unhalt the vcpu on reset. */
6210         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
6211             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
6212             !is_protmode(vcpu))
6213                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6214
6215         kvm_make_request(KVM_REQ_EVENT, vcpu);
6216
6217         return 0;
6218 }
6219
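/*
 * KVM_SET_GUEST_DEBUG: a hedged userspace sketch for arming a single
 * hardware execute breakpoint (vcpu_fd and guest_va are assumed, error
 * handling is elided):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *	};
 *	dbg.arch.debugreg[0] = guest_va;	// DR0: address to trap
 *	dbg.arch.debugreg[7] = 0x1;		// DR7.L0: local enable
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */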
6220 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
6221                                         struct kvm_guest_debug *dbg)
6222 {
6223         unsigned long rflags;
6224         int i, r;
6225
6226         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
6227                 r = -EBUSY;
6228                 if (vcpu->arch.exception.pending)
6229                         goto out;
6230                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
6231                         kvm_queue_exception(vcpu, DB_VECTOR);
6232                 else
6233                         kvm_queue_exception(vcpu, BP_VECTOR);
6234         }
6235
6236         /*
6237          * Read rflags while potentially injected trace flags are still
6238          * filtered out.
6239          */
6240         rflags = kvm_get_rflags(vcpu);
6241
6242         vcpu->guest_debug = dbg->control;
6243         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
6244                 vcpu->guest_debug = 0;
6245
6246         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
6247                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
6248                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
6249                 vcpu->arch.switch_db_regs =
6250                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
6251         } else {
6252                 for (i = 0; i < KVM_NR_DB_REGS; i++)
6253                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
6254                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
6255         }
6256
6257         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6258                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
6259                         get_segment_base(vcpu, VCPU_SREG_CS);
6260
6261         /*
6262          * Trigger an rflags update that will inject or remove the trace
6263          * flags.
6264          */
6265         kvm_set_rflags(vcpu, rflags);
6266
6267         kvm_x86_ops->set_guest_debug(vcpu, dbg);
6268
6269         r = 0;
6270
6271 out:
6273         return r;
6274 }
6275
6276 /*
6277  * Translate a guest virtual address to a guest physical address.
6278  */
6279 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
6280                                     struct kvm_translation *tr)
6281 {
6282         unsigned long vaddr = tr->linear_address;
6283         gpa_t gpa;
6284         int idx;
6285
6286         idx = srcu_read_lock(&vcpu->kvm->srcu);
6287         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
6288         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6289         tr->physical_address = gpa;
6290         tr->valid = gpa != UNMAPPED_GVA;
6291         tr->writeable = 1;
6292         tr->usermode = 0;
6293
6294         return 0;
6295 }
6296
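/*
 * KVM_GET_FPU/KVM_SET_FPU translate between the kernel's FXSAVE image
 * and the stable kvm_fpu ABI.  The 128-byte memcpy below covers the
 * eight x87 registers, each occupying a 16-byte slot in the fxsave
 * st_space layout (8 * 16 = 128).
 */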
6297 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
6298 {
6299         struct i387_fxsave_struct *fxsave =
6300                         &vcpu->arch.guest_fpu.state->fxsave;
6301
6302         memcpy(fpu->fpr, fxsave->st_space, 128);
6303         fpu->fcw = fxsave->cwd;
6304         fpu->fsw = fxsave->swd;
6305         fpu->ftwx = fxsave->twd;
6306         fpu->last_opcode = fxsave->fop;
6307         fpu->last_ip = fxsave->rip;
6308         fpu->last_dp = fxsave->rdp;
6309         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
6310
6311         return 0;
6312 }
6313
6314 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
6315 {
6316         struct i387_fxsave_struct *fxsave =
6317                         &vcpu->arch.guest_fpu.state->fxsave;
6318
6319         memcpy(fxsave->st_space, fpu->fpr, 128);
6320         fxsave->cwd = fpu->fcw;
6321         fxsave->swd = fpu->fsw;
6322         fxsave->twd = fpu->ftwx;
6323         fxsave->fop = fpu->last_opcode;
6324         fxsave->rip = fpu->last_ip;
6325         fxsave->rdp = fpu->last_dp;
6326         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
6327
6328         return 0;
6329 }
6330
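/*
 * fx_init() allocates and initializes the guest FPU save area and seeds
 * guest xcr0 with XSTATE_FP, the minimum architecturally valid value,
 * so the first load of guest xcr0 is guaranteed to be valid.
 */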
6331 int fx_init(struct kvm_vcpu *vcpu)
6332 {
6333         int err;
6334
6335         err = fpu_alloc(&vcpu->arch.guest_fpu);
6336         if (err)
6337                 return err;
6338
6339         fpu_finit(&vcpu->arch.guest_fpu);
6340
6341         /*
6342          * Ensure guest xcr0 is valid for loading
6343          */
6344         vcpu->arch.xcr0 = XSTATE_FP;
6345
6346         vcpu->arch.cr0 |= X86_CR0_ET;
6347
6348         return 0;
6349 }
6350 EXPORT_SYMBOL_GPL(fx_init);
6351
6352 static void fx_free(struct kvm_vcpu *vcpu)
6353 {
6354         fpu_free(&vcpu->arch.guest_fpu);
6355 }
6356
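/*
 * Lazy FPU switching: the guest's FPU state is loaded onto the CPU only
 * when the guest is about to use it (kvm_load_guest_fpu) and written
 * back out on vcpu unload or reschedule (kvm_put_guest_fpu).  The
 * guest_fpu_loaded flag makes the pair idempotent.
 */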
6357 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
6358 {
6359         if (vcpu->guest_fpu_loaded)
6360                 return;
6361
6362         /*
6363          * Restore all possible guest states, and assume the host
6364          * may use all available feature bits. The guest xcr0 is
6365          * loaded later.
6366          */
6367         kvm_put_guest_xcr0(vcpu);
6368         vcpu->guest_fpu_loaded = 1;
6369         unlazy_fpu(current);
6370         fpu_restore_checking(&vcpu->arch.guest_fpu);
6371         trace_kvm_fpu(1);
6372 }
6373
6374 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
6375 {
6376         kvm_put_guest_xcr0(vcpu);
6377
6378         if (!vcpu->guest_fpu_loaded)
6379                 return;
6380
6381         vcpu->guest_fpu_loaded = 0;
6382         fpu_save_init(&vcpu->arch.guest_fpu);
6383         ++vcpu->stat.fpu_reload;
6384         kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
6385         trace_kvm_fpu(0);
6386 }
6387
6388 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
6389 {
6390         kvmclock_reset(vcpu);
6391
6392         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
6393         fx_free(vcpu);
6394         kvm_x86_ops->vcpu_free(vcpu);
6395 }
6396
6397 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
6398                                                 unsigned int id)
6399 {
6400         if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
6401                 printk_once(KERN_WARNING
6402                 "kvm: SMP vm created on host with unstable TSC; "
6403                 "guest TSC will not be reliable\n");
6404         return kvm_x86_ops->vcpu_create(kvm, id);
6405 }
6406
6407 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
6408 {
6409         int r;
6410
6411         vcpu->arch.mtrr_state.have_fixed = 1;
6412         vcpu_load(vcpu);
6413         r = kvm_arch_vcpu_reset(vcpu);
6414         if (r == 0)
6415                 r = kvm_mmu_setup(vcpu);
6416         vcpu_put(vcpu);
6417
6418         return r;
6419 }
6420
6421 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
6422 {
6423         vcpu->arch.apf.msr_val = 0;
6424
6425         vcpu_load(vcpu);
6426         kvm_mmu_unload(vcpu);
6427         vcpu_put(vcpu);
6428
6429         fx_free(vcpu);
6430         kvm_x86_ops->vcpu_free(vcpu);
6431 }
6432
6433 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
6434 {
6435         atomic_set(&vcpu->arch.nmi_queued, 0);
6436         vcpu->arch.nmi_pending = 0;
6437         vcpu->arch.nmi_injected = false;
6438
6439         vcpu->arch.switch_db_regs = 0;
6440         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
6441         vcpu->arch.dr6 = DR6_FIXED_1;
6442         vcpu->arch.dr7 = DR7_FIXED_1;
6443
6444         kvm_make_request(KVM_REQ_EVENT, vcpu);
6445         vcpu->arch.apf.msr_val = 0;
6446         vcpu->arch.st.msr_val = 0;
6447
6448         kvmclock_reset(vcpu);
6449
6450         kvm_clear_async_pf_completion_queue(vcpu);
6451         kvm_async_pf_hash_reset(vcpu);
6452         vcpu->arch.apf.halted = false;
6453
6454         return kvm_x86_ops->vcpu_reset(vcpu);
6455 }
6456
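/*
 * Called whenever virtualization is (re-)enabled on a CPU, e.g. at
 * module load, CPU hotplug or resume: re-register the shared MSRs and
 * request a clock update for every vcpu last scheduled on this CPU,
 * since its TSC may have been reset while virtualization was off.
 */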
6457 int kvm_arch_hardware_enable(void *garbage)
6458 {
6459         struct kvm *kvm;
6460         struct kvm_vcpu *vcpu;
6461         int i;
6462
6463         kvm_shared_msr_cpu_online();
6464         list_for_each_entry(kvm, &vm_list, vm_list)
6465                 kvm_for_each_vcpu(i, vcpu, kvm)
6466                         if (vcpu->cpu == smp_processor_id())
6467                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6468         return kvm_x86_ops->hardware_enable(garbage);
6469 }
6470
6471 void kvm_arch_hardware_disable(void *garbage)
6472 {
6473         kvm_x86_ops->hardware_disable(garbage);
6474         drop_user_return_notifiers(garbage);
6475 }
6476
6477 int kvm_arch_hardware_setup(void)
6478 {
6479         return kvm_x86_ops->hardware_setup();
6480 }
6481
6482 void kvm_arch_hardware_unsetup(void)
6483 {
6484         kvm_x86_ops->hardware_unsetup();
6485 }
6486
6487 void kvm_arch_check_processor_compat(void *rtn)
6488 {
6489         kvm_x86_ops->check_processor_compatibility(rtn);
6490 }
6491
6492 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
6493 {
6494         return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
6495 }
6496
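/*
 * First-time vcpu construction.  Note that the fail_* labels below
 * unwind the allocations in exactly the reverse order they are made
 * (pio page, mmu, lapic, MCE banks); new per-vcpu resources added here
 * should follow the same pattern.
 */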
6497 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6498 {
6499         struct page *page;
6500         struct kvm *kvm;
6501         int r;
6502
6503         BUG_ON(vcpu->kvm == NULL);
6504         kvm = vcpu->kvm;
6505
6506         vcpu->arch.emulate_ctxt.ops = &emulate_ops;
6507         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
6508         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
6509         vcpu->arch.mmu.translate_gpa = translate_gpa;
6510         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
6511         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
6512                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6513         else
6514                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
6515
6516         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
6517         if (!page) {
6518                 r = -ENOMEM;
6519                 goto fail;
6520         }
6521         vcpu->arch.pio_data = page_address(page);
6522
6523         kvm_init_tsc_catchup(vcpu, max_tsc_khz);
6524
6525         r = kvm_mmu_create(vcpu);
6526         if (r < 0)
6527                 goto fail_free_pio_data;
6528
6529         if (irqchip_in_kernel(kvm)) {
6530                 r = kvm_create_lapic(vcpu);
6531                 if (r < 0)
6532                         goto fail_mmu_destroy;
6533         }
6534
6535         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
6536                                        GFP_KERNEL);
6537         if (!vcpu->arch.mce_banks) {
6538                 r = -ENOMEM;
6539                 goto fail_free_lapic;
6540         }
6541         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
6542
6543         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
6544                 goto fail_free_mce_banks;
6545
6546         vcpu->arch.pv_time_enabled = false;
6547         kvm_async_pf_hash_reset(vcpu);
6548
6549         return 0;
6550 fail_free_mce_banks:
6551         kfree(vcpu->arch.mce_banks);
6552 fail_free_lapic:
6553         kvm_free_lapic(vcpu);
6554 fail_mmu_destroy:
6555         kvm_mmu_destroy(vcpu);
6556 fail_free_pio_data:
6557         free_page((unsigned long)vcpu->arch.pio_data);
6558 fail:
6559         return r;
6560 }
6561
6562 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
6563 {
6564         int idx;
6565
6566         kfree(vcpu->arch.mce_banks);
6567         kvm_free_lapic(vcpu);
6568         idx = srcu_read_lock(&vcpu->kvm->srcu);
6569         kvm_mmu_destroy(vcpu);
6570         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6571         free_page((unsigned long)vcpu->arch.pio_data);
6572 }
6573
6574 int kvm_arch_init_vm(struct kvm *kvm)
6575 {
6576         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6577         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
6578
6579         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
6580         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
6581
6582         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
6583
6584         return 0;
6585 }
6586
6587 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
6588 {
6589         vcpu_load(vcpu);
6590         kvm_mmu_unload(vcpu);
6591         vcpu_put(vcpu);
6592 }
6593
6594 static void kvm_free_vcpus(struct kvm *kvm)
6595 {
6596         unsigned int i;
6597         struct kvm_vcpu *vcpu;
6598
6599         /*
6600          * Unpin any mmu pages first.
6601          */
6602         kvm_for_each_vcpu(i, vcpu, kvm) {
6603                 kvm_clear_async_pf_completion_queue(vcpu);
6604                 kvm_unload_vcpu_mmu(vcpu);
6605         }
6606         kvm_for_each_vcpu(i, vcpu, kvm)
6607                 kvm_arch_vcpu_free(vcpu);
6608
6609         mutex_lock(&kvm->lock);
6610         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
6611                 kvm->vcpus[i] = NULL;
6612
6613         atomic_set(&kvm->online_vcpus, 0);
6614         mutex_unlock(&kvm->lock);
6615 }
6616
6617 void kvm_arch_sync_events(struct kvm *kvm)
6618 {
6619         kvm_free_all_assigned_devices(kvm);
6620         kvm_free_pit(kvm);
6621 }
6622
6623 void kvm_arch_destroy_vm(struct kvm *kvm)
6624 {
6625         kvm_iommu_unmap_guest(kvm);
6626         kfree(kvm->arch.vpic);
6627         kfree(kvm->arch.vioapic);
6628         kvm_free_vcpus(kvm);
6629         if (kvm->arch.apic_access_page)
6630                 put_page(kvm->arch.apic_access_page);
6631         if (kvm->arch.ept_identity_pagetable)
6632                 put_page(kvm->arch.ept_identity_pagetable);
6633 }
6634
6635 int kvm_arch_prepare_memory_region(struct kvm *kvm,
6636                                 struct kvm_memory_slot *memslot,
6637                                 struct kvm_memory_slot old,
6638                                 struct kvm_userspace_memory_region *mem,
6639                                 int user_alloc)
6640 {
6641         int npages = memslot->npages;
6642         int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
6643
6644         /* Prevent internal slot pages from being moved by fork()/COW. */
6645         if (memslot->id >= KVM_MEMORY_SLOTS)
6646                 map_flags = MAP_SHARED | MAP_ANONYMOUS;
6647
6648         /* To keep backward compatibility with older userspace,
6649          * x86 needs to handle the !user_alloc case.
6650          */
6651         if (!user_alloc) {
6652                 if (npages && !old.rmap) {
6653                         unsigned long userspace_addr;
6654
6655                         down_write(&current->mm->mmap_sem);
6656                         userspace_addr = do_mmap(NULL, 0,
6657                                                  npages * PAGE_SIZE,
6658                                                  PROT_READ | PROT_WRITE,
6659                                                  map_flags,
6660                                                  0);
6661                         up_write(&current->mm->mmap_sem);
6662
6663                         if (IS_ERR((void *)userspace_addr))
6664                                 return PTR_ERR((void *)userspace_addr);
6665
6666                         memslot->userspace_addr = userspace_addr;
6667                 }
6668         }
6669
6671         return 0;
6672 }
6673
6674 void kvm_arch_commit_memory_region(struct kvm *kvm,
6675                                 struct kvm_userspace_memory_region *mem,
6676                                 struct kvm_memory_slot old,
6677                                 int user_alloc)
6678 {
6680         int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
6681
6682         if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
6683                 int ret;
6684
6685                 down_write(&current->mm->mmap_sem);
6686                 ret = do_munmap(current->mm, old.userspace_addr,
6687                                 old.npages * PAGE_SIZE);
6688                 up_write(&current->mm->mmap_sem);
6689                 if (ret < 0)
6690                         printk(KERN_WARNING
6691                                "kvm_vm_ioctl_set_memory_region: "
6692                                "failed to munmap memory\n");
6693         }
6694
6695         if (!kvm->arch.n_requested_mmu_pages)
6696                 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
6697
6698         spin_lock(&kvm->mmu_lock);
6699         if (nr_mmu_pages)
6700                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
6701         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
6702         spin_unlock(&kvm->mmu_lock);
6703 }
6704
6705 void kvm_arch_flush_shadow(struct kvm *kvm)
6706 {
6707         kvm_mmu_zap_all(kvm);
6708         kvm_reload_remote_mmus(kvm);
6709 }
6710
6711 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
6712 {
6713         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6714                 !vcpu->arch.apf.halted) ||
6715                 !list_empty_careful(&vcpu->async_pf.done) ||
6716                 vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
6717                 atomic_read(&vcpu->arch.nmi_queued) ||
6718                 (kvm_arch_interrupt_allowed(vcpu) &&
6719                  kvm_cpu_has_interrupt(vcpu));
6720 }
6721
6722 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
6723 {
6724         int me;
6725         int cpu = vcpu->cpu;
6726
6727         if (waitqueue_active(&vcpu->wq)) {
6728                 wake_up_interruptible(&vcpu->wq);
6729                 ++vcpu->stat.halt_wakeup;
6730         }
6731
6732         me = get_cpu();
6733         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
6734                 if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
6735                         smp_send_reschedule(cpu);
6736         put_cpu();
6737 }
6738
6739 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
6740 {
6741         return kvm_x86_ops->interrupt_allowed(vcpu);
6742 }
6743
6744 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
6745 {
6746         unsigned long current_rip = kvm_rip_read(vcpu) +
6747                 get_segment_base(vcpu, VCPU_SREG_CS);
6748
6749         return current_rip == linear_rip;
6750 }
6751 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
6752
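/*
 * When userspace single-steps the guest, KVM sets TF in the guest's
 * rflags behind its back.  kvm_get_rflags() filters the injected TF
 * back out so neither the guest nor userspace state save/restore sees
 * it, and kvm_set_rflags() re-injects it while the recorded single-step
 * rip is still current.
 */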
6753 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
6754 {
6755         unsigned long rflags;
6756
6757         rflags = kvm_x86_ops->get_rflags(vcpu);
6758         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6759                 rflags &= ~X86_EFLAGS_TF;
6760         return rflags;
6761 }
6762 EXPORT_SYMBOL_GPL(kvm_get_rflags);
6763
6764 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6765 {
6766         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
6767             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6768                 rflags |= X86_EFLAGS_TF;
6769         kvm_x86_ops->set_rflags(vcpu, rflags);
6770         kvm_make_request(KVM_REQ_EVENT, vcpu);
6771 }
6772 EXPORT_SYMBOL_GPL(kvm_set_rflags);
6773
6774 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
6775 {
6776         int r;
6777
6778         if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
6779               is_error_page(work->page))
6780                 return;
6781
6782         r = kvm_mmu_reload(vcpu);
6783         if (unlikely(r))
6784                 return;
6785
6786         if (!vcpu->arch.mmu.direct_map &&
6787               work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
6788                 return;
6789
6790         vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
6791 }
6792
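/*
 * The async page fault gfn table is a small open-addressed hash with
 * linear probing: kvm_add_async_pf_gfn() probes forward from
 * hash_fn(gfn) until it finds an empty (~0) slot, kvm_async_pf_gfn_slot()
 * repeats the same probe sequence on lookup, and kvm_del_async_pf_gfn()
 * performs the classic delete-with-backshift so entries displaced past
 * the deleted slot remain reachable.  The table holds
 * roundup_pow_of_two(ASYNC_PF_PER_VCPU) entries, so the "& (size - 1)"
 * wrap in kvm_async_pf_next_probe() is exact.
 */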
6793 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
6794 {
6795         return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
6796 }
6797
6798 static inline u32 kvm_async_pf_next_probe(u32 key)
6799 {
6800         return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
6801 }
6802
6803 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6804 {
6805         u32 key = kvm_async_pf_hash_fn(gfn);
6806
6807         while (vcpu->arch.apf.gfns[key] != ~0)
6808                 key = kvm_async_pf_next_probe(key);
6809
6810         vcpu->arch.apf.gfns[key] = gfn;
6811 }
6812
6813 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
6814 {
6815         int i;
6816         u32 key = kvm_async_pf_hash_fn(gfn);
6817
6818         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
6819                      (vcpu->arch.apf.gfns[key] != gfn &&
6820                       vcpu->arch.apf.gfns[key] != ~0); i++)
6821                 key = kvm_async_pf_next_probe(key);
6822
6823         return key;
6824 }
6825
6826 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6827 {
6828         return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
6829 }
6830
6831 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6832 {
6833         u32 i, j, k;
6834
6835         i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
6836         while (true) {
6837                 vcpu->arch.apf.gfns[i] = ~0;
6838                 do {
6839                         j = kvm_async_pf_next_probe(j);
6840                         if (vcpu->arch.apf.gfns[j] == ~0)
6841                                 return;
6842                         k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
6843                         /*
6844                          * k lies cyclically in ]i,j]
6845                          * |    i.k.j |
6846                          * |....j i.k.| or  |.k..j i...|
6847                          */
6848                 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
6849                 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
6850                 i = j;
6851         }
6852 }
6853
6854 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
6855 {
6857         return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
6858                                       sizeof(val));
6859 }
6860
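/*
 * Async page fault delivery to the guest: if the guest enabled the
 * mechanism through the async PF MSR, KVM writes PAGE_NOT_PRESENT into
 * the shared slot and injects a synthetic #PF whose address field
 * carries the token instead of a fault address; the matching PAGE_READY
 * notification below wakes the guest task once the page is in.  If the
 * guest did not opt in (or asked not to be interrupted in CPL0), the
 * vcpu is simply halted until the fault is resolved.
 */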
6861 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
6862                                      struct kvm_async_pf *work)
6863 {
6864         struct x86_exception fault;
6865
6866         trace_kvm_async_pf_not_present(work->arch.token, work->gva);
6867         kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
6868
6869         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
6870             (vcpu->arch.apf.send_user_only &&
6871              kvm_x86_ops->get_cpl(vcpu) == 0))
6872                 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
6873         else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
6874                 fault.vector = PF_VECTOR;
6875                 fault.error_code_valid = true;
6876                 fault.error_code = 0;
6877                 fault.nested_page_fault = false;
6878                 fault.address = work->arch.token;
6879                 kvm_inject_page_fault(vcpu, &fault);
6880         }
6881 }
6882
6883 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
6884                                  struct kvm_async_pf *work)
6885 {
6886         struct x86_exception fault;
6887
6888         trace_kvm_async_pf_ready(work->arch.token, work->gva);
6889         if (is_error_page(work->page))
6890                 work->arch.token = ~0; /* broadcast wakeup */
6891         else
6892                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
6893
6894         if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
6895             !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
6896                 fault.vector = PF_VECTOR;
6897                 fault.error_code_valid = true;
6898                 fault.error_code = 0;
6899                 fault.nested_page_fault = false;
6900                 fault.address = work->arch.token;
6901                 kvm_inject_page_fault(vcpu, &fault);
6902         }
6903         vcpu->arch.apf.halted = false;
6904 }
6905
6906 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
6907 {
6908         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
6909                 return true;
6910         else
6911                 return !kvm_event_needs_reinjection(vcpu) &&
6912                         kvm_x86_ops->interrupt_allowed(vcpu);
6913 }
6914
6915 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
6916 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
6917 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
6918 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
6919 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
6920 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
6921 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
6922 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
6923 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
6924 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
6925 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
6926 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);