arch/x86/kvm/x86.c (pandora-kernel.git @ 4ae334a1bd9957dd434bd464d54f3967e6880f74)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29
30 #include <linux/clocksource.h>
31 #include <linux/interrupt.h>
32 #include <linux/kvm.h>
33 #include <linux/fs.h>
34 #include <linux/vmalloc.h>
35 #include <linux/module.h>
36 #include <linux/mman.h>
37 #include <linux/highmem.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/cpufreq.h>
41 #include <linux/user-return-notifier.h>
42 #include <linux/srcu.h>
43 #include <linux/slab.h>
44 #include <linux/perf_event.h>
45 #include <linux/uaccess.h>
46 #include <linux/hash.h>
47 #include <linux/pci.h>
48 #include <trace/events/kvm.h>
49
50 #define CREATE_TRACE_POINTS
51 #include "trace.h"
52
53 #include <asm/debugreg.h>
54 #include <asm/msr.h>
55 #include <asm/desc.h>
56 #include <asm/mtrr.h>
57 #include <asm/mce.h>
58 #include <asm/i387.h>
59 #include <asm/xcr.h>
60 #include <asm/pvclock.h>
61 #include <asm/div64.h>
62
63 #define MAX_IO_MSRS 256
64 #define KVM_MAX_MCE_BANKS 32
65 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
66
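/*
 * Map an emulator context back to the vcpu that embeds it: the emulation
 * context lives inside struct kvm_vcpu (arch.emulate_ctxt), so container_of
 * can recover the enclosing vcpu from a pointer to the context.
 */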
67 #define emul_to_vcpu(ctxt) \
68         container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
69
70 /* EFER defaults:
71  * - enable SYSCALL/SYSRET by default because it is emulated by KVM
72  * - enable LME and LMA by default on 64-bit KVM
73  */
74 #ifdef CONFIG_X86_64
75 static
76 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
77 #else
78 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
79 #endif
80
81 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
82 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
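/*
 * Each of these expands to an (offset, kind) pair used by the
 * kvm_stats_debugfs_item initializers in debugfs_entries[] below.
 */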
83
84 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
85 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
86                                     struct kvm_cpuid_entry2 __user *entries);
87 static void process_nmi(struct kvm_vcpu *vcpu);
88
89 struct kvm_x86_ops *kvm_x86_ops;
90 EXPORT_SYMBOL_GPL(kvm_x86_ops);
91
92 int ignore_msrs = 0;
93 module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
94
95 unsigned int min_timer_period_us = 500;
96 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
97
98 bool kvm_has_tsc_control;
99 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
100 u32  kvm_max_guest_tsc_khz;
101 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
102
103 #define KVM_NR_SHARED_MSRS 16
104
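/*
 * Shared ("user return") MSRs: MSRs that hold different values for host and
 * guest (e.g. the SYSCALL-related MSRs on VMX).  Vendor code registers them
 * with kvm_define_shared_msr() and switches them to guest values with
 * kvm_set_shared_msr(); the host values are restored lazily, via a
 * user-return notifier, only when the CPU returns to userspace rather than
 * on every exit from the guest.
 */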
105 struct kvm_shared_msrs_global {
106         int nr;
107         u32 msrs[KVM_NR_SHARED_MSRS];
108 };
109
110 struct kvm_shared_msrs {
111         struct user_return_notifier urn;
112         bool registered;
113         struct kvm_shared_msr_values {
114                 u64 host;
115                 u64 curr;
116         } values[KVM_NR_SHARED_MSRS];
117 };
118
119 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
120 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
121
122 struct kvm_stats_debugfs_item debugfs_entries[] = {
123         { "pf_fixed", VCPU_STAT(pf_fixed) },
124         { "pf_guest", VCPU_STAT(pf_guest) },
125         { "tlb_flush", VCPU_STAT(tlb_flush) },
126         { "invlpg", VCPU_STAT(invlpg) },
127         { "exits", VCPU_STAT(exits) },
128         { "io_exits", VCPU_STAT(io_exits) },
129         { "mmio_exits", VCPU_STAT(mmio_exits) },
130         { "signal_exits", VCPU_STAT(signal_exits) },
131         { "irq_window", VCPU_STAT(irq_window_exits) },
132         { "nmi_window", VCPU_STAT(nmi_window_exits) },
133         { "halt_exits", VCPU_STAT(halt_exits) },
134         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
135         { "hypercalls", VCPU_STAT(hypercalls) },
136         { "request_irq", VCPU_STAT(request_irq_exits) },
137         { "irq_exits", VCPU_STAT(irq_exits) },
138         { "host_state_reload", VCPU_STAT(host_state_reload) },
139         { "efer_reload", VCPU_STAT(efer_reload) },
140         { "fpu_reload", VCPU_STAT(fpu_reload) },
141         { "insn_emulation", VCPU_STAT(insn_emulation) },
142         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
143         { "irq_injections", VCPU_STAT(irq_injections) },
144         { "nmi_injections", VCPU_STAT(nmi_injections) },
145         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
146         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
147         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
148         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
149         { "mmu_flooded", VM_STAT(mmu_flooded) },
150         { "mmu_recycled", VM_STAT(mmu_recycled) },
151         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
152         { "mmu_unsync", VM_STAT(mmu_unsync) },
153         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
154         { "largepages", VM_STAT(lpages) },
155         { NULL }
156 };
157
158 u64 __read_mostly host_xcr0;
159
160 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
161
162 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
163 {
164         int i;
165         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
166                 vcpu->arch.apf.gfns[i] = ~0;
167 }
168
169 static void kvm_on_user_return(struct user_return_notifier *urn)
170 {
171         unsigned slot;
172         struct kvm_shared_msrs *locals
173                 = container_of(urn, struct kvm_shared_msrs, urn);
174         struct kvm_shared_msr_values *values;
175
176         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
177                 values = &locals->values[slot];
178                 if (values->host != values->curr) {
179                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
180                         values->curr = values->host;
181                 }
182         }
183         locals->registered = false;
184         user_return_notifier_unregister(urn);
185 }
186
187 static void shared_msr_update(unsigned slot, u32 msr)
188 {
189         struct kvm_shared_msrs *smsr;
190         u64 value;
191
192         smsr = &__get_cpu_var(shared_msrs);
193         /* Only reads here; nobody should be modifying shared_msrs_global
194          * at this point, so no locking is needed. */
195         if (slot >= shared_msrs_global.nr) {
196                 printk(KERN_ERR "kvm: invalid MSR slot!");
197                 return;
198         }
199         rdmsrl_safe(msr, &value);
200         smsr->values[slot].host = value;
201         smsr->values[slot].curr = value;
202 }
203
204 void kvm_define_shared_msr(unsigned slot, u32 msr)
205 {
206         if (slot >= shared_msrs_global.nr)
207                 shared_msrs_global.nr = slot + 1;
208         shared_msrs_global.msrs[slot] = msr;
209         /* make sure shared_msrs_global has been updated before it is read */
210         smp_wmb();
211 }
212 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
213
214 static void kvm_shared_msr_cpu_online(void)
215 {
216         unsigned i;
217
218         for (i = 0; i < shared_msrs_global.nr; ++i)
219                 shared_msr_update(i, shared_msrs_global.msrs[i]);
220 }
221
222 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
223 {
224         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
225
226         if (((value ^ smsr->values[slot].curr) & mask) == 0)
227                 return;
228         smsr->values[slot].curr = value;
229         wrmsrl(shared_msrs_global.msrs[slot], value);
230         if (!smsr->registered) {
231                 smsr->urn.on_user_return = kvm_on_user_return;
232                 user_return_notifier_register(&smsr->urn);
233                 smsr->registered = true;
234         }
235 }
236 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
237
238 static void drop_user_return_notifiers(void *ignore)
239 {
240         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
241
242         if (smsr->registered)
243                 kvm_on_user_return(&smsr->urn);
244 }
245
246 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
247 {
248         if (irqchip_in_kernel(vcpu->kvm))
249                 return vcpu->arch.apic_base;
250         else
251                 return vcpu->arch.apic_base;
252 }
253 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
254
255 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
256 {
257         /* TODO: reserve bits check */
258         if (irqchip_in_kernel(vcpu->kvm))
259                 kvm_lapic_set_base(vcpu, data);
260         else
261                 vcpu->arch.apic_base = data;
262 }
263 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
264
265 #define EXCPT_BENIGN            0
266 #define EXCPT_CONTRIBUTORY      1
267 #define EXCPT_PF                2
268
269 static int exception_class(int vector)
270 {
271         switch (vector) {
272         case PF_VECTOR:
273                 return EXCPT_PF;
274         case DE_VECTOR:
275         case TS_VECTOR:
276         case NP_VECTOR:
277         case SS_VECTOR:
278         case GP_VECTOR:
279                 return EXCPT_CONTRIBUTORY;
280         default:
281                 break;
282         }
283         return EXCPT_BENIGN;
284 }
285
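/*
 * Double-fault rules (see the SDM's "Conditions for Generating a Double
 * Fault" table, referenced below as Table 5-5), as implemented here:
 *   - benign + anything, or anything + benign: deliver the new exception
 *   - contributory + contributory: #DF
 *   - page fault + (contributory or page fault): #DF
 *   - a further fault while a #DF is pending: triple fault (VM shutdown)
 */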
286 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
287                 unsigned nr, bool has_error, u32 error_code,
288                 bool reinject)
289 {
290         u32 prev_nr;
291         int class1, class2;
292
293         kvm_make_request(KVM_REQ_EVENT, vcpu);
294
295         if (!vcpu->arch.exception.pending) {
296         queue:
297                 vcpu->arch.exception.pending = true;
298                 vcpu->arch.exception.has_error_code = has_error;
299                 vcpu->arch.exception.nr = nr;
300                 vcpu->arch.exception.error_code = error_code;
301                 vcpu->arch.exception.reinject = reinject;
302                 return;
303         }
304
305         /* an exception is already pending; see how it combines with the new one */
306         prev_nr = vcpu->arch.exception.nr;
307         if (prev_nr == DF_VECTOR) {
308                 /* triple fault -> shutdown */
309                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
310                 return;
311         }
312         class1 = exception_class(prev_nr);
313         class2 = exception_class(nr);
314         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
315                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
316                 /* generate double fault per SDM Table 5-5 */
317                 vcpu->arch.exception.pending = true;
318                 vcpu->arch.exception.has_error_code = true;
319                 vcpu->arch.exception.nr = DF_VECTOR;
320                 vcpu->arch.exception.error_code = 0;
321         } else
322                 /* replace the previous exception with the new one in the
323                    hope that instruction re-execution will regenerate the
324                    lost exception */
325                 goto queue;
326 }
327
328 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
329 {
330         kvm_multiple_exception(vcpu, nr, false, 0, false);
331 }
332 EXPORT_SYMBOL_GPL(kvm_queue_exception);
333
334 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
335 {
336         kvm_multiple_exception(vcpu, nr, false, 0, true);
337 }
338 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
339
340 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
341 {
342         if (err)
343                 kvm_inject_gp(vcpu, 0);
344         else
345                 kvm_x86_ops->skip_emulated_instruction(vcpu);
346 }
347 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
348
349 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
350 {
351         ++vcpu->stat.pf_guest;
352         vcpu->arch.cr2 = fault->address;
353         kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
354 }
355 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
356
357 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
358 {
359         if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
360                 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
361         else
362                 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
363 }
364
365 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
366 {
367         atomic_inc(&vcpu->arch.nmi_queued);
368         kvm_make_request(KVM_REQ_NMI, vcpu);
369 }
370 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
371
372 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
373 {
374         kvm_multiple_exception(vcpu, nr, true, error_code, false);
375 }
376 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
377
378 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
379 {
380         kvm_multiple_exception(vcpu, nr, true, error_code, true);
381 }
382 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
383
384 /*
385  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
386  * a #GP and return false.
387  */
388 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
389 {
390         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
391                 return true;
392         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
393         return false;
394 }
395 EXPORT_SYMBOL_GPL(kvm_require_cpl);
396
397 /*
398  * This function reads from the physical memory of the currently running
399  * guest. Unlike kvm_read_guest_page, it can read either from guest-physical
400  * memory or from the guest's own guest-physical (nested) memory.
401  */
402 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
403                             gfn_t ngfn, void *data, int offset, int len,
404                             u32 access)
405 {
406         gfn_t real_gfn;
407         gpa_t ngpa;
408
409         ngpa     = gfn_to_gpa(ngfn);
410         real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
411         if (real_gfn == UNMAPPED_GVA)
412                 return -EFAULT;
413
414         real_gfn = gpa_to_gfn(real_gfn);
415
416         return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
417 }
418 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
419
420 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
421                                void *data, int offset, int len, u32 access)
422 {
423         return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
424                                        data, offset, len, access);
425 }
426
427 /*
428  * Load the PAE PDPTRs.  Return true if they are all valid.
429  */
430 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
431 {
432         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
433         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
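        /*
         * In PAE mode cr3 points to a 32-byte-aligned table of four 8-byte
         * PDPTEs.  The expression above takes the table's byte offset within
         * its page (cr3 & (PAGE_SIZE-1), a multiple of 32) and converts it to
         * an index in units of u64 entries, which is what the read below uses.
         */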
434         int i;
435         int ret;
436         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
437
438         ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
439                                       offset * sizeof(u64), sizeof(pdpte),
440                                       PFERR_USER_MASK|PFERR_WRITE_MASK);
441         if (ret < 0) {
442                 ret = 0;
443                 goto out;
444         }
445         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
446                 if (is_present_gpte(pdpte[i]) &&
447                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
448                         ret = 0;
449                         goto out;
450                 }
451         }
452         ret = 1;
453
454         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
455         __set_bit(VCPU_EXREG_PDPTR,
456                   (unsigned long *)&vcpu->arch.regs_avail);
457         __set_bit(VCPU_EXREG_PDPTR,
458                   (unsigned long *)&vcpu->arch.regs_dirty);
459 out:
460
461         return ret;
462 }
463 EXPORT_SYMBOL_GPL(load_pdptrs);
464
465 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
466 {
467         u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
468         bool changed = true;
469         int offset;
470         gfn_t gfn;
471         int r;
472
473         if (is_long_mode(vcpu) || !is_pae(vcpu))
474                 return false;
475
476         if (!test_bit(VCPU_EXREG_PDPTR,
477                       (unsigned long *)&vcpu->arch.regs_avail))
478                 return true;
479
480         gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
481         offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
482         r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
483                                        PFERR_USER_MASK | PFERR_WRITE_MASK);
484         if (r < 0)
485                 goto out;
486         changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
487 out:
488
489         return changed;
490 }
491
492 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
493 {
494         unsigned long old_cr0 = kvm_read_cr0(vcpu);
495         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
496                                     X86_CR0_CD | X86_CR0_NW;
497
498         cr0 |= X86_CR0_ET;
499
500 #ifdef CONFIG_X86_64
501         if (cr0 & 0xffffffff00000000UL)
502                 return 1;
503 #endif
504
505         cr0 &= ~CR0_RESERVED_BITS;
506
507         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
508                 return 1;
509
510         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
511                 return 1;
512
513         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
514 #ifdef CONFIG_X86_64
515                 if ((vcpu->arch.efer & EFER_LME)) {
516                         int cs_db, cs_l;
517
518                         if (!is_pae(vcpu))
519                                 return 1;
520                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
521                         if (cs_l)
522                                 return 1;
523                 } else
524 #endif
525                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
526                                                  kvm_read_cr3(vcpu)))
527                         return 1;
528         }
529
530         kvm_x86_ops->set_cr0(vcpu, cr0);
531
532         if ((cr0 ^ old_cr0) & X86_CR0_PG) {
533                 kvm_clear_async_pf_completion_queue(vcpu);
534                 kvm_async_pf_hash_reset(vcpu);
535         }
536
537         if ((cr0 ^ old_cr0) & update_bits)
538                 kvm_mmu_reset_context(vcpu);
539         return 0;
540 }
541 EXPORT_SYMBOL_GPL(kvm_set_cr0);
542
543 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
544 {
545         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
546 }
547 EXPORT_SYMBOL_GPL(kvm_lmsw);
548
549 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
550 {
551         u64 xcr0;
552
553         /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
554         if (index != XCR_XFEATURE_ENABLED_MASK)
555                 return 1;
556         xcr0 = xcr;
557         if (!(xcr0 & XSTATE_FP))
558                 return 1;
559         if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
560                 return 1;
561         if (xcr0 & ~host_xcr0)
562                 return 1;
563         vcpu->arch.xcr0 = xcr0;
564         vcpu->guest_xcr0_loaded = 0;
565         return 0;
566 }
567
568 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
569 {
570         if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
571             __kvm_set_xcr(vcpu, index, xcr)) {
572                 kvm_inject_gp(vcpu, 0);
573                 return 1;
574         }
575         return 0;
576 }
577 EXPORT_SYMBOL_GPL(kvm_set_xcr);
578
579 static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
580 {
581         struct kvm_cpuid_entry2 *best;
582
583         if (!static_cpu_has(X86_FEATURE_XSAVE))
584                 return 0;
585
586         best = kvm_find_cpuid_entry(vcpu, 1, 0);
587         return best && (best->ecx & bit(X86_FEATURE_XSAVE));
588 }
589
590 static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
591 {
592         struct kvm_cpuid_entry2 *best;
593
594         best = kvm_find_cpuid_entry(vcpu, 7, 0);
595         return best && (best->ebx & bit(X86_FEATURE_SMEP));
596 }
597
598 static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
599 {
600         struct kvm_cpuid_entry2 *best;
601
602         best = kvm_find_cpuid_entry(vcpu, 7, 0);
603         return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
604 }
605
606 static void update_cpuid(struct kvm_vcpu *vcpu)
607 {
608         struct kvm_cpuid_entry2 *best;
609         struct kvm_lapic *apic = vcpu->arch.apic;
610
611         best = kvm_find_cpuid_entry(vcpu, 1, 0);
612         if (!best)
613                 return;
614
615         /* Update OSXSAVE bit */
616         if (cpu_has_xsave && best->function == 0x1) {
617                 best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
618                 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
619                         best->ecx |= bit(X86_FEATURE_OSXSAVE);
620         }
621
622         if (apic) {
623                 if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
624                         apic->lapic_timer.timer_mode_mask = 3 << 17;
625                 else
626                         apic->lapic_timer.timer_mode_mask = 1 << 17;
627         }
628 }
629
630 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
631 {
632         unsigned long old_cr4 = kvm_read_cr4(vcpu);
633         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
634                                    X86_CR4_PAE | X86_CR4_SMEP;
635         if (cr4 & CR4_RESERVED_BITS)
636                 return 1;
637
638         if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
639                 return 1;
640
641         if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
642                 return 1;
643
644         if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
645                 return 1;
646
647         if (is_long_mode(vcpu)) {
648                 if (!(cr4 & X86_CR4_PAE))
649                         return 1;
650         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
651                    && ((cr4 ^ old_cr4) & pdptr_bits)
652                    && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
653                                    kvm_read_cr3(vcpu)))
654                 return 1;
655
656         if (kvm_x86_ops->set_cr4(vcpu, cr4))
657                 return 1;
658
659         if ((cr4 ^ old_cr4) & pdptr_bits)
660                 kvm_mmu_reset_context(vcpu);
661
662         if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
663                 update_cpuid(vcpu);
664
665         return 0;
666 }
667 EXPORT_SYMBOL_GPL(kvm_set_cr4);
668
669 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
670 {
671         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
672                 kvm_mmu_sync_roots(vcpu);
673                 kvm_mmu_flush_tlb(vcpu);
674                 return 0;
675         }
676
677         if (is_long_mode(vcpu)) {
678                 if (cr3 & CR3_L_MODE_RESERVED_BITS)
679                         return 1;
680         } else {
681                 if (is_pae(vcpu)) {
682                         if (cr3 & CR3_PAE_RESERVED_BITS)
683                                 return 1;
684                         if (is_paging(vcpu) &&
685                             !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
686                                 return 1;
687                 }
688                 /*
689                  * We don't check reserved bits in nonpae mode, because
690                  * this isn't enforced, and VMware depends on this.
691                  */
692         }
693
694         /*
695          * Does the new cr3 value map to physical memory? (Note, we
696          * catch an invalid cr3 even in real-mode, because it would
697          * cause trouble later on when we turn on paging anyway.)
698          *
699          * A real CPU would silently accept an invalid cr3 and would
700          * attempt to use it - with largely undefined (and often hard
701          * to debug) behavior on the guest side.
702          */
703         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
704                 return 1;
705         vcpu->arch.cr3 = cr3;
706         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
707         vcpu->arch.mmu.new_cr3(vcpu);
708         return 0;
709 }
710 EXPORT_SYMBOL_GPL(kvm_set_cr3);
711
712 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
713 {
714         if (cr8 & CR8_RESERVED_BITS)
715                 return 1;
716         if (irqchip_in_kernel(vcpu->kvm))
717                 kvm_lapic_set_tpr(vcpu, cr8);
718         else
719                 vcpu->arch.cr8 = cr8;
720         return 0;
721 }
722 EXPORT_SYMBOL_GPL(kvm_set_cr8);
723
724 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
725 {
726         if (irqchip_in_kernel(vcpu->kvm))
727                 return kvm_lapic_get_cr8(vcpu);
728         else
729                 return vcpu->arch.cr8;
730 }
731 EXPORT_SYMBOL_GPL(kvm_get_cr8);
732
733 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
734 {
735         switch (dr) {
736         case 0 ... 3:
737                 vcpu->arch.db[dr] = val;
738                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
739                         vcpu->arch.eff_db[dr] = val;
740                 break;
741         case 4:
742                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
743                         return 1; /* #UD */
744                 /* fall through */
745         case 6:
746                 if (val & 0xffffffff00000000ULL)
747                         return -1; /* #GP */
748                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
749                 break;
750         case 5:
751                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
752                         return 1; /* #UD */
753                 /* fall through */
754         default: /* 7 */
755                 if (val & 0xffffffff00000000ULL)
756                         return -1; /* #GP */
757                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
758                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
759                         kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
760                         vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
761                 }
762                 break;
763         }
764
765         return 0;
766 }
767
768 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
769 {
770         int res;
771
772         res = __kvm_set_dr(vcpu, dr, val);
773         if (res > 0)
774                 kvm_queue_exception(vcpu, UD_VECTOR);
775         else if (res < 0)
776                 kvm_inject_gp(vcpu, 0);
777
778         return res;
779 }
780 EXPORT_SYMBOL_GPL(kvm_set_dr);
781
782 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
783 {
784         switch (dr) {
785         case 0 ... 3:
786                 *val = vcpu->arch.db[dr];
787                 break;
788         case 4:
789                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
790                         return 1;
791                 /* fall through */
792         case 6:
793                 *val = vcpu->arch.dr6;
794                 break;
795         case 5:
796                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
797                         return 1;
798                 /* fall through */
799         default: /* 7 */
800                 *val = vcpu->arch.dr7;
801                 break;
802         }
803
804         return 0;
805 }
806
807 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
808 {
809         if (_kvm_get_dr(vcpu, dr, val)) {
810                 kvm_queue_exception(vcpu, UD_VECTOR);
811                 return 1;
812         }
813         return 0;
814 }
815 EXPORT_SYMBOL_GPL(kvm_get_dr);
816
817 /*
818  * List of MSR numbers which we expose to userspace through KVM_GET_MSRS,
819  * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
820  *
821  * This list is modified at module load time to reflect the
822  * capabilities of the host cpu. The capability test skips MSRs that are
823  * kvm-specific; those are placed at the beginning of the list.
824  */
825
826 #define KVM_SAVE_MSRS_BEGIN     9
827 static u32 msrs_to_save[] = {
828         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
829         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
830         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
831         HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
832         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
833         MSR_STAR,
834 #ifdef CONFIG_X86_64
835         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
836 #endif
837         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
838         MSR_TSC_AUX,
839 };
840
841 static unsigned num_msrs_to_save;
842
843 static u32 emulated_msrs[] = {
844         MSR_IA32_TSCDEADLINE,
845         MSR_IA32_MISC_ENABLE,
846         MSR_IA32_MCG_STATUS,
847         MSR_IA32_MCG_CTL,
848 };
849
850 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
851 {
852         u64 old_efer = vcpu->arch.efer;
853
854         if (efer & efer_reserved_bits)
855                 return 1;
856
857         if (is_paging(vcpu)
858             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
859                 return 1;
860
861         if (efer & EFER_FFXSR) {
862                 struct kvm_cpuid_entry2 *feat;
863
864                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
865                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
866                         return 1;
867         }
868
869         if (efer & EFER_SVME) {
870                 struct kvm_cpuid_entry2 *feat;
871
872                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
873                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
874                         return 1;
875         }
876
877         efer &= ~EFER_LMA;
878         efer |= vcpu->arch.efer & EFER_LMA;
879
880         kvm_x86_ops->set_efer(vcpu, efer);
881
882         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
883
884         /* Update reserved bits */
885         if ((efer ^ old_efer) & EFER_NX)
886                 kvm_mmu_reset_context(vcpu);
887
888         return 0;
889 }
890
891 void kvm_enable_efer_bits(u64 mask)
892 {
893        efer_reserved_bits &= ~mask;
894 }
895 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
896
897 /*
898  * Writes the msr value into the appropriate "register".
899  * Returns 0 on success, non-0 otherwise.
900  * Assumes vcpu_load() was already called.
901  */
902 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
903 {
904         switch (msr_index) {
905         case MSR_FS_BASE:
906         case MSR_GS_BASE:
907         case MSR_KERNEL_GS_BASE:
908         case MSR_CSTAR:
909         case MSR_LSTAR:
910                 if (is_noncanonical_address(data))
911                         return 1;
912                 break;
913         case MSR_IA32_SYSENTER_EIP:
914         case MSR_IA32_SYSENTER_ESP:
915                 /*
916                  * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
917                  * non-canonical address is written on Intel but not on
918                  * AMD (which ignores the top 32-bits, because it does
919                  * not implement 64-bit SYSENTER).
920                  *
921                  * 64-bit code should hence be able to write a non-canonical
922                  * value on AMD.  Making the address canonical ensures that
923                  * vmentry does not fail on Intel after writing a non-canonical
924                  * value, and that something deterministic happens if the guest
925                  * invokes 64-bit SYSENTER.
926                  */
927                 data = get_canonical(data);
928         }
929         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
930 }
931 EXPORT_SYMBOL_GPL(kvm_set_msr);
932
933 /*
934  * Adapt set_msr() to msr_io()'s calling convention
935  */
936 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
937 {
938         return kvm_set_msr(vcpu, index, *data);
939 }
940
941 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
942 {
943         int version;
944         int r;
945         struct pvclock_wall_clock wc;
946         struct timespec boot;
947
948         if (!wall_clock)
949                 return;
950
951         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
952         if (r)
953                 return;
954
955         if (version & 1)
956                 ++version;  /* first time write, random junk */
957
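        /*
         * As with a seqcount, the version is made odd while the wall clock
         * data is being updated and even again once it is consistent; a
         * guest that sees an odd version, or different versions before and
         * after copying the structure, must retry.
         */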
958         ++version;
959
960         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
961
962         /*
963          * The guest calculates current wall clock time by adding
964          * system time (updated by kvm_guest_time_update below) to the
965          * wall clock specified here.  guest system time equals host
966          * system time for us, thus we must fill in host boot time here.
967          */
968         getboottime(&boot);
969
970         wc.sec = boot.tv_sec;
971         wc.nsec = boot.tv_nsec;
972         wc.version = version;
973
974         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
975
976         version++;
977         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
978 }
979
980 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
981 {
982         uint32_t quotient, remainder;
983
984         /* Don't try to replace with do_div(), this one calculates
985          * "(dividend << 32) / divisor" */
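        /* e.g. div_frac(1, 3) == 0x55555555, the 0.32 fixed-point value of 1/3 */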
986         __asm__ ( "divl %4"
987                   : "=a" (quotient), "=d" (remainder)
988                   : "0" (0), "1" (dividend), "r" (divisor) );
989         return quotient;
990 }
991
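/*
 * Find a (shift, 32-bit multiplier) pair such that, roughly,
 *
 *   scaled_khz ~= base_khz * 2^shift * multiplier / 2^32
 *
 * i.e. the form consumed by pvclock_scale_delta(): shift the input first,
 * then apply the 32-bit fractional multiplier.
 */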
992 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
993                                s8 *pshift, u32 *pmultiplier)
994 {
995         uint64_t scaled64;
996         int32_t  shift = 0;
997         uint64_t tps64;
998         uint32_t tps32;
999
1000         tps64 = base_khz * 1000LL;
1001         scaled64 = scaled_khz * 1000LL;
1002         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1003                 tps64 >>= 1;
1004                 shift--;
1005         }
1006
1007         tps32 = (uint32_t)tps64;
1008         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1009                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1010                         scaled64 >>= 1;
1011                 else
1012                         tps32 <<= 1;
1013                 shift++;
1014         }
1015
1016         *pshift = shift;
1017         *pmultiplier = div_frac(scaled64, tps32);
1018
1019         pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
1020                  __func__, base_khz, scaled_khz, shift, *pmultiplier);
1021 }
1022
1023 static inline u64 get_kernel_ns(void)
1024 {
1025         struct timespec ts;
1026
1027         WARN_ON(preemptible());
1028         ktime_get_ts(&ts);
1029         monotonic_to_bootbased(&ts);
1030         return timespec_to_ns(&ts);
1031 }
1032
1033 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1034 unsigned long max_tsc_khz;
1035
1036 static inline int kvm_tsc_changes_freq(void)
1037 {
1038         int cpu = get_cpu();
1039         int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1040                   cpufreq_quick_get(cpu) != 0;
1041         put_cpu();
1042         return ret;
1043 }
1044
1045 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
1046 {
1047         if (vcpu->arch.virtual_tsc_khz)
1048                 return vcpu->arch.virtual_tsc_khz;
1049         else
1050                 return __this_cpu_read(cpu_tsc_khz);
1051 }
1052
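/*
 * Convert nanoseconds to TSC cycles: with the rate in kHz,
 * cycles = ns * (khz * 1000) / NSEC_PER_SEC = ns * khz / USEC_PER_SEC,
 * which is what the do_div() below computes.
 */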
1053 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
1054 {
1055         u64 ret;
1056
1057         WARN_ON(preemptible());
1058         if (kvm_tsc_changes_freq())
1059                 printk_once(KERN_WARNING
1060                  "kvm: unreliable cycle conversion on adjustable rate TSC\n");
1061         ret = nsec * vcpu_tsc_khz(vcpu);
1062         do_div(ret, USEC_PER_SEC);
1063         return ret;
1064 }
1065
1066 static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
1067 {
1068         /* Compute a scale to convert nanoseconds to TSC cycles */
1069         kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
1070                            &vcpu->arch.tsc_catchup_shift,
1071                            &vcpu->arch.tsc_catchup_mult);
1072 }
1073
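/*
 * TSC value the guest should have reached by kernel_ns if it had been
 * ticking at its virtual rate since the last guest TSC write; used by the
 * catchup path in kvm_guest_time_update() to nudge the TSC offset forward.
 */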
1074 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1075 {
1076         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
1077                                       vcpu->arch.tsc_catchup_mult,
1078                                       vcpu->arch.tsc_catchup_shift);
1079         tsc += vcpu->arch.last_tsc_write;
1080         return tsc;
1081 }
1082
1083 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
1084 {
1085         struct kvm *kvm = vcpu->kvm;
1086         u64 offset, ns, elapsed;
1087         unsigned long flags;
1088         s64 sdiff;
1089
1090         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1091         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1092         ns = get_kernel_ns();
1093         elapsed = ns - kvm->arch.last_tsc_nsec;
1094         sdiff = data - kvm->arch.last_tsc_write;
1095         if (sdiff < 0)
1096                 sdiff = -sdiff;
1097
1098         /*
1099          * Special case: a TSC write within 5 seconds of a write on
1100          * another CPU is interpreted as an attempt to synchronize them.
1101          * The 5 seconds is to accommodate host load / swapping as
1102          * well as any reset of TSC during the boot process.
1103          *
1104          * In that case, for a reliable TSC, we can match TSC offsets,
1105          * or make a best guess using the elapsed value.
1106          */
1107         if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
1108             elapsed < 5ULL * NSEC_PER_SEC) {
1109                 if (!check_tsc_unstable()) {
1110                         offset = kvm->arch.last_tsc_offset;
1111                         pr_debug("kvm: matched tsc offset for %llu\n", data);
1112                 } else {
1113                         u64 delta = nsec_to_cycles(vcpu, elapsed);
1114                         offset += delta;
1115                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1116                 }
1117                 ns = kvm->arch.last_tsc_nsec;
1118         }
1119         kvm->arch.last_tsc_nsec = ns;
1120         kvm->arch.last_tsc_write = data;
1121         kvm->arch.last_tsc_offset = offset;
1122         kvm_x86_ops->write_tsc_offset(vcpu, offset);
1123         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1124
1125         /* Reset of TSC must disable overshoot protection below */
1126         vcpu->arch.hv_clock.tsc_timestamp = 0;
1127         vcpu->arch.last_tsc_write = data;
1128         vcpu->arch.last_tsc_nsec = ns;
1129 }
1130 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1131
1132 static int kvm_guest_time_update(struct kvm_vcpu *v)
1133 {
1134         unsigned long flags;
1135         struct kvm_vcpu_arch *vcpu = &v->arch;
1136         unsigned long this_tsc_khz;
1137         s64 kernel_ns, max_kernel_ns;
1138         u64 tsc_timestamp;
1139
1140         /* Keep irq disabled to prevent changes to the clock */
1141         local_irq_save(flags);
1142         tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
1143         kernel_ns = get_kernel_ns();
1144         this_tsc_khz = vcpu_tsc_khz(v);
1145         if (unlikely(this_tsc_khz == 0)) {
1146                 local_irq_restore(flags);
1147                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1148                 return 1;
1149         }
1150
1151         /*
1152          * We may have to catch up the TSC to match elapsed wall clock
1153          * time for two reasons, even if kvmclock is used.
1154          *   1) CPU could have been running below the maximum TSC rate
1155          *   2) Broken TSC compensation resets the base at each VCPU
1156          *      entry to avoid unknown leaps of TSC even when running
1157          *      again on the same CPU.  This may cause apparent elapsed
1158          *      time to disappear, and the guest to stand still or run
1159          *      very slowly.
1160          */
1161         if (vcpu->tsc_catchup) {
1162                 u64 tsc = compute_guest_tsc(v, kernel_ns);
1163                 if (tsc > tsc_timestamp) {
1164                         kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
1165                         tsc_timestamp = tsc;
1166                 }
1167         }
1168
1169         local_irq_restore(flags);
1170
1171         if (!vcpu->pv_time_enabled)
1172                 return 0;
1173
1174         /*
1175          * Time as measured by the TSC may go backwards when resetting the base
1176          * tsc_timestamp.  The reason for this is that the TSC resolution is
1177          * higher than the resolution of the other clock scales.  Thus, many
1178          * possible measurements of the TSC correspond to one measurement of any
1179          * other clock, and so a spread of values is possible.  This is not a
1180          * problem for the computation of the nanosecond clock; with TSC rates
1181          * around 1GHZ, there can only be a few cycles which correspond to one
1182          * nanosecond value, and any path through this code will inevitably
1183          * take longer than that.  However, with the kernel_ns value itself,
1184          * the precision may be much lower, down to HZ granularity.  If the
1185          * first sampling of TSC against kernel_ns ends in the low part of the
1186          * range, and the second in the high end of the range, we can get:
1187          *
1188          * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1189          *
1190          * As the sampling errors potentially range in the thousands of cycles,
1191          * it is possible such a time value has already been observed by the
1192          * guest.  To protect against this, we must compute the system time as
1193          * observed by the guest and ensure the new system time is greater.
1194          */
1195         max_kernel_ns = 0;
1196         if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
1197                 max_kernel_ns = vcpu->last_guest_tsc -
1198                                 vcpu->hv_clock.tsc_timestamp;
1199                 max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1200                                     vcpu->hv_clock.tsc_to_system_mul,
1201                                     vcpu->hv_clock.tsc_shift);
1202                 max_kernel_ns += vcpu->last_kernel_ns;
1203         }
1204
1205         if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1206                 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1207                                    &vcpu->hv_clock.tsc_shift,
1208                                    &vcpu->hv_clock.tsc_to_system_mul);
1209                 vcpu->hw_tsc_khz = this_tsc_khz;
1210         }
1211
1212         if (max_kernel_ns > kernel_ns)
1213                 kernel_ns = max_kernel_ns;
1214
1215         /* With all the info we got, fill in the values */
1216         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1217         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1218         vcpu->last_kernel_ns = kernel_ns;
1219         vcpu->last_guest_tsc = tsc_timestamp;
1220         vcpu->hv_clock.flags = 0;
1221
1222         /*
1223          * The interface expects us to write an even number signaling that the
1224          * update is finished. Since the guest won't see the intermediate
1225          * state, we just increase by 2 at the end.
1226          */
1227         vcpu->hv_clock.version += 2;
1228
1229         kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1230                                 &vcpu->hv_clock,
1231                                 sizeof(vcpu->hv_clock));
1232         return 0;
1233 }
1234
1235 static bool msr_mtrr_valid(unsigned msr)
1236 {
1237         switch (msr) {
1238         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1239         case MSR_MTRRfix64K_00000:
1240         case MSR_MTRRfix16K_80000:
1241         case MSR_MTRRfix16K_A0000:
1242         case MSR_MTRRfix4K_C0000:
1243         case MSR_MTRRfix4K_C8000:
1244         case MSR_MTRRfix4K_D0000:
1245         case MSR_MTRRfix4K_D8000:
1246         case MSR_MTRRfix4K_E0000:
1247         case MSR_MTRRfix4K_E8000:
1248         case MSR_MTRRfix4K_F0000:
1249         case MSR_MTRRfix4K_F8000:
1250         case MSR_MTRRdefType:
1251         case MSR_IA32_CR_PAT:
1252                 return true;
1253         case 0x2f8:
1254                 return true;
1255         }
1256         return false;
1257 }
1258
1259 static bool valid_pat_type(unsigned t)
1260 {
1261         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1262 }
1263
1264 static bool valid_mtrr_type(unsigned t)
1265 {
1266         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1267 }
1268
1269 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1270 {
1271         int i;
1272
1273         if (!msr_mtrr_valid(msr))
1274                 return false;
1275
1276         if (msr == MSR_IA32_CR_PAT) {
1277                 for (i = 0; i < 8; i++)
1278                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
1279                                 return false;
1280                 return true;
1281         } else if (msr == MSR_MTRRdefType) {
1282                 if (data & ~0xcff)
1283                         return false;
1284                 return valid_mtrr_type(data & 0xff);
1285         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1286                 for (i = 0; i < 8 ; i++)
1287                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1288                                 return false;
1289                 return true;
1290         }
1291
1292         /* variable MTRRs */
1293         return valid_mtrr_type(data & 0xff);
1294 }
1295
1296 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1297 {
1298         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1299
1300         if (!mtrr_valid(vcpu, msr, data))
1301                 return 1;
1302
1303         if (msr == MSR_MTRRdefType) {
1304                 vcpu->arch.mtrr_state.def_type = data;
1305                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1306         } else if (msr == MSR_MTRRfix64K_00000)
1307                 p[0] = data;
1308         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1309                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
1310         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1311                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1312         else if (msr == MSR_IA32_CR_PAT)
1313                 vcpu->arch.pat = data;
1314         else {  /* Variable MTRRs */
1315                 int idx, is_mtrr_mask;
1316                 u64 *pt;
1317
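                /*
                 * Variable-range MTRRs come in MTRRphysBasen/MTRRphysMaskn
                 * pairs starting at MSR 0x200: even MSRs hold the base,
                 * odd MSRs the mask, hence the index/is_mask split below.
                 */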
1318                 idx = (msr - 0x200) / 2;
1319                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1320                 if (!is_mtrr_mask)
1321                         pt =
1322                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1323                 else
1324                         pt =
1325                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1326                 *pt = data;
1327         }
1328
1329         kvm_mmu_reset_context(vcpu);
1330         return 0;
1331 }
1332
1333 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1334 {
1335         u64 mcg_cap = vcpu->arch.mcg_cap;
1336         unsigned bank_num = mcg_cap & 0xff;
1337
1338         switch (msr) {
1339         case MSR_IA32_MCG_STATUS:
1340                 vcpu->arch.mcg_status = data;
1341                 break;
1342         case MSR_IA32_MCG_CTL:
1343                 if (!(mcg_cap & MCG_CTL_P))
1344                         return 1;
1345                 if (data != 0 && data != ~(u64)0)
1346                         return -1;
1347                 vcpu->arch.mcg_ctl = data;
1348                 break;
1349         default:
1350                 if (msr >= MSR_IA32_MC0_CTL &&
1351                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1352                         u32 offset = msr - MSR_IA32_MC0_CTL;
1353                         /* Only 0 or all 1s can be written to IA32_MCi_CTL.
1354                          * Some Linux kernels, though, clear bit 10 in bank 4
1355                          * to work around a BIOS/GART TBL issue on AMD K8s;
1356                          * ignore this to avoid an uncaught #GP in the guest.
1357                          */
1358                         if ((offset & 0x3) == 0 &&
1359                             data != 0 && (data | (1 << 10)) != ~(u64)0)
1360                                 return -1;
1361                         vcpu->arch.mce_banks[offset] = data;
1362                         break;
1363                 }
1364                 return 1;
1365         }
1366         return 0;
1367 }
1368
1369 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1370 {
1371         struct kvm *kvm = vcpu->kvm;
1372         int lm = is_long_mode(vcpu);
1373         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1374                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1375         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1376                 : kvm->arch.xen_hvm_config.blob_size_32;
1377         u32 page_num = data & ~PAGE_MASK;
1378         u64 page_addr = data & PAGE_MASK;
1379         u8 *page;
1380         int r;
1381
1382         r = -E2BIG;
1383         if (page_num >= blob_size)
1384                 goto out;
1385         r = -ENOMEM;
1386         page = kzalloc(PAGE_SIZE, GFP_KERNEL);
1387         if (!page)
1388                 goto out;
1389         r = -EFAULT;
1390         if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
1391                 goto out_free;
1392         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1393                 goto out_free;
1394         r = 0;
1395 out_free:
1396         kfree(page);
1397 out:
1398         return r;
1399 }
1400
1401 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1402 {
1403         return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1404 }
1405
1406 static bool kvm_hv_msr_partition_wide(u32 msr)
1407 {
1408         bool r = false;
1409         switch (msr) {
1410         case HV_X64_MSR_GUEST_OS_ID:
1411         case HV_X64_MSR_HYPERCALL:
1412                 r = true;
1413                 break;
1414         }
1415
1416         return r;
1417 }
1418
1419 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1420 {
1421         struct kvm *kvm = vcpu->kvm;
1422
1423         switch (msr) {
1424         case HV_X64_MSR_GUEST_OS_ID:
1425                 kvm->arch.hv_guest_os_id = data;
1426                 /* setting guest os id to zero disables hypercall page */
1427                 if (!kvm->arch.hv_guest_os_id)
1428                         kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1429                 break;
1430         case HV_X64_MSR_HYPERCALL: {
1431                 u64 gfn;
1432                 unsigned long addr;
1433                 u8 instructions[4];
1434
1435                 /* if guest os id is not set hypercall should remain disabled */
1436                 if (!kvm->arch.hv_guest_os_id)
1437                         break;
1438                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1439                         kvm->arch.hv_hypercall = data;
1440                         break;
1441                 }
1442                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1443                 addr = gfn_to_hva(kvm, gfn);
1444                 if (kvm_is_error_hva(addr))
1445                         return 1;
1446                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1447                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1448                 if (__copy_to_user((void __user *)addr, instructions, 4))
1449                         return 1;
1450                 kvm->arch.hv_hypercall = data;
1451                 break;
1452         }
1453         default:
1454                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1455                           "data 0x%llx\n", msr, data);
1456                 return 1;
1457         }
1458         return 0;
1459 }
1460
1461 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1462 {
1463         switch (msr) {
1464         case HV_X64_MSR_APIC_ASSIST_PAGE: {
1465                 unsigned long addr;
1466
1467                 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1468                         vcpu->arch.hv_vapic = data;
1469                         break;
1470                 }
1471                 addr = gfn_to_hva(vcpu->kvm, data >>
1472                                   HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1473                 if (kvm_is_error_hva(addr))
1474                         return 1;
1475                 if (__clear_user((void __user *)addr, PAGE_SIZE))
1476                         return 1;
1477                 vcpu->arch.hv_vapic = data;
1478                 break;
1479         }
1480         case HV_X64_MSR_EOI:
1481                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1482         case HV_X64_MSR_ICR:
1483                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1484         case HV_X64_MSR_TPR:
1485                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1486         default:
1487                 pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1488                           "data 0x%llx\n", msr, data);
1489                 return 1;
1490         }
1491
1492         return 0;
1493 }
1494
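/*
 * MSR_KVM_ASYNC_PF_EN layout, as handled below: bit 0 enables async page
 * faults, bit 1 (KVM_ASYNC_PF_SEND_ALWAYS) allows delivery while the guest
 * is in kernel mode, bits 2:5 are reserved, and bits 6:63 give the 64-byte
 * aligned guest physical address of the 32-bit word shared with the guest.
 */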
1495 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1496 {
1497         gpa_t gpa = data & ~0x3f;
1498
1499         /* Bits 2:5 are reserved and should be zero */
1500         if (data & 0x3c)
1501                 return 1;
1502
1503         vcpu->arch.apf.msr_val = data;
1504
1505         if (!(data & KVM_ASYNC_PF_ENABLED)) {
1506                 kvm_clear_async_pf_completion_queue(vcpu);
1507                 kvm_async_pf_hash_reset(vcpu);
1508                 return 0;
1509         }
1510
1511         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
1512                                         sizeof(u32)))
1513                 return 1;
1514
1515         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
1516         kvm_async_pf_wakeup_all(vcpu);
1517         return 0;
1518 }
1519
1520 static void kvmclock_reset(struct kvm_vcpu *vcpu)
1521 {
1522         vcpu->arch.pv_time_enabled = false;
1523 }
1524
1525 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
1526 {
1527         u64 delta;
1528
1529         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1530                 return;
1531
1532         delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
1533         vcpu->arch.st.last_steal = current->sched_info.run_delay;
1534         vcpu->arch.st.accum_steal = delta;
1535 }
1536
1537 static void record_steal_time(struct kvm_vcpu *vcpu)
1538 {
1539         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1540                 return;
1541
1542         if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1543                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
1544                 return;
1545
1546         vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
1547         vcpu->arch.st.steal.version += 2;
1548         vcpu->arch.st.accum_steal = 0;
1549
1550         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1551                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
1552 }
1553
1554 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1555 {
1556         switch (msr) {
1557         case MSR_EFER:
1558                 return set_efer(vcpu, data);
1559         case MSR_K7_HWCR:
1560                 data &= ~(u64)0x40;     /* ignore flush filter disable */
1561                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
1562                 if (data != 0) {
1563                         pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1564                                 data);
1565                         return 1;
1566                 }
1567                 break;
1568         case MSR_FAM10H_MMIO_CONF_BASE:
1569                 if (data != 0) {
1570                         pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1571                                 "0x%llx\n", data);
1572                         return 1;
1573                 }
1574                 break;
1575         case MSR_AMD64_NB_CFG:
1576                 break;
1577         case MSR_IA32_DEBUGCTLMSR:
1578                 if (!data) {
1579                         /* We support the non-activated case already */
1580                         break;
1581                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1582                         /* Values other than LBR and BTF are vendor-specific,
1583                            thus reserved and should throw a #GP */
1584                         return 1;
1585                 }
1586                 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1587                         __func__, data);
1588                 break;
1589         case MSR_IA32_UCODE_REV:
1590         case MSR_IA32_UCODE_WRITE:
1591         case MSR_VM_HSAVE_PA:
1592         case MSR_AMD64_PATCH_LOADER:
1593                 break;
1594         case 0x200 ... 0x2ff:
1595                 return set_msr_mtrr(vcpu, msr, data);
1596         case MSR_IA32_APICBASE:
1597                 kvm_set_apic_base(vcpu, data);
1598                 break;
1599         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1600                 return kvm_x2apic_msr_write(vcpu, msr, data);
1601         case MSR_IA32_TSCDEADLINE:
1602                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
1603                 break;
1604         case MSR_IA32_MISC_ENABLE:
1605                 vcpu->arch.ia32_misc_enable_msr = data;
1606                 break;
1607         case MSR_KVM_WALL_CLOCK_NEW:
1608         case MSR_KVM_WALL_CLOCK:
1609                 vcpu->kvm->arch.wall_clock = data;
1610                 kvm_write_wall_clock(vcpu->kvm, data);
1611                 break;
1612         case MSR_KVM_SYSTEM_TIME_NEW:
1613         case MSR_KVM_SYSTEM_TIME: {
1614                 u64 gpa_offset;
1615                 kvmclock_reset(vcpu);
1616
1617                 vcpu->arch.time = data;
1618                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1619
1620                 /* we verify if the enable bit is set... */
1621                 if (!(data & 1))
1622                         break;
1623
1624                 gpa_offset = data & ~(PAGE_MASK | 1);
1625
1626                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1627                      &vcpu->arch.pv_time, data & ~1ULL,
1628                      sizeof(struct pvclock_vcpu_time_info)))
1629                         vcpu->arch.pv_time_enabled = false;
1630                 else
1631                         vcpu->arch.pv_time_enabled = true;
1632                 break;
1633         }
1634         case MSR_KVM_ASYNC_PF_EN:
1635                 if (kvm_pv_enable_async_pf(vcpu, data))
1636                         return 1;
1637                 break;
1638         case MSR_KVM_STEAL_TIME:
1639
1640                 if (unlikely(!sched_info_on()))
1641                         return 1;
1642
1643                 if (data & KVM_STEAL_RESERVED_MASK)
1644                         return 1;
1645
1646                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1647                                                 data & KVM_STEAL_VALID_BITS,
1648                                                 sizeof(struct kvm_steal_time)))
1649                         return 1;
1650
1651                 vcpu->arch.st.msr_val = data;
1652
1653                 if (!(data & KVM_MSR_ENABLED))
1654                         break;
1655
1656                 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1657
1658                 preempt_disable();
1659                 accumulate_steal_time(vcpu);
1660                 preempt_enable();
1661
1662                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1663
1664                 break;
1665
1666         case MSR_IA32_MCG_CTL:
1667         case MSR_IA32_MCG_STATUS:
1668         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1669                 return set_msr_mce(vcpu, msr, data);
1670
1671         /* Performance counters are not protected by a CPUID bit,
1672          * so we should check all of them in the generic path for the sake of
1673          * cross vendor migration.
1674          * Writing a zero into the event select MSRs disables them,
1675          * which we perfectly emulate ;-). Any other value should be at least
1676          * reported, since some guests depend on them.
1677          */
1678         case MSR_P6_EVNTSEL0:
1679         case MSR_P6_EVNTSEL1:
1680         case MSR_K7_EVNTSEL0:
1681         case MSR_K7_EVNTSEL1:
1682         case MSR_K7_EVNTSEL2:
1683         case MSR_K7_EVNTSEL3:
1684                 if (data != 0)
1685                         pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1686                                 "0x%x data 0x%llx\n", msr, data);
1687                 break;
1688         /* at least RHEL 4 unconditionally writes to the perfctr registers,
1689          * so we ignore writes to make it happy.
1690          */
1691         case MSR_P6_PERFCTR0:
1692         case MSR_P6_PERFCTR1:
1693         case MSR_K7_PERFCTR0:
1694         case MSR_K7_PERFCTR1:
1695         case MSR_K7_PERFCTR2:
1696         case MSR_K7_PERFCTR3:
1697                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1698                         "0x%x data 0x%llx\n", msr, data);
1699                 break;
1700         case MSR_K7_CLK_CTL:
1701                 /*
1702                  * Ignore all writes to this no longer documented MSR.
1703                  * Writes are only relevant for old K7 processors,
1704                  * all pre-dating SVM, but a recommended workaround from
1705                  * AMD for these chips. It is possible to specify the
1706                  * affected processor models on the command line, hence
1707                  * the need to ignore the workaround.
1708                  */
1709                 break;
1710         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1711                 if (kvm_hv_msr_partition_wide(msr)) {
1712                         int r;
1713                         mutex_lock(&vcpu->kvm->lock);
1714                         r = set_msr_hyperv_pw(vcpu, msr, data);
1715                         mutex_unlock(&vcpu->kvm->lock);
1716                         return r;
1717                 } else
1718                         return set_msr_hyperv(vcpu, msr, data);
1719                 break;
1720         case MSR_IA32_BBL_CR_CTL3:
1721                 /* Drop writes to this legacy MSR -- see rdmsr
1722                  * counterpart for further detail.
1723                  */
1724                 pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
1725                 break;
1726         default:
1727                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1728                         return xen_hvm_config(vcpu, data);
1729                 if (!ignore_msrs) {
1730                         pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1731                                 msr, data);
1732                         return 1;
1733                 } else {
1734                         pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1735                                 msr, data);
1736                         break;
1737                 }
1738         }
1739         return 0;
1740 }
1741 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1742
1743
1744 /*
1745  * Reads an msr value (of 'msr_index') into 'pdata'.
1746  * Returns 0 on success, non-0 otherwise.
1747  * Assumes vcpu_load() was already called.
1748  */
1749 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1750 {
1751         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1752 }
1753
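/*
 * Fixed-range MTRR contents sit back to back in mtrr_state.fixed_ranges:
 * one 64K register, two 16K registers, then eight 4K registers.  Variable
 * MTRRs are base/mask pairs starting at MSR 0x200, so the pair index is
 * (msr - 0x200) / 2 and the low bit of the offset selects mask vs. base.
 */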
1754 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1755 {
1756         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1757
1758         if (!msr_mtrr_valid(msr))
1759                 return 1;
1760
1761         if (msr == MSR_MTRRdefType)
1762                 *pdata = vcpu->arch.mtrr_state.def_type +
1763                          (vcpu->arch.mtrr_state.enabled << 10);
1764         else if (msr == MSR_MTRRfix64K_00000)
1765                 *pdata = p[0];
1766         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1767                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1768         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1769                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1770         else if (msr == MSR_IA32_CR_PAT)
1771                 *pdata = vcpu->arch.pat;
1772         else {  /* Variable MTRRs */
1773                 int idx, is_mtrr_mask;
1774                 u64 *pt;
1775
1776                 idx = (msr - 0x200) / 2;
1777                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1778                 if (!is_mtrr_mask)
1779                         pt =
1780                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1781                 else
1782                         pt =
1783                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1784                 *pdata = *pt;
1785         }
1786
1787         return 0;
1788 }
1789
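/*
 * Reads of the machine-check MSRs: the legacy P5 registers always read as
 * zero, the global registers come from vcpu->arch.mcg_*, and the per-bank
 * registers are backed by the flat mce_banks[] array (four MSRs per bank).
 */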
1790 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1791 {
1792         u64 data;
1793         u64 mcg_cap = vcpu->arch.mcg_cap;
1794         unsigned bank_num = mcg_cap & 0xff;
1795
1796         switch (msr) {
1797         case MSR_IA32_P5_MC_ADDR:
1798         case MSR_IA32_P5_MC_TYPE:
1799                 data = 0;
1800                 break;
1801         case MSR_IA32_MCG_CAP:
1802                 data = vcpu->arch.mcg_cap;
1803                 break;
1804         case MSR_IA32_MCG_CTL:
1805                 if (!(mcg_cap & MCG_CTL_P))
1806                         return 1;
1807                 data = vcpu->arch.mcg_ctl;
1808                 break;
1809         case MSR_IA32_MCG_STATUS:
1810                 data = vcpu->arch.mcg_status;
1811                 break;
1812         default:
1813                 if (msr >= MSR_IA32_MC0_CTL &&
1814                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1815                         u32 offset = msr - MSR_IA32_MC0_CTL;
1816                         data = vcpu->arch.mce_banks[offset];
1817                         break;
1818                 }
1819                 return 1;
1820         }
1821         *pdata = data;
1822         return 0;
1823 }
1824
1825 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1826 {
1827         u64 data = 0;
1828         struct kvm *kvm = vcpu->kvm;
1829
1830         switch (msr) {
1831         case HV_X64_MSR_GUEST_OS_ID:
1832                 data = kvm->arch.hv_guest_os_id;
1833                 break;
1834         case HV_X64_MSR_HYPERCALL:
1835                 data = kvm->arch.hv_hypercall;
1836                 break;
1837         default:
1838                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1839                 return 1;
1840         }
1841
1842         *pdata = data;
1843         return 0;
1844 }
1845
1846 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1847 {
1848         u64 data = 0;
1849
1850         switch (msr) {
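        /*
         * HV_X64_MSR_VP_INDEX has no backing storage; it is derived from
         * the vcpu's position in the kvm->vcpus array.
         */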
1851         case HV_X64_MSR_VP_INDEX: {
1852                 int r;
1853                 struct kvm_vcpu *v;
1854                 kvm_for_each_vcpu(r, v, vcpu->kvm)
1855                         if (v == vcpu)
1856                                 data = r;
1857                 break;
1858         }
1859         case HV_X64_MSR_EOI:
1860                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1861         case HV_X64_MSR_ICR:
1862                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1863         case HV_X64_MSR_TPR:
1864                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1865         case HV_X64_MSR_APIC_ASSIST_PAGE:
1866                 data = vcpu->arch.hv_vapic;
1867                 break;
1868         default:
1869                 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1870                 return 1;
1871         }
1872         *pdata = data;
1873         return 0;
1874 }
1875
1876 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1877 {
1878         u64 data;
1879
1880         switch (msr) {
1881         case MSR_IA32_PLATFORM_ID:
1882         case MSR_IA32_EBL_CR_POWERON:
1883         case MSR_IA32_DEBUGCTLMSR:
1884         case MSR_IA32_LASTBRANCHFROMIP:
1885         case MSR_IA32_LASTBRANCHTOIP:
1886         case MSR_IA32_LASTINTFROMIP:
1887         case MSR_IA32_LASTINTTOIP:
1888         case MSR_K8_SYSCFG:
1889         case MSR_K8_TSEG_ADDR:
1890         case MSR_K8_TSEG_MASK:
1891         case MSR_K7_HWCR:
1892         case MSR_VM_HSAVE_PA:
1893         case MSR_P6_PERFCTR0:
1894         case MSR_P6_PERFCTR1:
1895         case MSR_P6_EVNTSEL0:
1896         case MSR_P6_EVNTSEL1:
1897         case MSR_K7_EVNTSEL0:
1898         case MSR_K7_PERFCTR0:
1899         case MSR_K8_INT_PENDING_MSG:
1900         case MSR_AMD64_NB_CFG:
1901         case MSR_FAM10H_MMIO_CONF_BASE:
1902                 data = 0;
1903                 break;
1904         case MSR_IA32_UCODE_REV:
1905                 data = 0x100000000ULL;
1906                 break;
1907         case MSR_MTRRcap:
1908                 data = 0x500 | KVM_NR_VAR_MTRR;
1909                 break;
1910         case 0x200 ... 0x2ff:
1911                 return get_msr_mtrr(vcpu, msr, pdata);
1912         case 0xcd: /* fsb frequency */
1913                 data = 3;
1914                 break;
1915                 /*
1916                  * MSR_EBC_FREQUENCY_ID
1917                  * Conservative value valid for even the basic CPU models.
1918                  * Models 0,1: 000 in bits 23:21 indicating a bus speed of
1919                  * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
1920                  * and 266MHz for models 3 and 4. Set Core Clock
1921                  * Frequency to System Bus Frequency Ratio to 1 (bits
1922                  * 31:24) even though these are only valid for CPU
1923                  * models > 2, however guests may end up dividing or
1924                  * multiplying by zero otherwise.
1925                  */
1926         case MSR_EBC_FREQUENCY_ID:
1927                 data = 1 << 24;
1928                 break;
1929         case MSR_IA32_APICBASE:
1930                 data = kvm_get_apic_base(vcpu);
1931                 break;
1932         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1933                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1934                 break;
1935         case MSR_IA32_TSCDEADLINE:
1936                 data = kvm_get_lapic_tscdeadline_msr(vcpu);
1937                 break;
1938         case MSR_IA32_MISC_ENABLE:
1939                 data = vcpu->arch.ia32_misc_enable_msr;
1940                 break;
1941         case MSR_IA32_PERF_STATUS:
1942                 /* TSC increment by tick */
1943                 data = 1000ULL;
1944                 /* CPU multiplier */
1945                 data |= (((uint64_t)4ULL) << 40);
1946                 break;
1947         case MSR_EFER:
1948                 data = vcpu->arch.efer;
1949                 break;
1950         case MSR_KVM_WALL_CLOCK:
1951         case MSR_KVM_WALL_CLOCK_NEW:
1952                 data = vcpu->kvm->arch.wall_clock;
1953                 break;
1954         case MSR_KVM_SYSTEM_TIME:
1955         case MSR_KVM_SYSTEM_TIME_NEW:
1956                 data = vcpu->arch.time;
1957                 break;
1958         case MSR_KVM_ASYNC_PF_EN:
1959                 data = vcpu->arch.apf.msr_val;
1960                 break;
1961         case MSR_KVM_STEAL_TIME:
1962                 data = vcpu->arch.st.msr_val;
1963                 break;
1964         case MSR_IA32_P5_MC_ADDR:
1965         case MSR_IA32_P5_MC_TYPE:
1966         case MSR_IA32_MCG_CAP:
1967         case MSR_IA32_MCG_CTL:
1968         case MSR_IA32_MCG_STATUS:
1969         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1970                 return get_msr_mce(vcpu, msr, pdata);
1971         case MSR_K7_CLK_CTL:
1972                 /*
1973                  * Provide expected ramp-up count for K7. All other
1974                  * fields are set to zero, indicating minimum divisors for
1975                  * every field.
1976                  *
1977                  * This prevents guest kernels on AMD host with CPU
1978                  * type 6, model 8 and higher from exploding due to
1979                  * the rdmsr failing.
1980                  */
1981                 data = 0x20000000;
1982                 break;
1983         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1984                 if (kvm_hv_msr_partition_wide(msr)) {
1985                         int r;
1986                         mutex_lock(&vcpu->kvm->lock);
1987                         r = get_msr_hyperv_pw(vcpu, msr, pdata);
1988                         mutex_unlock(&vcpu->kvm->lock);
1989                         return r;
1990                 } else
1991                         return get_msr_hyperv(vcpu, msr, pdata);
1992                 break;
1993         case MSR_IA32_BBL_CR_CTL3:
1994                 /* This legacy MSR exists but isn't fully documented in current
1995                  * silicon.  It is however accessed by winxp in very narrow
1996                  * scenarios where it sets bit #19, itself documented as
1997                  * a "reserved" bit.  Best effort attempt to source coherent
1998                  * read data here should the balance of the register be
1999                  * interpreted by the guest:
2000                  *
2001                  * L2 cache control register 3: 64GB range, 256KB size,
2002                  * enabled, latency 0x1, configured
2003                  */
2004                 data = 0xbe702111;
2005                 break;
2006         default:
2007                 if (!ignore_msrs) {
2008                         pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
2009                         return 1;
2010                 } else {
2011                         pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
2012                         data = 0;
2013                 }
2014                 break;
2015         }
2016         *pdata = data;
2017         return 0;
2018 }
2019 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2020
2021 /*
2022  * Read or write a bunch of msrs. All parameters are kernel addresses.
2023  *
2024  * @return number of msrs processed successfully.
2025  */
2026 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2027                     struct kvm_msr_entry *entries,
2028                     int (*do_msr)(struct kvm_vcpu *vcpu,
2029                                   unsigned index, u64 *data))
2030 {
2031         int i, idx;
2032
2033         idx = srcu_read_lock(&vcpu->kvm->srcu);
2034         for (i = 0; i < msrs->nmsrs; ++i)
2035                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2036                         break;
2037         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2038
2039         return i;
2040 }
2041
2042 /*
2043  * Read or write a bunch of msrs. Parameters are user addresses.
2044  *
2045  * @return number of msrs processed successfully.
2046  */
2047 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2048                   int (*do_msr)(struct kvm_vcpu *vcpu,
2049                                 unsigned index, u64 *data),
2050                   int writeback)
2051 {
2052         struct kvm_msrs msrs;
2053         struct kvm_msr_entry *entries;
2054         int r, n;
2055         unsigned size;
2056
2057         r = -EFAULT;
2058         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2059                 goto out;
2060
2061         r = -E2BIG;
2062         if (msrs.nmsrs >= MAX_IO_MSRS)
2063                 goto out;
2064
2065         r = -ENOMEM;
2066         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2067         entries = kmalloc(size, GFP_KERNEL);
2068         if (!entries)
2069                 goto out;
2070
2071         r = -EFAULT;
2072         if (copy_from_user(entries, user_msrs->entries, size))
2073                 goto out_free;
2074
2075         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2076         if (r < 0)
2077                 goto out_free;
2078
2079         r = -EFAULT;
2080         if (writeback && copy_to_user(user_msrs->entries, entries, size))
2081                 goto out_free;
2082
2083         r = n;
2084
2085 out_free:
2086         kfree(entries);
2087 out:
2088         return r;
2089 }
2090
2091 int kvm_dev_ioctl_check_extension(long ext)
2092 {
2093         int r;
2094
2095         switch (ext) {
2096         case KVM_CAP_IRQCHIP:
2097         case KVM_CAP_HLT:
2098         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2099         case KVM_CAP_SET_TSS_ADDR:
2100         case KVM_CAP_EXT_CPUID:
2101         case KVM_CAP_CLOCKSOURCE:
2102         case KVM_CAP_PIT:
2103         case KVM_CAP_NOP_IO_DELAY:
2104         case KVM_CAP_MP_STATE:
2105         case KVM_CAP_SYNC_MMU:
2106         case KVM_CAP_USER_NMI:
2107         case KVM_CAP_REINJECT_CONTROL:
2108         case KVM_CAP_IRQ_INJECT_STATUS:
2109         case KVM_CAP_ASSIGN_DEV_IRQ:
2110         case KVM_CAP_IRQFD:
2111         case KVM_CAP_IOEVENTFD:
2112         case KVM_CAP_PIT2:
2113         case KVM_CAP_PIT_STATE2:
2114         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2115         case KVM_CAP_XEN_HVM:
2116         case KVM_CAP_ADJUST_CLOCK:
2117         case KVM_CAP_VCPU_EVENTS:
2118         case KVM_CAP_HYPERV:
2119         case KVM_CAP_HYPERV_VAPIC:
2120         case KVM_CAP_HYPERV_SPIN:
2121         case KVM_CAP_PCI_SEGMENT:
2122         case KVM_CAP_DEBUGREGS:
2123         case KVM_CAP_X86_ROBUST_SINGLESTEP:
2124         case KVM_CAP_XSAVE:
2125         case KVM_CAP_ASYNC_PF:
2126         case KVM_CAP_GET_TSC_KHZ:
2127                 r = 1;
2128                 break;
2129         case KVM_CAP_COALESCED_MMIO:
2130                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2131                 break;
2132         case KVM_CAP_VAPIC:
2133                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2134                 break;
2135         case KVM_CAP_NR_VCPUS:
2136                 r = KVM_SOFT_MAX_VCPUS;
2137                 break;
2138         case KVM_CAP_MAX_VCPUS:
2139                 r = KVM_MAX_VCPUS;
2140                 break;
2141         case KVM_CAP_NR_MEMSLOTS:
2142                 r = KVM_MEMORY_SLOTS;
2143                 break;
2144         case KVM_CAP_PV_MMU:    /* obsolete */
2145                 r = 0;
2146                 break;
2147         case KVM_CAP_IOMMU:
2148                 r = iommu_present(&pci_bus_type);
2149                 break;
2150         case KVM_CAP_MCE:
2151                 r = KVM_MAX_MCE_BANKS;
2152                 break;
2153         case KVM_CAP_XCRS:
2154                 r = cpu_has_xsave;
2155                 break;
2156         case KVM_CAP_TSC_CONTROL:
2157                 r = kvm_has_tsc_control;
2158                 break;
2159         case KVM_CAP_TSC_DEADLINE_TIMER:
2160                 r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
2161                 break;
2162         default:
2163                 r = 0;
2164                 break;
2165         }
2166         return r;
2167
2168 }
2169
2170 long kvm_arch_dev_ioctl(struct file *filp,
2171                         unsigned int ioctl, unsigned long arg)
2172 {
2173         void __user *argp = (void __user *)arg;
2174         long r;
2175
2176         switch (ioctl) {
2177         case KVM_GET_MSR_INDEX_LIST: {
2178                 struct kvm_msr_list __user *user_msr_list = argp;
2179                 struct kvm_msr_list msr_list;
2180                 unsigned n;
2181
2182                 r = -EFAULT;
2183                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2184                         goto out;
2185                 n = msr_list.nmsrs;
2186                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2187                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2188                         goto out;
2189                 r = -E2BIG;
2190                 if (n < msr_list.nmsrs)
2191                         goto out;
2192                 r = -EFAULT;
2193                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2194                                  num_msrs_to_save * sizeof(u32)))
2195                         goto out;
2196                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2197                                  &emulated_msrs,
2198                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2199                         goto out;
2200                 r = 0;
2201                 break;
2202         }
2203         case KVM_GET_SUPPORTED_CPUID: {
2204                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2205                 struct kvm_cpuid2 cpuid;
2206
2207                 r = -EFAULT;
2208                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2209                         goto out;
2210                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
2211                                                       cpuid_arg->entries);
2212                 if (r)
2213                         goto out;
2214
2215                 r = -EFAULT;
2216                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2217                         goto out;
2218                 r = 0;
2219                 break;
2220         }
2221         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2222                 u64 mce_cap;
2223
2224                 mce_cap = KVM_MCE_CAP_SUPPORTED;
2225                 r = -EFAULT;
2226                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2227                         goto out;
2228                 r = 0;
2229                 break;
2230         }
2231         default:
2232                 r = -EINVAL;
2233         }
2234 out:
2235         return r;
2236 }
2237
2238 static void wbinvd_ipi(void *garbage)
2239 {
2240         wbinvd();
2241 }
2242
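/*
 * WBINVD only needs emulation when the VM has an IOMMU domain that does
 * not guarantee cache coherency, i.e. assigned devices may perform
 * non-coherent DMA.
 */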
2243 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2244 {
2245         return vcpu->kvm->arch.iommu_domain &&
2246                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
2247 }
2248
2249 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2250 {
2251         /* Handle the case where the guest may execute WBINVD */
2252         if (need_emulate_wbinvd(vcpu)) {
2253                 if (kvm_x86_ops->has_wbinvd_exit())
2254                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2255                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2256                         smp_call_function_single(vcpu->cpu,
2257                                         wbinvd_ipi, NULL, 1);
2258         }
2259
2260         kvm_x86_ops->vcpu_load(vcpu, cpu);
2261         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2262                 /* Make sure TSC doesn't go backwards */
2263                 s64 tsc_delta;
2264                 u64 tsc;
2265
2266                 tsc = kvm_x86_ops->read_l1_tsc(vcpu);
2267                 tsc_delta = !vcpu->arch.last_guest_tsc ? 0 :
2268                              tsc - vcpu->arch.last_guest_tsc;
2269
2270                 if (tsc_delta < 0)
2271                         mark_tsc_unstable("KVM discovered backwards TSC");
2272                 if (check_tsc_unstable()) {
2273                         kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
2274                         vcpu->arch.tsc_catchup = 1;
2275                 }
2276                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2277                 if (vcpu->cpu != cpu)
2278                         kvm_migrate_timers(vcpu);
2279                 vcpu->cpu = cpu;
2280         }
2281
2282         accumulate_steal_time(vcpu);
2283         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2284 }
2285
2286 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2287 {
2288         kvm_x86_ops->vcpu_put(vcpu);
2289         kvm_put_guest_fpu(vcpu);
2290         vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
2291 }
2292
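/* Report whether the host itself is running with EFER.NX enabled. */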
2293 static int is_efer_nx(void)
2294 {
2295         unsigned long long efer = 0;
2296
2297         rdmsrl_safe(MSR_EFER, &efer);
2298         return efer & EFER_NX;
2299 }
2300
2301 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2302 {
2303         int i;
2304         struct kvm_cpuid_entry2 *e, *entry;
2305
2306         entry = NULL;
2307         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2308                 e = &vcpu->arch.cpuid_entries[i];
2309                 if (e->function == 0x80000001) {
2310                         entry = e;
2311                         break;
2312                 }
2313         }
2314         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
2315                 entry->edx &= ~(1 << 20);
2316                 printk(KERN_INFO "kvm: guest NX capability removed\n");
2317         }
2318 }
2319
2320 /* called when an old userspace process sets cpuid data in a new kernel module */
2321 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2322                                     struct kvm_cpuid *cpuid,
2323                                     struct kvm_cpuid_entry __user *entries)
2324 {
2325         int r, i;
2326         struct kvm_cpuid_entry *cpuid_entries;
2327
2328         r = -E2BIG;
2329         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2330                 goto out;
2331         r = -ENOMEM;
2332         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
2333         if (!cpuid_entries)
2334                 goto out;
2335         r = -EFAULT;
2336         if (copy_from_user(cpuid_entries, entries,
2337                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2338                 goto out_free;
2339         for (i = 0; i < cpuid->nent; i++) {
2340                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
2341                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
2342                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
2343                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
2344                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
2345                 vcpu->arch.cpuid_entries[i].index = 0;
2346                 vcpu->arch.cpuid_entries[i].flags = 0;
2347                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
2348                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
2349                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
2350         }
2351         vcpu->arch.cpuid_nent = cpuid->nent;
2352         cpuid_fix_nx_cap(vcpu);
2353         r = 0;
2354         kvm_apic_set_version(vcpu);
2355         kvm_x86_ops->cpuid_update(vcpu);
2356         update_cpuid(vcpu);
2357
2358 out_free:
2359         vfree(cpuid_entries);
2360 out:
2361         return r;
2362 }
2363
2364 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
2365                                      struct kvm_cpuid2 *cpuid,
2366                                      struct kvm_cpuid_entry2 __user *entries)
2367 {
2368         int r;
2369
2370         r = -E2BIG;
2371         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2372                 goto out;
2373         r = -EFAULT;
2374         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
2375                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
2376                 goto out;
2377         vcpu->arch.cpuid_nent = cpuid->nent;
2378         kvm_apic_set_version(vcpu);
2379         kvm_x86_ops->cpuid_update(vcpu);
2380         update_cpuid(vcpu);
2381         return 0;
2382
2383 out:
2384         return r;
2385 }
2386
2387 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
2388                                      struct kvm_cpuid2 *cpuid,
2389                                      struct kvm_cpuid_entry2 __user *entries)
2390 {
2391         int r;
2392
2393         r = -E2BIG;
2394         if (cpuid->nent < vcpu->arch.cpuid_nent)
2395                 goto out;
2396         r = -EFAULT;
2397         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
2398                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
2399                 goto out;
2400         return 0;
2401
2402 out:
2403         cpuid->nent = vcpu->arch.cpuid_nent;
2404         return r;
2405 }
2406
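/* Clamp a guest CPUID feature word to what the host CPU actually supports. */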
2407 static void cpuid_mask(u32 *word, int wordnum)
2408 {
2409         *word &= boot_cpu_data.x86_capability[wordnum];
2410 }
2411
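/* Fill one CPUID entry directly from the host CPUID instruction. */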
2412 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2413                            u32 index)
2414 {
2415         entry->function = function;
2416         entry->index = index;
2417         cpuid_count(entry->function, entry->index,
2418                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
2419         entry->flags = 0;
2420 }
2421
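/* Only the FP, SSE and YMM (AVX) xsave states are exposed to the guest,
 * and only if the host has them enabled in XCR0. */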
2422 static bool supported_xcr0_bit(unsigned bit)
2423 {
2424         u64 mask = ((u64)1 << bit);
2425
2426         return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
2427 }
2428
2429 #define F(x) bit(X86_FEATURE_##x)
2430
2431 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
2432                          u32 index, int *nent, int maxnent)
2433 {
2434         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
2435 #ifdef CONFIG_X86_64
2436         unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
2437                                 ? F(GBPAGES) : 0;
2438         unsigned f_lm = F(LM);
2439 #else
2440         unsigned f_gbpages = 0;
2441         unsigned f_lm = 0;
2442 #endif
2443         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
2444
2445         /* cpuid 1.edx */
2446         const u32 kvm_supported_word0_x86_features =
2447                 F(FPU) | F(VME) | F(DE) | F(PSE) |
2448                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2449                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
2450                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2451                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
2452                 0 /* Reserved, DS, ACPI */ | F(MMX) |
2453                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
2454                 0 /* HTT, TM, Reserved, PBE */;
2455         /* cpuid 0x80000001.edx */
2456         const u32 kvm_supported_word1_x86_features =
2457                 F(FPU) | F(VME) | F(DE) | F(PSE) |
2458                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
2459                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
2460                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
2461                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
2462                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
2463                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
2464                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
2465         /* cpuid 1.ecx */
2466         const u32 kvm_supported_word4_x86_features =
2467                 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
2468                 0 /* DS-CPL, VMX, SMX, EST */ |
2469                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
2470                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
2471                 0 /* Reserved, DCA */ | F(XMM4_1) |
2472                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
2473                 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
2474                 F(F16C) | F(RDRAND);
2475         /* cpuid 0x80000001.ecx */
2476         const u32 kvm_supported_word6_x86_features =
2477                 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
2478                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
2479                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
2480                 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
2481
2482         /* cpuid 0xC0000001.edx */
2483         const u32 kvm_supported_word5_x86_features =
2484                 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
2485                 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
2486                 F(PMM) | F(PMM_EN);
2487
2488         /* cpuid 7.0.ebx */
2489         const u32 kvm_supported_word9_x86_features =
2490                 F(SMEP) | F(FSGSBASE) | F(ERMS);
2491
2492         /* all calls to cpuid_count() should be made on the same cpu */
2493         get_cpu();
2494         do_cpuid_1_ent(entry, function, index);
2495         ++*nent;
2496
2497         switch (function) {
2498         case 0:
2499                 entry->eax = min(entry->eax, (u32)0xd);
2500                 break;
2501         case 1:
2502                 entry->edx &= kvm_supported_word0_x86_features;
2503                 cpuid_mask(&entry->edx, 0);
2504                 entry->ecx &= kvm_supported_word4_x86_features;
2505                 cpuid_mask(&entry->ecx, 4);
2506                 /* we support x2apic emulation even if host does not support
2507                  * it since we emulate x2apic in software */
2508                 entry->ecx |= F(X2APIC);
2509                 break;
2510         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
2511          * may return different values. This forces us to get_cpu() before
2512          * issuing the first command, and also to emulate this annoying behavior
2513          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2514         case 2: {
2515                 int t, times = entry->eax & 0xff;
2516
2517                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2518                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2519                 for (t = 1; t < times && *nent < maxnent; ++t) {
2520                         do_cpuid_1_ent(&entry[t], function, 0);
2521                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2522                         ++*nent;
2523                 }
2524                 break;
2525         }
2526         /* function 4 has additional index. */
2527         case 4: {
2528                 int i, cache_type;
2529
2530                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2531                 /* read more entries until cache_type is zero */
2532                 for (i = 1; *nent < maxnent; ++i) {
2533                         cache_type = entry[i - 1].eax & 0x1f;
2534                         if (!cache_type)
2535                                 break;
2536                         do_cpuid_1_ent(&entry[i], function, i);
2537                         entry[i].flags |=
2538                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2539                         ++*nent;
2540                 }
2541                 break;
2542         }
2543         case 7: {
2544                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2545                 /* Mask ebx against host capability word 9 */
2546                 if (index == 0) {
2547                         entry->ebx &= kvm_supported_word9_x86_features;
2548                         cpuid_mask(&entry->ebx, 9);
2549                 } else
2550                         entry->ebx = 0;
2551                 entry->eax = 0;
2552                 entry->ecx = 0;
2553                 entry->edx = 0;
2554                 break;
2555         }
2556         case 9:
2557                 break;
2558         /* function 0xb has additional index. */
2559         case 0xb: {
2560                 int i, level_type;
2561
2562                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2563                 /* read more entries until level_type is zero */
2564                 for (i = 1; *nent < maxnent; ++i) {
2565                         level_type = entry[i - 1].ecx & 0xff00;
2566                         if (!level_type)
2567                                 break;
2568                         do_cpuid_1_ent(&entry[i], function, i);
2569                         entry[i].flags |=
2570                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2571                         ++*nent;
2572                 }
2573                 break;
2574         }
2575         case 0xd: {
2576                 int idx, i;
2577
2578                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2579                 for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
2580                         do_cpuid_1_ent(&entry[i], function, idx);
2581                         if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
2582                                 continue;
2583                         entry[i].flags |=
2584                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2585                         ++*nent;
2586                         ++i;
2587                 }
2588                 break;
2589         }
2590         case KVM_CPUID_SIGNATURE: {
2591                 char signature[12] = "KVMKVMKVM\0\0";
2592                 u32 *sigptr = (u32 *)signature;
2593                 entry->eax = 0;
2594                 entry->ebx = sigptr[0];
2595                 entry->ecx = sigptr[1];
2596                 entry->edx = sigptr[2];
2597                 break;
2598         }
2599         case KVM_CPUID_FEATURES:
2600                 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2601                              (1 << KVM_FEATURE_NOP_IO_DELAY) |
2602                              (1 << KVM_FEATURE_CLOCKSOURCE2) |
2603                              (1 << KVM_FEATURE_ASYNC_PF) |
2604                              (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2605
2606                 if (sched_info_on())
2607                         entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
2608
2609                 entry->ebx = 0;
2610                 entry->ecx = 0;
2611                 entry->edx = 0;
2612                 break;
2613         case 0x80000000:
2614                 entry->eax = min(entry->eax, 0x8000001a);
2615                 break;
2616         case 0x80000001:
2617                 entry->edx &= kvm_supported_word1_x86_features;
2618                 cpuid_mask(&entry->edx, 1);
2619                 entry->ecx &= kvm_supported_word6_x86_features;
2620                 cpuid_mask(&entry->ecx, 6);
2621                 break;
2622         case 0x80000008: {
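                /* Report the address sizes: use the CPU's "guest physical"
                 * width when it provides one, otherwise the plain physical
                 * width, and never advertise fewer than 48 virtual address
                 * bits. */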
2623                 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
2624                 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
2625                 unsigned phys_as = entry->eax & 0xff;
2626
2627                 if (!g_phys_as)
2628                         g_phys_as = phys_as;
2629                 entry->eax = g_phys_as | (virt_as << 8);
2630                 entry->ebx = entry->edx = 0;
2631                 break;
2632         }
2633         case 0x80000019:
2634                 entry->ecx = entry->edx = 0;
2635                 break;
2636         case 0x8000001a:
2637                 break;
2638         case 0x8000001d:
2639                 break;
2640         /* Add support for Centaur's CPUID instruction */
2641         case 0xC0000000:
2642                 /* Just support up to 0xC0000004 now */
2643                 entry->eax = min(entry->eax, 0xC0000004);
2644                 break;
2645         case 0xC0000001:
2646                 entry->edx &= kvm_supported_word5_x86_features;
2647                 cpuid_mask(&entry->edx, 5);
2648                 break;
2649         case 3: /* Processor serial number */
2650         case 5: /* MONITOR/MWAIT */
2651         case 6: /* Thermal management */
2652         case 0xA: /* Architectural Performance Monitoring */
2653         case 0x80000007: /* Advanced power management */
2654         case 0xC0000002:
2655         case 0xC0000003:
2656         case 0xC0000004:
2657         default:
2658                 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
2659                 break;
2660         }
2661
2662         kvm_x86_ops->set_supported_cpuid(function, entry);
2663
2664         put_cpu();
2665 }
2666
2667 #undef F
2668
2669 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2670                                      struct kvm_cpuid_entry2 __user *entries)
2671 {
2672         struct kvm_cpuid_entry2 *cpuid_entries;
2673         int limit, nent = 0, r = -E2BIG;
2674         u32 func;
2675
2676         if (cpuid->nent < 1)
2677                 goto out;
2678         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2679                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2680         r = -ENOMEM;
2681         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2682         if (!cpuid_entries)
2683                 goto out;
2684
2685         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2686         limit = cpuid_entries[0].eax;
2687         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2688                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2689                              &nent, cpuid->nent);
2690         r = -E2BIG;
2691         if (nent >= cpuid->nent)
2692                 goto out_free;
2693
2694         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2695         limit = cpuid_entries[nent - 1].eax;
2696         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2697                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2698                              &nent, cpuid->nent);
2699
2700
2701
2702         r = -E2BIG;
2703         if (nent >= cpuid->nent)
2704                 goto out_free;
2705
2706         /* Add support for Centaur's CPUID instruction. */
2707         if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
2708                 do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
2709                                 &nent, cpuid->nent);
2710
2711                 r = -E2BIG;
2712                 if (nent >= cpuid->nent)
2713                         goto out_free;
2714
2715                 limit = cpuid_entries[nent - 1].eax;
2716                 for (func = 0xC0000001;
2717                         func <= limit && nent < cpuid->nent; ++func)
2718                         do_cpuid_ent(&cpuid_entries[nent], func, 0,
2719                                         &nent, cpuid->nent);
2720
2721                 r = -E2BIG;
2722                 if (nent >= cpuid->nent)
2723                         goto out_free;
2724         }
2725
2726         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2727                      cpuid->nent);
2728
2729         r = -E2BIG;
2730         if (nent >= cpuid->nent)
2731                 goto out_free;
2732
2733         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2734                      cpuid->nent);
2735
2736         r = -E2BIG;
2737         if (nent >= cpuid->nent)
2738                 goto out_free;
2739
2740         r = -EFAULT;
2741         if (copy_to_user(entries, cpuid_entries,
2742                          nent * sizeof(struct kvm_cpuid_entry2)))
2743                 goto out_free;
2744         cpuid->nent = nent;
2745         r = 0;
2746
2747 out_free:
2748         vfree(cpuid_entries);
2749 out:
2750         return r;
2751 }
2752
2753 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2754                                     struct kvm_lapic_state *s)
2755 {
2756         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2757
2758         return 0;
2759 }
2760
2761 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2762                                     struct kvm_lapic_state *s)
2763 {
2764         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2765         kvm_apic_post_state_restore(vcpu);
2766         update_cr8_intercept(vcpu);
2767
2768         return 0;
2769 }
2770
2771 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2772                                     struct kvm_interrupt *irq)
2773 {
2774         if (irq->irq < 0 || irq->irq >= 256)
2775                 return -EINVAL;
2776         if (irqchip_in_kernel(vcpu->kvm))
2777                 return -ENXIO;
2778
2779         kvm_queue_interrupt(vcpu, irq->irq, false);
2780         kvm_make_request(KVM_REQ_EVENT, vcpu);
2781
2782         return 0;
2783 }
2784
2785 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2786 {
2787         kvm_inject_nmi(vcpu);
2788
2789         return 0;
2790 }
2791
2792 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2793                                            struct kvm_tpr_access_ctl *tac)
2794 {
2795         if (tac->flags)
2796                 return -EINVAL;
2797         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2798         return 0;
2799 }
2800
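/*
 * Userspace MCE setup: the low byte of mcg_cap is the bank count;
 * IA32_MCG_CTL (when present) and every IA32_MCi_CTL start out as all
 * ones, i.e. every error source is enabled for reporting.
 */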
2801 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2802                                         u64 mcg_cap)
2803 {
2804         int r;
2805         unsigned bank_num = mcg_cap & 0xff, bank;
2806
2807         r = -EINVAL;
2808         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2809                 goto out;
2810         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2811                 goto out;
2812         r = 0;
2813         vcpu->arch.mcg_cap = mcg_cap;
2814         /* Init IA32_MCG_CTL to all 1s */
2815         if (mcg_cap & MCG_CTL_P)
2816                 vcpu->arch.mcg_ctl = ~(u64)0;
2817         /* Init IA32_MCi_CTL to all 1s */
2818         for (bank = 0; bank < bank_num; bank++)
2819                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2820 out:
2821         return r;
2822 }
2823
2824 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2825                                       struct kvm_x86_mce *mce)
2826 {
2827         u64 mcg_cap = vcpu->arch.mcg_cap;
2828         unsigned bank_num = mcg_cap & 0xff;
2829         u64 *banks = vcpu->arch.mce_banks;
2830
2831         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2832                 return -EINVAL;
2833         /*
2834          * if IA32_MCG_CTL is not all 1s, the uncorrected error
2835          * reporting is disabled
2836          */
2837         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2838             vcpu->arch.mcg_ctl != ~(u64)0)
2839                 return 0;
2840         banks += 4 * mce->bank;
2841         /*
2842          * if IA32_MCi_CTL is not all 1s, the uncorrected error
2843          * reporting is disabled for the bank
2844          */
2845         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2846                 return 0;
2847         if (mce->status & MCI_STATUS_UC) {
2848                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2849                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2850                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2851                         return 0;
2852                 }
2853                 if (banks[1] & MCI_STATUS_VAL)
2854                         mce->status |= MCI_STATUS_OVER;
2855                 banks[2] = mce->addr;
2856                 banks[3] = mce->misc;
2857                 vcpu->arch.mcg_status = mce->mcg_status;
2858                 banks[1] = mce->status;
2859                 kvm_queue_exception(vcpu, MC_VECTOR);
2860         } else if (!(banks[1] & MCI_STATUS_VAL)
2861                    || !(banks[1] & MCI_STATUS_UC)) {
2862                 if (banks[1] & MCI_STATUS_VAL)
2863                         mce->status |= MCI_STATUS_OVER;
2864                 banks[2] = mce->addr;
2865                 banks[3] = mce->misc;
2866                 banks[1] = mce->status;
2867         } else
2868                 banks[1] |= MCI_STATUS_OVER;
2869         return 0;
2870 }
2871
2872 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2873                                                struct kvm_vcpu_events *events)
2874 {
2875         process_nmi(vcpu);
2876         events->exception.injected =
2877                 vcpu->arch.exception.pending &&
2878                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2879         events->exception.nr = vcpu->arch.exception.nr;
2880         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2881         events->exception.pad = 0;
2882         events->exception.error_code = vcpu->arch.exception.error_code;
2883
2884         events->interrupt.injected =
2885                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2886         events->interrupt.nr = vcpu->arch.interrupt.nr;
2887         events->interrupt.soft = 0;
2888         events->interrupt.shadow =
2889                 kvm_x86_ops->get_interrupt_shadow(vcpu,
2890                         KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2891
2892         events->nmi.injected = vcpu->arch.nmi_injected;
2893         events->nmi.pending = vcpu->arch.nmi_pending != 0;
2894         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2895         events->nmi.pad = 0;
2896
2897         events->sipi_vector = vcpu->arch.sipi_vector;
2898
2899         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2900                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2901                          | KVM_VCPUEVENT_VALID_SHADOW);
2902         memset(&events->reserved, 0, sizeof(events->reserved));
2903 }
2904
2905 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2906                                               struct kvm_vcpu_events *events)
2907 {
2908         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2909                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2910                               | KVM_VCPUEVENT_VALID_SHADOW))
2911                 return -EINVAL;
2912
2913         process_nmi(vcpu);
2914         vcpu->arch.exception.pending = events->exception.injected;
2915         vcpu->arch.exception.nr = events->exception.nr;
2916         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2917         vcpu->arch.exception.error_code = events->exception.error_code;
2918
2919         vcpu->arch.interrupt.pending = events->interrupt.injected;
2920         vcpu->arch.interrupt.nr = events->interrupt.nr;
2921         vcpu->arch.interrupt.soft = events->interrupt.soft;
2922         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2923                 kvm_x86_ops->set_interrupt_shadow(vcpu,
2924                                                   events->interrupt.shadow);
2925
2926         vcpu->arch.nmi_injected = events->nmi.injected;
2927         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2928                 vcpu->arch.nmi_pending = events->nmi.pending;
2929         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2930
2931         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2932                 vcpu->arch.sipi_vector = events->sipi_vector;
2933
2934         kvm_make_request(KVM_REQ_EVENT, vcpu);
2935
2936         return 0;
2937 }
2938
2939 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2940                                              struct kvm_debugregs *dbgregs)
2941 {
2942         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2943         dbgregs->dr6 = vcpu->arch.dr6;
2944         dbgregs->dr7 = vcpu->arch.dr7;
2945         dbgregs->flags = 0;
2946         memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2947 }
2948
2949 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2950                                             struct kvm_debugregs *dbgregs)
2951 {
2952         if (dbgregs->flags)
2953                 return -EINVAL;
2954
2955         if (dbgregs->dr6 & ~0xffffffffull)
2956                 return -EINVAL;
2957         if (dbgregs->dr7 & ~0xffffffffull)
2958                 return -EINVAL;
2959
2960         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2961         vcpu->arch.dr6 = dbgregs->dr6;
2962         vcpu->arch.dr7 = dbgregs->dr7;
2963
2964         return 0;
2965 }
2966
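/*
 * Copy the vcpu's FPU state into the userspace-visible kvm_xsave buffer.
 * Without host XSAVE support, only the legacy FXSAVE image is copied and
 * the xstate header advertises FP/SSE state alone.
 */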
2967 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2968                                          struct kvm_xsave *guest_xsave)
2969 {
2970         if (cpu_has_xsave)
2971                 memcpy(guest_xsave->region,
2972                         &vcpu->arch.guest_fpu.state->xsave,
2973                         xstate_size);
2974         else {
2975                 memcpy(guest_xsave->region,
2976                         &vcpu->arch.guest_fpu.state->fxsave,
2977                         sizeof(struct i387_fxsave_struct));
2978                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2979                         XSTATE_FPSSE;
2980         }
2981 }
2982
2983 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2984                                         struct kvm_xsave *guest_xsave)
2985 {
2986         u64 xstate_bv =
2987                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2988
2989         if (cpu_has_xsave)
2990                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2991                         guest_xsave->region, xstate_size);
2992         else {
2993                 if (xstate_bv & ~XSTATE_FPSSE)
2994                         return -EINVAL;
2995                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2996                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
2997         }
2998         return 0;
2999 }
3000
3001 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
3002                                         struct kvm_xcrs *guest_xcrs)
3003 {
3004         if (!cpu_has_xsave) {
3005                 guest_xcrs->nr_xcrs = 0;
3006                 return;
3007         }
3008
3009         guest_xcrs->nr_xcrs = 1;
3010         guest_xcrs->flags = 0;
3011         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3012         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3013 }
3014
3015 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3016                                        struct kvm_xcrs *guest_xcrs)
3017 {
3018         int i, r = 0;
3019
3020         if (!cpu_has_xsave)
3021                 return -EINVAL;
3022
3023         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3024                 return -EINVAL;
3025
3026         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3027                 /* Only support XCR0 currently */
3028                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3029                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3030                                 guest_xcrs->xcrs[i].value);
3031                         break;
3032                 }
3033         if (r)
3034                 r = -EINVAL;
3035         return r;
3036 }
3037
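/*
 * Dispatcher for the x86-specific vcpu ioctls.  State blobs that are too
 * large for the stack (LAPIC, XSAVE, XCRS) are allocated via the union
 * below and released at the common exit path.
 */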
3038 long kvm_arch_vcpu_ioctl(struct file *filp,
3039                          unsigned int ioctl, unsigned long arg)
3040 {
3041         struct kvm_vcpu *vcpu = filp->private_data;
3042         void __user *argp = (void __user *)arg;
3043         int r;
3044         union {
3045                 struct kvm_lapic_state *lapic;
3046                 struct kvm_xsave *xsave;
3047                 struct kvm_xcrs *xcrs;
3048                 void *buffer;
3049         } u;
3050
3051         u.buffer = NULL;
3052         switch (ioctl) {
3053         case KVM_GET_LAPIC: {
3054                 r = -EINVAL;
3055                 if (!vcpu->arch.apic)
3056                         goto out;
3057                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3058
3059                 r = -ENOMEM;
3060                 if (!u.lapic)
3061                         goto out;
3062                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3063                 if (r)
3064                         goto out;
3065                 r = -EFAULT;
3066                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3067                         goto out;
3068                 r = 0;
3069                 break;
3070         }
3071         case KVM_SET_LAPIC: {
3072                 r = -EINVAL;
3073                 if (!vcpu->arch.apic)
3074                         goto out;
3075                 u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3076                 r = -ENOMEM;
3077                 if (!u.lapic)
3078                         goto out;
3079                 r = -EFAULT;
3080                 if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
3081                         goto out;
3082                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3083                 if (r)
3084                         goto out;
3085                 r = 0;
3086                 break;
3087         }
3088         case KVM_INTERRUPT: {
3089                 struct kvm_interrupt irq;
3090
3091                 r = -EFAULT;
3092                 if (copy_from_user(&irq, argp, sizeof irq))
3093                         goto out;
3094                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3095                 if (r)
3096                         goto out;
3097                 r = 0;
3098                 break;
3099         }
3100         case KVM_NMI: {
3101                 r = kvm_vcpu_ioctl_nmi(vcpu);
3102                 if (r)
3103                         goto out;
3104                 r = 0;
3105                 break;
3106         }
3107         case KVM_SET_CPUID: {
3108                 struct kvm_cpuid __user *cpuid_arg = argp;
3109                 struct kvm_cpuid cpuid;
3110
3111                 r = -EFAULT;
3112                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3113                         goto out;
3114                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3115                 if (r)
3116                         goto out;
3117                 break;
3118         }
3119         case KVM_SET_CPUID2: {
3120                 struct kvm_cpuid2 __user *cpuid_arg = argp;
3121                 struct kvm_cpuid2 cpuid;
3122
3123                 r = -EFAULT;
3124                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3125                         goto out;
3126                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3127                                               cpuid_arg->entries);
3128                 if (r)
3129                         goto out;
3130                 break;
3131         }
3132         case KVM_GET_CPUID2: {
3133                 struct kvm_cpuid2 __user *cpuid_arg = argp;
3134                 struct kvm_cpuid2 cpuid;
3135
3136                 r = -EFAULT;
3137                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3138                         goto out;
3139                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3140                                               cpuid_arg->entries);
3141                 if (r)
3142                         goto out;
3143                 r = -EFAULT;
3144                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3145                         goto out;
3146                 r = 0;
3147                 break;
3148         }
3149         case KVM_GET_MSRS:
3150                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
3151                 break;
3152         case KVM_SET_MSRS:
3153                 r = msr_io(vcpu, argp, do_set_msr, 0);
3154                 break;
3155         case KVM_TPR_ACCESS_REPORTING: {
3156                 struct kvm_tpr_access_ctl tac;
3157
3158                 r = -EFAULT;
3159                 if (copy_from_user(&tac, argp, sizeof tac))
3160                         goto out;
3161                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3162                 if (r)
3163                         goto out;
3164                 r = -EFAULT;
3165                 if (copy_to_user(argp, &tac, sizeof tac))
3166                         goto out;
3167                 r = 0;
3168                 break;
3169         }
3170         case KVM_SET_VAPIC_ADDR: {
3171                 struct kvm_vapic_addr va;
3172
3173                 r = -EINVAL;
3174                 if (!irqchip_in_kernel(vcpu->kvm))
3175                         goto out;
3176                 r = -EFAULT;
3177                 if (copy_from_user(&va, argp, sizeof va))
3178                         goto out;
3179                 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3180                 break;
3181         }
3182         case KVM_X86_SETUP_MCE: {
3183                 u64 mcg_cap;
3184
3185                 r = -EFAULT;
3186                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3187                         goto out;
3188                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3189                 break;
3190         }
3191         case KVM_X86_SET_MCE: {
3192                 struct kvm_x86_mce mce;
3193
3194                 r = -EFAULT;
3195                 if (copy_from_user(&mce, argp, sizeof mce))
3196                         goto out;
3197                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3198                 break;
3199         }
3200         case KVM_GET_VCPU_EVENTS: {
3201                 struct kvm_vcpu_events events;
3202
3203                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3204
3205                 r = -EFAULT;
3206                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3207                         break;
3208                 r = 0;
3209                 break;
3210         }
3211         case KVM_SET_VCPU_EVENTS: {
3212                 struct kvm_vcpu_events events;
3213
3214                 r = -EFAULT;
3215                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3216                         break;
3217
3218                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3219                 break;
3220         }
3221         case KVM_GET_DEBUGREGS: {
3222                 struct kvm_debugregs dbgregs;
3223
3224                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3225
3226                 r = -EFAULT;
3227                 if (copy_to_user(argp, &dbgregs,
3228                                  sizeof(struct kvm_debugregs)))
3229                         break;
3230                 r = 0;
3231                 break;
3232         }
3233         case KVM_SET_DEBUGREGS: {
3234                 struct kvm_debugregs dbgregs;
3235
3236                 r = -EFAULT;
3237                 if (copy_from_user(&dbgregs, argp,
3238                                    sizeof(struct kvm_debugregs)))
3239                         break;
3240
3241                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3242                 break;
3243         }
3244         case KVM_GET_XSAVE: {
3245                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3246                 r = -ENOMEM;
3247                 if (!u.xsave)
3248                         break;
3249
3250                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3251
3252                 r = -EFAULT;
3253                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3254                         break;
3255                 r = 0;
3256                 break;
3257         }
3258         case KVM_SET_XSAVE: {
3259                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3260                 r = -ENOMEM;
3261                 if (!u.xsave)
3262                         break;
3263
3264                 r = -EFAULT;
3265                 if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
3266                         break;
3267
3268                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3269                 break;
3270         }
3271         case KVM_GET_XCRS: {
3272                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3273                 r = -ENOMEM;
3274                 if (!u.xcrs)
3275                         break;
3276
3277                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3278
3279                 r = -EFAULT;
3280                 if (copy_to_user(argp, u.xcrs,
3281                                  sizeof(struct kvm_xcrs)))
3282                         break;
3283                 r = 0;
3284                 break;
3285         }
3286         case KVM_SET_XCRS: {
3287                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3288                 r = -ENOMEM;
3289                 if (!u.xcrs)
3290                         break;
3291
3292                 r = -EFAULT;
3293                 if (copy_from_user(u.xcrs, argp,
3294                                    sizeof(struct kvm_xcrs)))
3295                         break;
3296
3297                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3298                 break;
3299         }
3300         case KVM_SET_TSC_KHZ: {
3301                 u32 user_tsc_khz;
3302
3303                 r = -EINVAL;
3304                 if (!kvm_has_tsc_control)
3305                         break;
3306
3307                 user_tsc_khz = (u32)arg;
3308
3309                 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3310                         goto out;
3311
3312                 kvm_x86_ops->set_tsc_khz(vcpu, user_tsc_khz);
3313
3314                 r = 0;
3315                 goto out;
3316         }
3317         case KVM_GET_TSC_KHZ: {
3318                 r = -EIO;
3319                 if (check_tsc_unstable())
3320                         goto out;
3321
3322                 r = vcpu_tsc_khz(vcpu);
3323
3324                 goto out;
3325         }
3326         default:
3327                 r = -EINVAL;
3328         }
3329 out:
3330         kfree(u.buffer);
3331         return r;
3332 }
3333
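/*
 * The TSS area spans three pages; make sure all of them fit below the
 * 4GB boundary.
 */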
3334 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3335 {
3336         int ret;
3337
3338         if (addr > (unsigned int)(-3 * PAGE_SIZE))
3339                 return -1;
3340         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3341         return ret;
3342 }
3343
3344 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3345                                               u64 ident_addr)
3346 {
3347         kvm->arch.ept_identity_map_addr = ident_addr;
3348         return 0;
3349 }
3350
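/* Resize the VM's shadow MMU page allocation limit. */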
3351 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3352                                           u32 kvm_nr_mmu_pages)
3353 {
3354         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3355                 return -EINVAL;
3356
3357         mutex_lock(&kvm->slots_lock);
3358         spin_lock(&kvm->mmu_lock);
3359
3360         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3361         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3362
3363         spin_unlock(&kvm->mmu_lock);
3364         mutex_unlock(&kvm->slots_lock);
3365         return 0;
3366 }
3367
3368 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3369 {
3370         return kvm->arch.n_max_mmu_pages;
3371 }
3372
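/* Snapshot the state of one in-kernel irqchip (PIC master/slave or IOAPIC). */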
3373 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3374 {
3375         int r;
3376
3377         r = 0;
3378         switch (chip->chip_id) {
3379         case KVM_IRQCHIP_PIC_MASTER:
3380                 memcpy(&chip->chip.pic,
3381                         &pic_irqchip(kvm)->pics[0],
3382                         sizeof(struct kvm_pic_state));
3383                 break;
3384         case KVM_IRQCHIP_PIC_SLAVE:
3385                 memcpy(&chip->chip.pic,
3386                         &pic_irqchip(kvm)->pics[1],
3387                         sizeof(struct kvm_pic_state));
3388                 break;
3389         case KVM_IRQCHIP_IOAPIC:
3390                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3391                 break;
3392         default:
3393                 r = -EINVAL;
3394                 break;
3395         }
3396         return r;
3397 }
3398
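/*
 * Load userspace-supplied state into one of the in-kernel irqchips and
 * re-evaluate pending PIC output afterwards.
 */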
3399 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3400 {
3401         int r;
3402
3403         r = 0;
3404         switch (chip->chip_id) {
3405         case KVM_IRQCHIP_PIC_MASTER:
3406                 spin_lock(&pic_irqchip(kvm)->lock);
3407                 memcpy(&pic_irqchip(kvm)->pics[0],
3408                         &chip->chip.pic,
3409                         sizeof(struct kvm_pic_state));
3410                 spin_unlock(&pic_irqchip(kvm)->lock);
3411                 break;
3412         case KVM_IRQCHIP_PIC_SLAVE:
3413                 spin_lock(&pic_irqchip(kvm)->lock);
3414                 memcpy(&pic_irqchip(kvm)->pics[1],
3415                         &chip->chip.pic,
3416                         sizeof(struct kvm_pic_state));
3417                 spin_unlock(&pic_irqchip(kvm)->lock);
3418                 break;
3419         case KVM_IRQCHIP_IOAPIC:
3420                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3421                 break;
3422         default:
3423                 r = -EINVAL;
3424                 break;
3425         }
3426         kvm_pic_update_irq(pic_irqchip(kvm));
3427         return r;
3428 }
3429
3430 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3431 {
3432         int r = 0;
3433
3434         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3435         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
3436         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3437         return r;
3438 }
3439
3440 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3441 {
3442         int r = 0;
3443         int i;
3444         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3445         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3446         for (i = 0; i < 3; i++)
3447                 kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
3448         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3449         return r;
3450 }
3451
3452 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3453 {
3454         int r = 0;
3455
3456         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3457         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3458                 sizeof(ps->channels));
3459         ps->flags = kvm->arch.vpit->pit_state.flags;
3460         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3461         memset(&ps->reserved, 0, sizeof(ps->reserved));
3462         return r;
3463 }
3464
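/*
 * Restore PIT state.  When HPET legacy mode is being switched on,
 * channel 0 is reloaded in legacy-start mode.
 */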
3465 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3466 {
3467         int r = 0, start = 0;
3468         int i;
3469         u32 prev_legacy, cur_legacy;
3470         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3471         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3472         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3473         if (!prev_legacy && cur_legacy)
3474                 start = 1;
3475         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3476                sizeof(kvm->arch.vpit->pit_state.channels));
3477         kvm->arch.vpit->pit_state.flags = ps->flags;
3478         for (i = 0; i < 3; i++)
3479                 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count,
3480                                    start && i == 0);
3481         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3482         return r;
3483 }
3484
3485 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3486                                  struct kvm_reinject_control *control)
3487 {
3488         if (!kvm->arch.vpit)
3489                 return -ENXIO;
3490         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3491         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
3492         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3493         return 0;
3494 }
3495
3496 /*
3497  * Get (and clear) the dirty memory log for a memory slot.
3498  */
3499 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3500                                       struct kvm_dirty_log *log)
3501 {
3502         int r, i;
3503         struct kvm_memory_slot *memslot;
3504         unsigned long n;
3505         unsigned long is_dirty = 0;
3506
3507         mutex_lock(&kvm->slots_lock);
3508
3509         r = -EINVAL;
3510         if (log->slot >= KVM_MEMORY_SLOTS)
3511                 goto out;
3512
3513         memslot = &kvm->memslots->memslots[log->slot];
3514         r = -ENOENT;
3515         if (!memslot->dirty_bitmap)
3516                 goto out;
3517
3518         n = kvm_dirty_bitmap_bytes(memslot);
3519
3520         for (i = 0; !is_dirty && i < n/sizeof(long); i++)
3521                 is_dirty = memslot->dirty_bitmap[i];
3522
3523         /* If nothing is dirty, don't bother messing with page tables. */
3524         if (is_dirty) {
3525                 struct kvm_memslots *slots, *old_slots;
3526                 unsigned long *dirty_bitmap;
3527
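                /*
                 * The dirty bitmap allocation is double-sized; switch the
                 * slot to the currently unused half (zeroed below) so dirty
                 * logging can continue while the old half is copied out.
                 */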
3528                 dirty_bitmap = memslot->dirty_bitmap_head;
3529                 if (memslot->dirty_bitmap == dirty_bitmap)
3530                         dirty_bitmap += n / sizeof(long);
3531                 memset(dirty_bitmap, 0, n);
3532
3533                 r = -ENOMEM;
3534                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
3535                 if (!slots)
3536                         goto out;
3537                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
3538                 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
3539                 slots->generation++;
3540
3541                 old_slots = kvm->memslots;
3542                 rcu_assign_pointer(kvm->memslots, slots);
3543                 synchronize_srcu_expedited(&kvm->srcu);
3544                 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
3545                 kfree(old_slots);
3546
3547                 spin_lock(&kvm->mmu_lock);
3548                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3549                 spin_unlock(&kvm->mmu_lock);
3550
3551                 r = -EFAULT;
3552                 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
3553                         goto out;
3554         } else {
3555                 r = -EFAULT;
3556                 if (clear_user(log->dirty_bitmap, n))
3557                         goto out;
3558         }
3559
3560         r = 0;
3561 out:
3562         mutex_unlock(&kvm->slots_lock);
3563         return r;
3564 }
3565
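/* Dispatcher for the x86-specific VM ioctls (irqchip, PIT, clock, ...). */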
3566 long kvm_arch_vm_ioctl(struct file *filp,
3567                        unsigned int ioctl, unsigned long arg)
3568 {
3569         struct kvm *kvm = filp->private_data;
3570         void __user *argp = (void __user *)arg;
3571         int r = -ENOTTY;
3572         /*
3573          * This union makes it completely explicit to gcc-3.x
3574          * that these two variables' stack usage should be
3575          * combined, not added together.
3576          */
3577         union {
3578                 struct kvm_pit_state ps;
3579                 struct kvm_pit_state2 ps2;
3580                 struct kvm_pit_config pit_config;
3581         } u;
3582
3583         switch (ioctl) {
3584         case KVM_SET_TSS_ADDR:
3585                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3586                 if (r < 0)
3587                         goto out;
3588                 break;
3589         case KVM_SET_IDENTITY_MAP_ADDR: {
3590                 u64 ident_addr;
3591
3592                 r = -EFAULT;
3593                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3594                         goto out;
3595                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3596                 if (r < 0)
3597                         goto out;
3598                 break;
3599         }
3600         case KVM_SET_NR_MMU_PAGES:
3601                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3602                 if (r)
3603                         goto out;
3604                 break;
3605         case KVM_GET_NR_MMU_PAGES:
3606                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3607                 break;
3608         case KVM_CREATE_IRQCHIP: {
3609                 struct kvm_pic *vpic;
3610
3611                 mutex_lock(&kvm->lock);
3612                 r = -EEXIST;
3613                 if (kvm->arch.vpic)
3614                         goto create_irqchip_unlock;
3615                 r = -EINVAL;
3616                 if (atomic_read(&kvm->online_vcpus))
3617                         goto create_irqchip_unlock;
3618                 r = -ENOMEM;
3619                 vpic = kvm_create_pic(kvm);
3620                 if (vpic) {
3621                         r = kvm_ioapic_init(kvm);
3622                         if (r) {
3623                                 mutex_lock(&kvm->slots_lock);
3624                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3625                                                           &vpic->dev_master);
3626                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3627                                                           &vpic->dev_slave);
3628                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3629                                                           &vpic->dev_eclr);
3630                                 mutex_unlock(&kvm->slots_lock);
3631                                 kfree(vpic);
3632                                 goto create_irqchip_unlock;
3633                         }
3634                 } else
3635                         goto create_irqchip_unlock;
3636                 smp_wmb();
3637                 kvm->arch.vpic = vpic;
3638                 smp_wmb();
3639                 r = kvm_setup_default_irq_routing(kvm);
3640                 if (r) {
3641                         mutex_lock(&kvm->slots_lock);
3642                         mutex_lock(&kvm->irq_lock);
3643                         kvm_ioapic_destroy(kvm);
3644                         kvm_destroy_pic(kvm);
3645                         mutex_unlock(&kvm->irq_lock);
3646                         mutex_unlock(&kvm->slots_lock);
3647                 }
3648         create_irqchip_unlock:
3649                 mutex_unlock(&kvm->lock);
3650                 break;
3651         }
3652         case KVM_CREATE_PIT:
3653                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3654                 goto create_pit;
3655         case KVM_CREATE_PIT2:
3656                 r = -EFAULT;
3657                 if (copy_from_user(&u.pit_config, argp,
3658                                    sizeof(struct kvm_pit_config)))
3659                         goto out;
3660         create_pit:
3661                 mutex_lock(&kvm->slots_lock);
3662                 r = -EEXIST;
3663                 if (kvm->arch.vpit)
3664                         goto create_pit_unlock;
3665                 r = -ENOMEM;
3666                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3667                 if (kvm->arch.vpit)
3668                         r = 0;
3669         create_pit_unlock:
3670                 mutex_unlock(&kvm->slots_lock);
3671                 break;
3672         case KVM_IRQ_LINE_STATUS:
3673         case KVM_IRQ_LINE: {
3674                 struct kvm_irq_level irq_event;
3675
3676                 r = -EFAULT;
3677                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3678                         goto out;
3679                 r = -ENXIO;
3680                 if (irqchip_in_kernel(kvm)) {
3681                         __s32 status;
3682                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3683                                         irq_event.irq, irq_event.level);
3684                         if (ioctl == KVM_IRQ_LINE_STATUS) {
3685                                 r = -EFAULT;
3686                                 irq_event.status = status;
3687                                 if (copy_to_user(argp, &irq_event,
3688                                                         sizeof irq_event))
3689                                         goto out;
3690                         }
3691                         r = 0;
3692                 }
3693                 break;
3694         }
3695         case KVM_GET_IRQCHIP: {
3696                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3697                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3698
3699                 r = -ENOMEM;
3700                 if (!chip)
3701                         goto out;
3702                 r = -EFAULT;
3703                 if (copy_from_user(chip, argp, sizeof *chip))
3704                         goto get_irqchip_out;
3705                 r = -ENXIO;
3706                 if (!irqchip_in_kernel(kvm))
3707                         goto get_irqchip_out;
3708                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3709                 if (r)
3710                         goto get_irqchip_out;
3711                 r = -EFAULT;
3712                 if (copy_to_user(argp, chip, sizeof *chip))
3713                         goto get_irqchip_out;
3714                 r = 0;
3715         get_irqchip_out:
3716                 kfree(chip);
3717                 if (r)
3718                         goto out;
3719                 break;
3720         }
3721         case KVM_SET_IRQCHIP: {
3722                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3723                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3724
3725                 r = -ENOMEM;
3726                 if (!chip)
3727                         goto out;
3728                 r = -EFAULT;
3729                 if (copy_from_user(chip, argp, sizeof *chip))
3730                         goto set_irqchip_out;
3731                 r = -ENXIO;
3732                 if (!irqchip_in_kernel(kvm))
3733                         goto set_irqchip_out;
3734                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3735                 if (r)
3736                         goto set_irqchip_out;
3737                 r = 0;
3738         set_irqchip_out:
3739                 kfree(chip);
3740                 if (r)
3741                         goto out;
3742                 break;
3743         }
3744         case KVM_GET_PIT: {
3745                 r = -EFAULT;
3746                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3747                         goto out;
3748                 r = -ENXIO;
3749                 if (!kvm->arch.vpit)
3750                         goto out;
3751                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3752                 if (r)
3753                         goto out;
3754                 r = -EFAULT;
3755                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3756                         goto out;
3757                 r = 0;
3758                 break;
3759         }
3760         case KVM_SET_PIT: {
3761                 r = -EFAULT;
3762                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3763                         goto out;
3764                 r = -ENXIO;
3765                 if (!kvm->arch.vpit)
3766                         goto out;
3767                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3768                 if (r)
3769                         goto out;
3770                 r = 0;
3771                 break;
3772         }
3773         case KVM_GET_PIT2: {
3774                 r = -ENXIO;
3775                 if (!kvm->arch.vpit)
3776                         goto out;
3777                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3778                 if (r)
3779                         goto out;
3780                 r = -EFAULT;
3781                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3782                         goto out;
3783                 r = 0;
3784                 break;
3785         }
3786         case KVM_SET_PIT2: {
3787                 r = -EFAULT;
3788                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3789                         goto out;
3790                 r = -ENXIO;
3791                 if (!kvm->arch.vpit)
3792                         goto out;
3793                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3794                 if (r)
3795                         goto out;
3796                 r = 0;
3797                 break;
3798         }
3799         case KVM_REINJECT_CONTROL: {
3800                 struct kvm_reinject_control control;
3801                 r =  -EFAULT;
3802                 if (copy_from_user(&control, argp, sizeof(control)))
3803                         goto out;
3804                 r = kvm_vm_ioctl_reinject(kvm, &control);
3805                 if (r)
3806                         goto out;
3807                 r = 0;
3808                 break;
3809         }
3810         case KVM_XEN_HVM_CONFIG: {
3811                 r = -EFAULT;
3812                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3813                                    sizeof(struct kvm_xen_hvm_config)))
3814                         goto out;
3815                 r = -EINVAL;
3816                 if (kvm->arch.xen_hvm_config.flags)
3817                         goto out;
3818                 r = 0;
3819                 break;
3820         }
3821         case KVM_SET_CLOCK: {
3822                 struct kvm_clock_data user_ns;
3823                 u64 now_ns;
3824                 s64 delta;
3825
3826                 r = -EFAULT;
3827                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3828                         goto out;
3829
3830                 r = -EINVAL;
3831                 if (user_ns.flags)
3832                         goto out;
3833
3834                 r = 0;
3835                 local_irq_disable();
3836                 now_ns = get_kernel_ns();
3837                 delta = user_ns.clock - now_ns;
3838                 local_irq_enable();
3839                 kvm->arch.kvmclock_offset = delta;
3840                 break;
3841         }
3842         case KVM_GET_CLOCK: {
3843                 struct kvm_clock_data user_ns;
3844                 u64 now_ns;
3845
3846                 local_irq_disable();
3847                 now_ns = get_kernel_ns();
3848                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3849                 local_irq_enable();
3850                 user_ns.flags = 0;
3851                 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3852
3853                 r = -EFAULT;
3854                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3855                         goto out;
3856                 r = 0;
3857                 break;
3858         }
3859
3860         default:
3861                 ;
3862         }
3863 out:
3864         return r;
3865 }
3866
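/*
 * Build the list of MSRs reported to userspace by probing which of the
 * candidate MSRs actually exist on this host.
 */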
3867 static void kvm_init_msr_list(void)
3868 {
3869         u32 dummy[2];
3870         unsigned i, j;
3871
3872         /* Skip the leading KVM-specific MSRs; they are not probed on the host. */
3873         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3874                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3875                         continue;
3876
3877                 /*
3878                  * Even MSRs that are valid in the host may not be exposed
3879                  * to the guests in some cases.
3880                  */
3881                 switch (msrs_to_save[i]) {
3882                 case MSR_TSC_AUX:
3883                         if (!kvm_x86_ops->rdtscp_supported())
3884                                 continue;
3885                         break;
3886                 default:
3887                         break;
3888                 }
3889
3890                 if (j < i)
3891                         msrs_to_save[j] = msrs_to_save[i];
3892                 j++;
3893         }
3894         num_msrs_to_save = j;
3895 }
3896
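/*
 * Write to guest MMIO in chunks of at most 8 bytes, trying the local APIC
 * first and then the MMIO bus.  Returns how many bytes were handled in
 * the kernel.
 */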
3897 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3898                            const void *v)
3899 {
3900         int handled = 0;
3901         int n;
3902
3903         do {
3904                 n = min(len, 8);
3905                 if (!(vcpu->arch.apic &&
3906                       !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
3907                     && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3908                         break;
3909                 handled += n;
3910                 addr += n;
3911                 len -= n;
3912                 v += n;
3913         } while (len);
3914
3915         return handled;
3916 }
3917
3918 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3919 {
3920         int handled = 0;
3921         int n;
3922
3923         do {
3924                 n = min(len, 8);
3925                 if (!(vcpu->arch.apic &&
3926                       !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
3927                     && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3928                         break;
3929                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
3930                 handled += n;
3931                 addr += n;
3932                 len -= n;
3933                 v += n;
3934         } while (len);
3935
3936         return handled;
3937 }
3938
3939 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3940                         struct kvm_segment *var, int seg)
3941 {
3942         kvm_x86_ops->set_segment(vcpu, var, seg);
3943 }
3944
3945 void kvm_get_segment(struct kvm_vcpu *vcpu,
3946                      struct kvm_segment *var, int seg)
3947 {
3948         kvm_x86_ops->get_segment(vcpu, var, seg);
3949 }
3950
3951 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3952 {
3953         return gpa;
3954 }
3955
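/* Translate a nested guest physical address into an L1 guest physical address. */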
3956 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3957 {
3958         gpa_t t_gpa;
3959         struct x86_exception exception;
3960
3961         BUG_ON(!mmu_is_nested(vcpu));
3962
3963         /* NPT walks are always user-walks */
3964         access |= PFERR_USER_MASK;
3965         t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
3966
3967         return t_gpa;
3968 }
3969
3970 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3971                               struct x86_exception *exception)
3972 {
3973         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3974         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3975 }
3976
3977 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3978                                struct x86_exception *exception)
3979 {
3980         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3981         access |= PFERR_FETCH_MASK;
3982         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3983 }
3984
3985 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
3986                                struct x86_exception *exception)
3987 {
3988         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3989         access |= PFERR_WRITE_MASK;
3990         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3991 }
3992
3993 /* used by KVM itself to access the guest's mapped memory without checking CPL */
3994 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
3995                                 struct x86_exception *exception)
3996 {
3997         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
3998 }
3999
4000 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
4001                                       struct kvm_vcpu *vcpu, u32 access,
4002                                       struct x86_exception *exception)
4003 {
4004         void *data = val;
4005         int r = X86EMUL_CONTINUE;
4006
4007         while (bytes) {
4008                 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4009                                                             exception);
4010                 unsigned offset = addr & (PAGE_SIZE-1);
4011                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4012                 int ret;
4013
4014                 if (gpa == UNMAPPED_GVA)
4015                         return X86EMUL_PROPAGATE_FAULT;
4016                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
4017                 if (ret < 0) {
4018                         r = X86EMUL_IO_NEEDED;
4019                         goto out;
4020                 }
4021
4022                 bytes -= toread;
4023                 data += toread;
4024                 addr += toread;
4025         }
4026 out:
4027         return r;
4028 }
4029
4030 /* used for instruction fetching */
4031 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4032                                 gva_t addr, void *val, unsigned int bytes,
4033                                 struct x86_exception *exception)
4034 {
4035         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4036         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4037
4038         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
4039                                           access | PFERR_FETCH_MASK,
4040                                           exception);
4041 }
4042
4043 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4044                                gva_t addr, void *val, unsigned int bytes,
4045                                struct x86_exception *exception)
4046 {
4047         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4048         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4049
4050         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4051                                           exception);
4052 }
4053 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4054
4055 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4056                                       gva_t addr, void *val, unsigned int bytes,
4057                                       struct x86_exception *exception)
4058 {
4059         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4060         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4061 }
4062
4063 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4064                                        gva_t addr, void *val,
4065                                        unsigned int bytes,
4066                                        struct x86_exception *exception)
4067 {
4068         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4069         void *data = val;
4070         int r = X86EMUL_CONTINUE;
4071
4072         while (bytes) {
4073                 gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4074                                                              PFERR_WRITE_MASK,
4075                                                              exception);
4076                 unsigned offset = addr & (PAGE_SIZE-1);
4077                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
4078                 int ret;
4079
4080                 if (gpa == UNMAPPED_GVA)
4081                         return X86EMUL_PROPAGATE_FAULT;
4082                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
4083                 if (ret < 0) {
4084                         r = X86EMUL_IO_NEEDED;
4085                         goto out;
4086                 }
4087
4088                 bytes -= towrite;
4089                 data += towrite;
4090                 addr += towrite;
4091         }
4092 out:
4093         return r;
4094 }
4095 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4096
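/*
 * Translate a guest virtual address for an emulated memory access.
 * Returns 1 if the access must be treated as MMIO, 0 if it hits ordinary
 * guest RAM, and -1 if the translation faulted (*exception is filled in).
 */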
4097 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4098                                 gpa_t *gpa, struct x86_exception *exception,
4099                                 bool write)
4100 {
4101         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4102
4103         if (vcpu_match_mmio_gva(vcpu, gva) &&
4104                   check_write_user_access(vcpu, write, access,
4105                   vcpu->arch.access)) {
4106                 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4107                                         (gva & (PAGE_SIZE - 1));
4108                 trace_vcpu_match_mmio(gva, *gpa, write, false);
4109                 return 1;
4110         }
4111
4112         if (write)
4113                 access |= PFERR_WRITE_MASK;
4114
4115         *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4116
4117         if (*gpa == UNMAPPED_GVA)
4118                 return -1;
4119
4120         /* For APIC access vmexit */
4121         if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4122                 return 1;
4123
4124         if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
4125                 trace_vcpu_match_mmio(gva, *gpa, write, true);
4126                 return 1;
4127         }
4128
4129         return 0;
4130 }
4131
4132 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4133                         const void *val, int bytes)
4134 {
4135         int ret;
4136
4137         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
4138         if (ret < 0)
4139                 return 0;
4140         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
4141         return 1;
4142 }
4143
4144 struct read_write_emulator_ops {
4145         int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4146                                   int bytes);
4147         int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4148                                   void *val, int bytes);
4149         int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4150                                int bytes, void *val);
4151         int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4152                                     void *val, int bytes);
4153         bool write;
4154 };
4155
4156 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4157 {
4158         if (vcpu->mmio_read_completed) {
4159                 memcpy(val, vcpu->mmio_data, bytes);
4160                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4161                                vcpu->mmio_phys_addr, *(u64 *)val);
4162                 vcpu->mmio_read_completed = 0;
4163                 return 1;
4164         }
4165
4166         return 0;
4167 }
4168
4169 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4170                         void *val, int bytes)
4171 {
4172         return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
4173 }
4174
4175 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4176                          void *val, int bytes)
4177 {
4178         return emulator_write_phys(vcpu, gpa, val, bytes);
4179 }
4180
4181 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4182 {
4183         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
4184         return vcpu_mmio_write(vcpu, gpa, bytes, val);
4185 }
4186
4187 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4188                           void *val, int bytes)
4189 {
4190         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
4191         return X86EMUL_IO_NEEDED;
4192 }
4193
4194 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4195                            void *val, int bytes)
4196 {
4197         memcpy(vcpu->mmio_data, val, bytes);
4198         memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
4199         return X86EMUL_CONTINUE;
4200 }
4201
4202 static struct read_write_emulator_ops read_emultor = {
4203         .read_write_prepare = read_prepare,
4204         .read_write_emulate = read_emulate,
4205         .read_write_mmio = vcpu_mmio_read,
4206         .read_write_exit_mmio = read_exit_mmio,
4207 };
4208
4209 static struct read_write_emulator_ops write_emultor = {
4210         .read_write_emulate = write_emulate,
4211         .read_write_mmio = write_mmio,
4212         .read_write_exit_mmio = write_exit_mmio,
4213         .write = true,
4214 };
4215
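/*
 * Handle an emulated read or write that does not cross a page boundary.
 * RAM-backed accesses are serviced directly; everything else goes through
 * the in-kernel MMIO handlers, and whatever they cannot complete is
 * forwarded to userspace as a KVM_EXIT_MMIO exit.
 */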
4216 static int emulator_read_write_onepage(unsigned long addr, void *val,
4217                                        unsigned int bytes,
4218                                        struct x86_exception *exception,
4219                                        struct kvm_vcpu *vcpu,
4220                                        struct read_write_emulator_ops *ops)
4221 {
4222         gpa_t gpa;
4223         int handled, ret;
4224         bool write = ops->write;
4225
4226         if (ops->read_write_prepare &&
4227                   ops->read_write_prepare(vcpu, val, bytes))
4228                 return X86EMUL_CONTINUE;
4229
4230         ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4231
4232         if (ret < 0)
4233                 return X86EMUL_PROPAGATE_FAULT;
4234
4235         /* For APIC access vmexit */
4236         if (ret)
4237                 goto mmio;
4238
4239         if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4240                 return X86EMUL_CONTINUE;
4241
4242 mmio:
4243         /*
4244          * Is this MMIO handled locally?
4245          */
4246         handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4247         if (handled == bytes)
4248                 return X86EMUL_CONTINUE;
4249
4250         gpa += handled;
4251         bytes -= handled;
4252         val += handled;
4253
4254         vcpu->mmio_needed = 1;
4255         vcpu->run->exit_reason = KVM_EXIT_MMIO;
4256         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
4257         vcpu->mmio_size = bytes;
4258         vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
4259         vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
4260         vcpu->mmio_index = 0;
4261
4262         return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4263 }
4264
4265 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
4266                         void *val, unsigned int bytes,
4267                         struct x86_exception *exception,
4268                         struct read_write_emulator_ops *ops)
4269 {
4270         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4271
4272         /* Crossing a page boundary? */
4273         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
4274                 int rc, now;
4275
4276                 now = -addr & ~PAGE_MASK;
4277                 rc = emulator_read_write_onepage(addr, val, now, exception,
4278                                                  vcpu, ops);
4279
4280                 if (rc != X86EMUL_CONTINUE)
4281                         return rc;
4282                 addr += now;
4283                 val += now;
4284                 bytes -= now;
4285         }
4286
4287         return emulator_read_write_onepage(addr, val, bytes, exception,
4288                                            vcpu, ops);
4289 }
4290
4291 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
4292                                   unsigned long addr,
4293                                   void *val,
4294                                   unsigned int bytes,
4295                                   struct x86_exception *exception)
4296 {
4297         return emulator_read_write(ctxt, addr, val, bytes,
4298                                    exception, &read_emultor);
4299 }
4300
4301 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4302                             unsigned long addr,
4303                             const void *val,
4304                             unsigned int bytes,
4305                             struct x86_exception *exception)
4306 {
4307         return emulator_read_write(ctxt, addr, (void *)val, bytes,
4308                                    exception, &write_emultor);
4309 }
4310
4311 #define CMPXCHG_TYPE(t, ptr, old, new) \
4312         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
4313
4314 #ifdef CONFIG_X86_64
4315 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
4316 #else
4317 #  define CMPXCHG64(ptr, old, new) \
4318         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4319 #endif
4320
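/*
 * Emulate cmpxchg on guest memory.  When the operands fit within one page
 * the exchange is done atomically through a kernel mapping of the guest
 * page; otherwise it degrades to a plain emulated write.
 */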
4321 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
4322                                      unsigned long addr,
4323                                      const void *old,
4324                                      const void *new,
4325                                      unsigned int bytes,
4326                                      struct x86_exception *exception)
4327 {
4328         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4329         gpa_t gpa;
4330         struct page *page;
4331         char *kaddr;
4332         bool exchanged;
4333
4334         /* the guest's cmpxchg8b has to be emulated atomically */
4335         if (bytes > 8 || (bytes & (bytes - 1)))
4336                 goto emul_write;
4337
4338         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4339
4340         if (gpa == UNMAPPED_GVA ||
4341             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4342                 goto emul_write;
4343
4344         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
4345                 goto emul_write;
4346
4347         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4348         if (is_error_page(page)) {
4349                 kvm_release_page_clean(page);
4350                 goto emul_write;
4351         }
4352
4353         kaddr = kmap_atomic(page, KM_USER0);
4354         kaddr += offset_in_page(gpa);
4355         switch (bytes) {
4356         case 1:
4357                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
4358                 break;
4359         case 2:
4360                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
4361                 break;
4362         case 4:
4363                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
4364                 break;
4365         case 8:
4366                 exchanged = CMPXCHG64(kaddr, old, new);
4367                 break;
4368         default:
4369                 BUG();
4370         }
4371         kunmap_atomic(kaddr, KM_USER0);
4372         kvm_release_page_dirty(page);
4373
4374         if (!exchanged)
4375                 return X86EMUL_CMPXCHG_FAILED;
4376
4377         kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
4378
4379         return X86EMUL_CONTINUE;
4380
4381 emul_write:
4382         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4383
4384         return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4385 }
4386
4387 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4388 {
4389         /* TODO: string I/O for in-kernel devices */
4390         int r;
4391
4392         if (vcpu->arch.pio.in)
4393                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
4394                                     vcpu->arch.pio.size, pd);
4395         else
4396                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
4397                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
4398                                      pd);
4399         return r;
4400 }
4401
4402
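/*
 * Emulated IN: if an in-kernel device claims the port the data is returned
 * immediately; otherwise a KVM_EXIT_IO exit is set up and the data is
 * picked up on the next call via the data_avail path.
 */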
4403 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4404                                     int size, unsigned short port, void *val,
4405                                     unsigned int count)
4406 {
4407         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4408
4409         if (vcpu->arch.pio.count)
4410                 goto data_avail;
4411
4412         trace_kvm_pio(0, port, size, count);
4413
4414         vcpu->arch.pio.port = port;
4415         vcpu->arch.pio.in = 1;
4416         vcpu->arch.pio.count  = count;
4417         vcpu->arch.pio.size = size;
4418
4419         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4420         data_avail:
4421                 memcpy(val, vcpu->arch.pio_data, size * count);
4422                 vcpu->arch.pio.count = 0;
4423                 return 1;
4424         }
4425
4426         vcpu->run->exit_reason = KVM_EXIT_IO;
4427         vcpu->run->io.direction = KVM_EXIT_IO_IN;
4428         vcpu->run->io.size = size;
4429         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4430         vcpu->run->io.count = count;
4431         vcpu->run->io.port = port;
4432
4433         return 0;
4434 }
4435
4436 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4437                                      int size, unsigned short port,
4438                                      const void *val, unsigned int count)
4439 {
4440         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4441
4442         trace_kvm_pio(1, port, size, count);
4443
4444         vcpu->arch.pio.port = port;
4445         vcpu->arch.pio.in = 0;
4446         vcpu->arch.pio.count = count;
4447         vcpu->arch.pio.size = size;
4448
4449         memcpy(vcpu->arch.pio_data, val, size * count);
4450
4451         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4452                 vcpu->arch.pio.count = 0;
4453                 return 1;
4454         }
4455
4456         vcpu->run->exit_reason = KVM_EXIT_IO;
4457         vcpu->run->io.direction = KVM_EXIT_IO_OUT;
4458         vcpu->run->io.size = size;
4459         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4460         vcpu->run->io.count = count;
4461         vcpu->run->io.port = port;
4462
4463         return 0;
4464 }
4465
4466 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4467 {
4468         return kvm_x86_ops->get_segment_base(vcpu, seg);
4469 }
4470
4471 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4472 {
4473         kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4474 }
4475
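/*
 * If the vcpu needs it, flush the caches: with WBINVD exiting available,
 * send wbinvd_ipi to every physical CPU in the vcpu's wbinvd_dirty_mask;
 * otherwise just execute wbinvd on the local CPU.
 */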
4476 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4477 {
4478         if (!need_emulate_wbinvd(vcpu))
4479                 return X86EMUL_CONTINUE;
4480
4481         if (kvm_x86_ops->has_wbinvd_exit()) {
4482                 int cpu = get_cpu();
4483
4484                 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4485                 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4486                                 wbinvd_ipi, NULL, 1);
4487                 put_cpu();
4488                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4489         } else
4490                 wbinvd();
4491         return X86EMUL_CONTINUE;
4492 }
4493 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4494
4495 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4496 {
4497         kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
4498 }
4499
4500 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
4501 {
4502         return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4503 }
4504
4505 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
4506 {
4507
4508         return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4509 }
4510
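/* Merge a 32-bit value into the low half of a CR, keeping the upper bits. */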
4511 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4512 {
4513         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4514 }
4515
4516 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4517 {
4518         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4519         unsigned long value;
4520
4521         switch (cr) {
4522         case 0:
4523                 value = kvm_read_cr0(vcpu);
4524                 break;
4525         case 2:
4526                 value = vcpu->arch.cr2;
4527                 break;
4528         case 3:
4529                 value = kvm_read_cr3(vcpu);
4530                 break;
4531         case 4:
4532                 value = kvm_read_cr4(vcpu);
4533                 break;
4534         case 8:
4535                 value = kvm_get_cr8(vcpu);
4536                 break;
4537         default:
4538                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4539                 return 0;
4540         }
4541
4542         return value;
4543 }
4544
4545 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4546 {
4547         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4548         int res = 0;
4549
4550         switch (cr) {
4551         case 0:
4552                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4553                 break;
4554         case 2:
4555                 vcpu->arch.cr2 = val;
4556                 break;
4557         case 3:
4558                 res = kvm_set_cr3(vcpu, val);
4559                 break;
4560         case 4:
4561                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4562                 break;
4563         case 8:
4564                 res = kvm_set_cr8(vcpu, val);
4565                 break;
4566         default:
4567                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4568                 res = -1;
4569         }
4570
4571         return res;
4572 }
4573
4574 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4575 {
4576         return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4577 }
4578
4579 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4580 {
4581         kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4582 }
4583
4584 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4585 {
4586         kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4587 }
4588
4589 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4590 {
4591         kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4592 }
4593
4594 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4595 {
4596         kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4597 }
4598
4599 static unsigned long emulator_get_cached_segment_base(
4600         struct x86_emulate_ctxt *ctxt, int seg)
4601 {
4602         return get_segment_base(emul_to_vcpu(ctxt), seg);
4603 }
4604
4605 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4606                                  struct desc_struct *desc, u32 *base3,
4607                                  int seg)
4608 {
4609         struct kvm_segment var;
4610
4611         kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4612         *selector = var.selector;
4613
4614         if (var.unusable)
4615                 return false;
4616
4617         if (var.g)
4618                 var.limit >>= 12;
4619         set_desc_limit(desc, var.limit);
4620         set_desc_base(desc, (unsigned long)var.base);
4621 #ifdef CONFIG_X86_64
4622         if (base3)
4623                 *base3 = var.base >> 32;
4624 #endif
4625         desc->type = var.type;
4626         desc->s = var.s;
4627         desc->dpl = var.dpl;
4628         desc->p = var.present;
4629         desc->avl = var.avl;
4630         desc->l = var.l;
4631         desc->d = var.db;
4632         desc->g = var.g;
4633
4634         return true;
4635 }
4636
4637 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4638                                  struct desc_struct *desc, u32 base3,
4639                                  int seg)
4640 {
4641         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4642         struct kvm_segment var;
4643
4644         var.selector = selector;
4645         var.base = get_desc_base(desc);
4646 #ifdef CONFIG_X86_64
4647         var.base |= ((u64)base3) << 32;
4648 #endif
4649         var.limit = get_desc_limit(desc);
4650         if (desc->g)
4651                 var.limit = (var.limit << 12) | 0xfff;
4652         var.type = desc->type;
4653         var.present = desc->p;
4654         var.dpl = desc->dpl;
4655         var.db = desc->d;
4656         var.s = desc->s;
4657         var.l = desc->l;
4658         var.g = desc->g;
4659         var.avl = desc->avl;
4661         var.unusable = !var.present;
4662         var.padding = 0;
4663
4664         kvm_set_segment(vcpu, &var, seg);
4666 }
4667
4668 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4669                             u32 msr_index, u64 *pdata)
4670 {
4671         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
4672 }
4673
4674 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4675                             u32 msr_index, u64 data)
4676 {
4677         return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
4678 }
4679
4680 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4681 {
4682         emul_to_vcpu(ctxt)->arch.halt_request = 1;
4683 }
4684
4685 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4686 {
4687         preempt_disable();
4688         kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4689         /*
4690          * CR0.TS may reference the host fpu state, not the guest fpu state,
4691          * so it may be clear at this point.
4692          */
4693         clts();
4694 }
4695
4696 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4697 {
4698         preempt_enable();
4699 }
4700
4701 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4702                               struct x86_instruction_info *info,
4703                               enum x86_intercept_stage stage)
4704 {
4705         return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4706 }
4707
4708 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4709                                u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4710 {
4711         struct kvm_cpuid_entry2 *cpuid = NULL;
4712
4713         if (eax && ecx)
4714                 cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
4715                                             *eax, *ecx);
4716
4717         if (cpuid) {
4718                 *eax = cpuid->eax;
4719                 *ecx = cpuid->ecx;
4720                 if (ebx)
4721                         *ebx = cpuid->ebx;
4722                 if (edx)
4723                         *edx = cpuid->edx;
4724                 return true;
4725         }
4726
4727         return false;
4728 }
4729
4730 static struct x86_emulate_ops emulate_ops = {
4731         .read_std            = kvm_read_guest_virt_system,
4732         .write_std           = kvm_write_guest_virt_system,
4733         .fetch               = kvm_fetch_guest_virt,
4734         .read_emulated       = emulator_read_emulated,
4735         .write_emulated      = emulator_write_emulated,
4736         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
4737         .invlpg              = emulator_invlpg,
4738         .pio_in_emulated     = emulator_pio_in_emulated,
4739         .pio_out_emulated    = emulator_pio_out_emulated,
4740         .get_segment         = emulator_get_segment,
4741         .set_segment         = emulator_set_segment,
4742         .get_cached_segment_base = emulator_get_cached_segment_base,
4743         .get_gdt             = emulator_get_gdt,
4744         .get_idt             = emulator_get_idt,
4745         .set_gdt             = emulator_set_gdt,
4746         .set_idt             = emulator_set_idt,
4747         .get_cr              = emulator_get_cr,
4748         .set_cr              = emulator_set_cr,
4749         .cpl                 = emulator_get_cpl,
4750         .get_dr              = emulator_get_dr,
4751         .set_dr              = emulator_set_dr,
4752         .set_msr             = emulator_set_msr,
4753         .get_msr             = emulator_get_msr,
4754         .halt                = emulator_halt,
4755         .wbinvd              = emulator_wbinvd,
4756         .fix_hypercall       = emulator_fix_hypercall,
4757         .get_fpu             = emulator_get_fpu,
4758         .put_fpu             = emulator_put_fpu,
4759         .intercept           = emulator_intercept,
4760         .get_cpuid           = emulator_get_cpuid,
4761 };
4762
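/*
 * Make sure the register cache is populated (RAX, RSP and RIP may be read
 * lazily from hardware) and mark all registers dirty so that the values the
 * emulator leaves in vcpu->arch.regs are written back afterwards.
 */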
4763 static void cache_all_regs(struct kvm_vcpu *vcpu)
4764 {
4765         kvm_register_read(vcpu, VCPU_REGS_RAX);
4766         kvm_register_read(vcpu, VCPU_REGS_RSP);
4767         kvm_register_read(vcpu, VCPU_REGS_RIP);
4768         vcpu->arch.regs_dirty = ~0;
4769 }
4770
4771 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4772 {
4773         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
4774         /*
4775          * An "sti; sti" sequence only disables interrupts for the first
4776          * instruction. So, if the last instruction, whether emulated or
4777          * not, left the system with the INT_STI flag enabled, it means
4778          * that the last instruction was an sti and we should not leave
4779          * the flag on in this case.  The same goes for "mov ss".
4780          */
4781         if (!(int_shadow & mask))
4782                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4783 }
4784
4785 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4786 {
4787         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4788         if (ctxt->exception.vector == PF_VECTOR)
4789                 kvm_propagate_fault(vcpu, &ctxt->exception);
4790         else if (ctxt->exception.error_code_valid)
4791                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
4792                                       ctxt->exception.error_code);
4793         else
4794                 kvm_queue_exception(vcpu, ctxt->exception.vector);
4795 }
4796
4797 static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
4798                               const unsigned long *regs)
4799 {
4800         memset(&ctxt->twobyte, 0,
4801                (void *)&ctxt->regs - (void *)&ctxt->twobyte);
4802         memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
4803
4804         ctxt->fetch.start = 0;
4805         ctxt->fetch.end = 0;
4806         ctxt->io_read.pos = 0;
4807         ctxt->io_read.end = 0;
4808         ctxt->mem_read.pos = 0;
4809         ctxt->mem_read.end = 0;
4810 }
4811
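/*
 * Snapshot the vcpu state into the shared emulation context: flags, rip and
 * the CPU mode derived from CR0.PE, EFLAGS.VM and the CS L/D bits, then
 * reset the decode cache.
 */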
4812 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4813 {
4814         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4815         int cs_db, cs_l;
4816
4817         /*
4818          * TODO: fix emulate.c to use guest_read/write_register
4819          * instead of direct ->regs accesses; that can save a hundred
4820          * cycles on Intel for instructions that don't read/change RSP,
4821          * for example.
4822          */
4823         cache_all_regs(vcpu);
4824
4825         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4826
4827         ctxt->eflags = kvm_get_rflags(vcpu);
4828         ctxt->eip = kvm_rip_read(vcpu);
4829         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
4830                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
4831                      cs_l                               ? X86EMUL_MODE_PROT64 :
4832                      cs_db                              ? X86EMUL_MODE_PROT32 :
4833                                                           X86EMUL_MODE_PROT16;
4834         ctxt->guest_mode = is_guest_mode(vcpu);
4835
4836         init_decode_cache(ctxt, vcpu->arch.regs);
4837         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4838 }
4839
4840 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
4841 {
4842         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4843         int ret;
4844
4845         init_emulate_ctxt(vcpu);
4846
4847         ctxt->op_bytes = 2;
4848         ctxt->ad_bytes = 2;
4849         ctxt->_eip = ctxt->eip + inc_eip;
4850         ret = emulate_int_real(ctxt, irq);
4851
4852         if (ret != X86EMUL_CONTINUE)
4853                 return EMULATE_FAIL;
4854
4855         ctxt->eip = ctxt->_eip;
4856         memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4857         kvm_rip_write(vcpu, ctxt->eip);
4858         kvm_set_rflags(vcpu, ctxt->eflags);
4859
4860         if (irq == NMI_VECTOR)
4861                 vcpu->arch.nmi_pending = 0;
4862         else
4863                 vcpu->arch.interrupt.pending = false;
4864
4865         return EMULATE_DONE;
4866 }
4867 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4868
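/*
 * Give up on emulation: report an internal error to userspace when the
 * failure happened at CPL 0 outside nested guest mode, and queue a #UD
 * for the guest.
 */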
4869 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4870 {
4871         int r = EMULATE_DONE;
4872
4873         ++vcpu->stat.insn_emulation_fail;
4874         trace_kvm_emulate_insn_failed(vcpu);
4875         if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
4876                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4877                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4878                 vcpu->run->internal.ndata = 0;
4879                 r = EMULATE_FAIL;
4880         }
4881         kvm_queue_exception(vcpu, UD_VECTOR);
4882
4883         return r;
4884 }
4885
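/*
 * Decide whether a failed emulation can be handled by simply re-entering
 * the guest: only possible with shadow paging, where unprotecting the
 * shadowed page lets the CPU execute the instruction directly.
 */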
4886 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4887 {
4888         gpa_t gpa;
4889
4890         if (tdp_enabled)
4891                 return false;
4892
4893         /*
4894          * If emulation was due to an access to a shadowed page table
4895          * and it failed, try to unshadow the page and re-enter the
4896          * guest to let the CPU execute the instruction.
4897          */
4898         if (kvm_mmu_unprotect_page_virt(vcpu, gva))
4899                 return true;
4900
4901         gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
4902
4903         if (gpa == UNMAPPED_GVA)
4904                 return true; /* let cpu generate fault */
4905
4906         if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
4907                 return true;
4908
4909         return false;
4910 }
4911
4912 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4913                             unsigned long cr2,
4914                             int emulation_type,
4915                             void *insn,
4916                             int insn_len)
4917 {
4918         int r;
4919         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4920         bool writeback = true;
4921
4922         kvm_clear_exception_queue(vcpu);
4923
4924         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4925                 init_emulate_ctxt(vcpu);
4926                 ctxt->interruptibility = 0;
4927                 ctxt->have_exception = false;
4928                 ctxt->perm_ok = false;
4929
4930                 ctxt->only_vendor_specific_insn
4931                         = emulation_type & EMULTYPE_TRAP_UD;
4932
4933                 r = x86_decode_insn(ctxt, insn, insn_len);
4934
4935                 trace_kvm_emulate_insn_start(vcpu);
4936                 ++vcpu->stat.insn_emulation;
4937                 if (r != EMULATION_OK)  {
4938                         if (emulation_type & EMULTYPE_TRAP_UD)
4939                                 return EMULATE_FAIL;
4940                         if (reexecute_instruction(vcpu, cr2))
4941                                 return EMULATE_DONE;
4942                         if (emulation_type & EMULTYPE_SKIP)
4943                                 return EMULATE_FAIL;
4944                         return handle_emulation_failure(vcpu);
4945                 }
4946         }
4947
4948         if (emulation_type & EMULTYPE_SKIP) {
4949                 kvm_rip_write(vcpu, ctxt->_eip);
4950                 return EMULATE_DONE;
4951         }
4952
4953         /* This is needed for the vmware backdoor interface to work since
4954            it changes register values during the IO operation */
4955         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
4956                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4957                 memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
4958         }
4959
4960 restart:
4961         r = x86_emulate_insn(ctxt);
4962
4963         if (r == EMULATION_INTERCEPTED)
4964                 return EMULATE_DONE;
4965
4966         if (r == EMULATION_FAILED) {
4967                 if (reexecute_instruction(vcpu, cr2))
4968                         return EMULATE_DONE;
4969
4970                 return handle_emulation_failure(vcpu);
4971         }
4972
4973         if (ctxt->have_exception) {
4974                 inject_emulated_exception(vcpu);
4975                 r = EMULATE_DONE;
4976         } else if (vcpu->arch.pio.count) {
4977                 if (!vcpu->arch.pio.in)
4978                         vcpu->arch.pio.count = 0;
4979                 else
4980                         writeback = false;
4981                 r = EMULATE_DO_MMIO;
4982         } else if (vcpu->mmio_needed) {
4983                 if (!vcpu->mmio_is_write)
4984                         writeback = false;
4985                 r = EMULATE_DO_MMIO;
4986         } else if (r == EMULATION_RESTART)
4987                 goto restart;
4988         else
4989                 r = EMULATE_DONE;
4990
4991         if (writeback) {
4992                 toggle_interruptibility(vcpu, ctxt->interruptibility);
4993                 kvm_set_rflags(vcpu, ctxt->eflags);
4994                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4995                 memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4996                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
4997                 kvm_rip_write(vcpu, ctxt->eip);
4998         } else
4999                 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
5000
5001         return r;
5002 }
5003 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
5004
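/*
 * Fast path for a single "out" from RAX: reuse the emulator PIO helper but
 * clear pio.count so we never re-enter the emulator once userspace has
 * completed the access.
 */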
5005 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
5006 {
5007         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
5008         int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
5009                                             size, port, &val, 1);
5010         /* do not return to emulator after return from userspace */
5011         vcpu->arch.pio.count = 0;
5012         return ret;
5013 }
5014 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
5015
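/*
 * IPI helpers that keep the per-cpu cpu_tsc_khz value in sync: a CPU going
 * down has its value cleared, while a frequency change or onlining refreshes
 * it from cpufreq (falling back to tsc_khz when the frequency is unknown).
 */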
5016 static void tsc_bad(void *info)
5017 {
5018         __this_cpu_write(cpu_tsc_khz, 0);
5019 }
5020
5021 static void tsc_khz_changed(void *data)
5022 {
5023         struct cpufreq_freqs *freq = data;
5024         unsigned long khz = 0;
5025
5026         if (data)
5027                 khz = freq->new;
5028         else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5029                 khz = cpufreq_quick_get(raw_smp_processor_id());
5030         if (!khz)
5031                 khz = tsc_khz;
5032         __this_cpu_write(cpu_tsc_khz, khz);
5033 }
5034
5035 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
5036                                      void *data)
5037 {
5038         struct cpufreq_freqs *freq = data;
5039         struct kvm *kvm;
5040         struct kvm_vcpu *vcpu;
5041         int i, send_ipi = 0;
5042
5043         /*
5044          * We allow guests to temporarily run on slowing clocks,
5045          * provided we notify them after, or to run on accelerating
5046          * clocks, provided we notify them before.  Thus time never
5047          * goes backwards.
5048          *
5049          * However, we have a problem.  We can't atomically update
5050          * the frequency of a given CPU from this function; it is
5051          * merely a notifier, which can be called from any CPU.
5052          * Changing the TSC frequency at arbitrary points in time
5053          * requires a recomputation of local variables related to
5054          * the TSC for each VCPU.  We must flag these local variables
5055          * to be updated and be sure the update takes place with the
5056          * new frequency before any guests proceed.
5057          *
5058          * Unfortunately, the combination of hotplug CPU and frequency
5059          * change creates an intractable locking scenario; the order
5060          * of when these callouts happen is undefined with respect to
5061          * CPU hotplug, and they can race with each other.  As such,
5062          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
5063          * undefined; you can actually have a CPU frequency change take
5064          * place in between the computation of X and the setting of the
5065          * variable.  To protect against this problem, all updates of
5066          * the per_cpu tsc_khz variable are done in an interrupt
5067          * protected IPI, and all callers wishing to update the value
5068          * must wait for a synchronous IPI to complete (which is trivial
5069          * if the caller is on the CPU already).  This establishes the
5070          * necessary total order on variable updates.
5071          *
5072          * Note that because a guest time update may take place
5073          * anytime after the setting of the VCPU's request bit, the
5074          * correct TSC value must be set before the request.  However,
5075          * to ensure the update actually makes it to any guest which
5076          * starts running in hardware virtualization between the set
5077          * and the acquisition of the spinlock, we must also ping the
5078          * CPU after setting the request bit.
5079          *
5080          */
5081
5082         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
5083                 return 0;
5084         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
5085                 return 0;
5086
5087         smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5088
5089         raw_spin_lock(&kvm_lock);
5090         list_for_each_entry(kvm, &vm_list, vm_list) {
5091                 kvm_for_each_vcpu(i, vcpu, kvm) {
5092                         if (vcpu->cpu != freq->cpu)
5093                                 continue;
5094                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5095                         if (vcpu->cpu != smp_processor_id())
5096                                 send_ipi = 1;
5097                 }
5098         }
5099         raw_spin_unlock(&kvm_lock);
5100
5101         if (freq->old < freq->new && send_ipi) {
5102                 /*
5103                  * We upscale the frequency.  We must make sure the guest
5104                  * doesn't see old kvmclock values while running with
5105                  * the new frequency, otherwise we risk the guest seeing
5106                  * time go backwards.
5107                  *
5108                  * In case we update the frequency for another cpu
5109                  * (which might be in guest context) send an interrupt
5110                  * to kick the cpu out of guest context.  Next time
5111                  * guest context is entered kvmclock will be updated,
5112                  * so the guest will not see stale values.
5113                  */
5114                 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5115         }
5116         return 0;
5117 }
5118
5119 static struct notifier_block kvmclock_cpufreq_notifier_block = {
5120         .notifier_call  = kvmclock_cpufreq_notifier
5121 };
5122
5123 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
5124                                         unsigned long action, void *hcpu)
5125 {
5126         unsigned int cpu = (unsigned long)hcpu;
5127
5128         switch (action) {
5129                 case CPU_ONLINE:
5130                 case CPU_DOWN_FAILED:
5131                         smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5132                         break;
5133                 case CPU_DOWN_PREPARE:
5134                         smp_call_function_single(cpu, tsc_bad, NULL, 1);
5135                         break;
5136         }
5137         return NOTIFY_OK;
5138 }
5139
5140 static struct notifier_block kvmclock_cpu_notifier_block = {
5141         .notifier_call  = kvmclock_cpu_notifier,
5142         .priority = -INT_MAX
5143 };
5144
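/*
 * Register the CPU hotplug notifier and, on hosts without a constant TSC,
 * the cpufreq transition notifier, then prime cpu_tsc_khz on every online
 * CPU.
 */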
5145 static void kvm_timer_init(void)
5146 {
5147         int cpu;
5148
5149         max_tsc_khz = tsc_khz;
5150         register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5151         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5152 #ifdef CONFIG_CPU_FREQ
5153                 struct cpufreq_policy policy;
5154                 memset(&policy, 0, sizeof(policy));
5155                 cpu = get_cpu();
5156                 cpufreq_get_policy(&policy, cpu);
5157                 if (policy.cpuinfo.max_freq)
5158                         max_tsc_khz = policy.cpuinfo.max_freq;
5159                 put_cpu();
5160 #endif
5161                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
5162                                           CPUFREQ_TRANSITION_NOTIFIER);
5163         }
5164         pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5165         for_each_online_cpu(cpu)
5166                 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5167 }
5168
5169 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
5170
5171 static int kvm_is_in_guest(void)
5172 {
5173         return percpu_read(current_vcpu) != NULL;
5174 }
5175
5176 static int kvm_is_user_mode(void)
5177 {
5178         int user_mode = 3;
5179
5180         if (percpu_read(current_vcpu))
5181                 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
5182
5183         return user_mode != 0;
5184 }
5185
5186 static unsigned long kvm_get_guest_ip(void)
5187 {
5188         unsigned long ip = 0;
5189
5190         if (percpu_read(current_vcpu))
5191                 ip = kvm_rip_read(percpu_read(current_vcpu));
5192
5193         return ip;
5194 }
5195
5196 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5197         .is_in_guest            = kvm_is_in_guest,
5198         .is_user_mode           = kvm_is_user_mode,
5199         .get_guest_ip           = kvm_get_guest_ip,
5200 };
5201
5202 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
5203 {
5204         percpu_write(current_vcpu, vcpu);
5205 }
5206 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
5207
5208 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
5209 {
5210         percpu_write(current_vcpu, NULL);
5211 }
5212 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
5213
5214 static void kvm_set_mmio_spte_mask(void)
5215 {
5216         u64 mask;
5217         int maxphyaddr = boot_cpu_data.x86_phys_bits;
5218
5219         /*
5220          * Set the reserved bits and the present bit of a paging-structure
5221          * entry to generate a page fault with PFER.RSV = 1.
5222          */
5223         mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
5224         mask |= 1ull;
5225
5226 #ifdef CONFIG_X86_64
5227         /*
5228          * If the reserved bit is not supported, clear the present bit
5229          * to disable MMIO page faults.
5230          */
5231         if (maxphyaddr == 52)
5232                 mask &= ~1ull;
5233 #endif
5234
5235         kvm_mmu_set_mmio_spte_mask(mask);
5236 }
5237
5238 int kvm_arch_init(void *opaque)
5239 {
5240         int r;
5241         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
5242
5243         if (kvm_x86_ops) {
5244                 printk(KERN_ERR "kvm: already loaded the other module\n");
5245                 r = -EEXIST;
5246                 goto out;
5247         }
5248
5249         if (!ops->cpu_has_kvm_support()) {
5250                 printk(KERN_ERR "kvm: no hardware support\n");
5251                 r = -EOPNOTSUPP;
5252                 goto out;
5253         }
5254         if (ops->disabled_by_bios()) {
5255                 printk(KERN_ERR "kvm: disabled by bios\n");
5256                 r = -EOPNOTSUPP;
5257                 goto out;
5258         }
5259
5260         r = kvm_mmu_module_init();
5261         if (r)
5262                 goto out;
5263
5264         kvm_set_mmio_spte_mask();
5265
5266         kvm_x86_ops = ops;
5267         kvm_init_msr_list();
5268
5269         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
5270                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
5271
5272         kvm_timer_init();
5273
5274         perf_register_guest_info_callbacks(&kvm_guest_cbs);
5275
5276         if (cpu_has_xsave)
5277                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
5278
5279         return 0;
5280
5281 out:
5282         return r;
5283 }
5284
5285 void kvm_arch_exit(void)
5286 {
5287         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5288
5289         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5290                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
5291                                             CPUFREQ_TRANSITION_NOTIFIER);
5292         unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5293         kvm_x86_ops = NULL;
5294         kvm_mmu_module_exit();
5295 }
5296
5297 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
5298 {
5299         ++vcpu->stat.halt_exits;
5300         if (irqchip_in_kernel(vcpu->kvm)) {
5301                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
5302                 return 1;
5303         } else {
5304                 vcpu->run->exit_reason = KVM_EXIT_HLT;
5305                 return 0;
5306         }
5307 }
5308 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
5309
5310 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
5311                            unsigned long a1)
5312 {
5313         if (is_long_mode(vcpu))
5314                 return a0;
5315         else
5316                 return a0 | ((gpa_t)a1 << 32);
5317 }
5318
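/*
 * Handle a Hyper-V hypercall: parameters arrive in RCX/RDX/R8 in long mode,
 * or as EDX:EAX, EBX:ECX and EDI:ESI pairs otherwise, and the 64-bit result
 * is returned the same way (RAX, or EDX:EAX).
 */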
5319 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
5320 {
5321         u64 param, ingpa, outgpa, ret;
5322         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
5323         bool fast, longmode;
5324         int cs_db, cs_l;
5325
5326         /*
5327          * A hypercall generates #UD from non-zero CPL or real mode,
5328          * per the Hyper-V spec.
5329          */
5330         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
5331                 kvm_queue_exception(vcpu, UD_VECTOR);
5332                 return 0;
5333         }
5334
5335         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5336         longmode = is_long_mode(vcpu) && cs_l == 1;
5337
5338         if (!longmode) {
5339                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
5340                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
5341                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
5342                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
5343                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
5344                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
5345         }
5346 #ifdef CONFIG_X86_64
5347         else {
5348                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
5349                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
5350                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
5351         }
5352 #endif
5353
5354         code = param & 0xffff;
5355         fast = (param >> 16) & 0x1;
5356         rep_cnt = (param >> 32) & 0xfff;
5357         rep_idx = (param >> 48) & 0xfff;
5358
5359         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
5360
5361         switch (code) {
5362         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
5363                 kvm_vcpu_on_spin(vcpu);
5364                 break;
5365         default:
5366                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
5367                 break;
5368         }
5369
5370         ret = res | (((u64)rep_done & 0xfff) << 32);
5371         if (longmode) {
5372                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5373         } else {
5374                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
5375                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
5376         }
5377
5378         return 1;
5379 }
5380
5381 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5382 {
5383         unsigned long nr, a0, a1, a2, a3, ret;
5384         int r = 1;
5385
5386         if (kvm_hv_hypercall_enabled(vcpu->kvm))
5387                 return kvm_hv_hypercall(vcpu);
5388
5389         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
5390         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
5391         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
5392         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
5393         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
5394
5395         trace_kvm_hypercall(nr, a0, a1, a2, a3);
5396
5397         if (!is_long_mode(vcpu)) {
5398                 nr &= 0xFFFFFFFF;
5399                 a0 &= 0xFFFFFFFF;
5400                 a1 &= 0xFFFFFFFF;
5401                 a2 &= 0xFFFFFFFF;
5402                 a3 &= 0xFFFFFFFF;
5403         }
5404
5405         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5406                 ret = -KVM_EPERM;
5407                 goto out;
5408         }
5409
5410         switch (nr) {
5411         case KVM_HC_VAPIC_POLL_IRQ:
5412                 ret = 0;
5413                 break;
5414         case KVM_HC_MMU_OP:
5415                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
5416                 break;
5417         default:
5418                 ret = -KVM_ENOSYS;
5419                 break;
5420         }
5421 out:
5422         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5423         ++vcpu->stat.hypercalls;
5424         return r;
5425 }
5426 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
5427
5428 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5429 {
5430         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5431         char instruction[3];
5432         unsigned long rip = kvm_rip_read(vcpu);
5433
5434         /*
5435          * Blow out the MMU so that no other VCPU has an active mapping,
5436          * ensuring that the updated hypercall appears atomically across
5437          * all VCPUs.
5438          */
5439         kvm_mmu_zap_all(vcpu->kvm);
5440
5441         kvm_x86_ops->patch_hypercall(vcpu, instruction);
5442
5443         return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5444 }
5445
5446 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
5447 {
5448         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
5449         int j, nent = vcpu->arch.cpuid_nent;
5450
5451         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
5452         /* when no next entry is found, the current entry[i] is reselected */
5453         for (j = i + 1; ; j = (j + 1) % nent) {
5454                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
5455                 if (ej->function == e->function) {
5456                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
5457                         return j;
5458                 }
5459         }
5460         return 0; /* silence gcc, even though control never reaches here */
5461 }
5462
5463 /* find an entry with matching function, matching index (if needed), and that
5464  * should be read next (if it's stateful) */
5465 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
5466         u32 function, u32 index)
5467 {
5468         if (e->function != function)
5469                 return 0;
5470         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
5471                 return 0;
5472         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
5473             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
5474                 return 0;
5475         return 1;
5476 }
5477
5478 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
5479                                               u32 function, u32 index)
5480 {
5481         int i;
5482         struct kvm_cpuid_entry2 *best = NULL;
5483
5484         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
5485                 struct kvm_cpuid_entry2 *e;
5486
5487                 e = &vcpu->arch.cpuid_entries[i];
5488                 if (is_matching_cpuid_entry(e, function, index)) {
5489                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
5490                                 move_to_next_stateful_cpuid_entry(vcpu, i);
5491                         best = e;
5492                         break;
5493                 }
5494         }
5495         return best;
5496 }
5497 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
5498
5499 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
5500 {
5501         struct kvm_cpuid_entry2 *best;
5502
5503         best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
5504         if (!best || best->eax < 0x80000008)
5505                 goto not_found;
5506         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
5507         if (best)
5508                 return best->eax & 0xff;
5509 not_found:
5510         return 36;
5511 }
5512
5513 /*
5514  * If no match is found, check whether we exceed the vCPU's limit
5515  * and return the content of the highest valid _standard_ leaf instead.
5516  * This is to satisfy the CPUID specification.
5517  */
5518 static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
5519                                                   u32 function, u32 index)
5520 {
5521         struct kvm_cpuid_entry2 *maxlevel;
5522
5523         maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
5524         if (!maxlevel || maxlevel->eax >= function)
5525                 return NULL;
5526         if (function & 0x80000000) {
5527                 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
5528                 if (!maxlevel)
5529                         return NULL;
5530         }
5531         return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
5532 }
5533
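/*
 * Emulate CPUID from the userspace-supplied cpuid_entries table, falling
 * back to the highest valid leaf (check_cpuid_limit) when the requested
 * function is out of range; unmatched leaves return all zeroes.
 */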
5534 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
5535 {
5536         u32 function, index;
5537         struct kvm_cpuid_entry2 *best;
5538
5539         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
5540         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
5541         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
5542         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
5543         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
5544         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
5545         best = kvm_find_cpuid_entry(vcpu, function, index);
5546
5547         if (!best)
5548                 best = check_cpuid_limit(vcpu, function, index);
5549
5550         if (best) {
5551                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
5552                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
5553                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
5554                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
5555         }
5556         kvm_x86_ops->skip_emulated_instruction(vcpu);
5557         trace_kvm_cpuid(function,
5558                         kvm_register_read(vcpu, VCPU_REGS_RAX),
5559                         kvm_register_read(vcpu, VCPU_REGS_RBX),
5560                         kvm_register_read(vcpu, VCPU_REGS_RCX),
5561                         kvm_register_read(vcpu, VCPU_REGS_RDX));
5562 }
5563 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
5564
5565 /*
5566  * Check if userspace requested an interrupt window, and that the
5567  * interrupt window is open.
5568  *
5569  * No need to exit to userspace if we already have an interrupt queued.
5570  */
5571 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5572 {
5573         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
5574                 vcpu->run->request_interrupt_window &&
5575                 kvm_arch_interrupt_allowed(vcpu));
5576 }
5577
5578 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5579 {
5580         struct kvm_run *kvm_run = vcpu->run;
5581
5582         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
5583         kvm_run->cr8 = kvm_get_cr8(vcpu);
5584         kvm_run->apic_base = kvm_get_apic_base(vcpu);
5585         if (irqchip_in_kernel(vcpu->kvm))
5586                 kvm_run->ready_for_interrupt_injection = 1;
5587         else
5588                 kvm_run->ready_for_interrupt_injection =
5589                         kvm_arch_interrupt_allowed(vcpu) &&
5590                         !kvm_cpu_has_interrupt(vcpu) &&
5591                         !kvm_event_needs_reinjection(vcpu);
5592 }
5593
5594 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5595 {
5596         int max_irr, tpr;
5597
5598         if (!kvm_x86_ops->update_cr8_intercept)
5599                 return;
5600
5601         if (!vcpu->arch.apic)
5602                 return;
5603
5604         if (!vcpu->arch.apic->vapic_addr)
5605                 max_irr = kvm_lapic_find_highest_irr(vcpu);
5606         else
5607                 max_irr = -1;
5608
5609         if (max_irr != -1)
5610                 max_irr >>= 4;
5611
5612         tpr = kvm_lapic_get_cr8(vcpu);
5613
5614         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
5615 }
5616
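/*
 * Event injection priority: re-inject anything that was already being
 * delivered (exception, then NMI, then interrupt) before injecting a new
 * NMI or a new interrupt from the APIC/PIC.
 */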
5617 static void inject_pending_event(struct kvm_vcpu *vcpu)
5618 {
5619         /* try to reinject previous events if any */
5620         if (vcpu->arch.exception.pending) {
5621                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
5622                                         vcpu->arch.exception.has_error_code,
5623                                         vcpu->arch.exception.error_code);
5624                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
5625                                           vcpu->arch.exception.has_error_code,
5626                                           vcpu->arch.exception.error_code,
5627                                           vcpu->arch.exception.reinject);
5628                 return;
5629         }
5630
5631         if (vcpu->arch.nmi_injected) {
5632                 kvm_x86_ops->set_nmi(vcpu);
5633                 return;
5634         }
5635
5636         if (vcpu->arch.interrupt.pending) {
5637                 kvm_x86_ops->set_irq(vcpu);
5638                 return;
5639         }
5640
5641         /* try to inject new event if pending */
5642         if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
5643                 --vcpu->arch.nmi_pending;
5644                 vcpu->arch.nmi_injected = true;
5645                 kvm_x86_ops->set_nmi(vcpu);
5646         } else if (kvm_cpu_has_interrupt(vcpu)) {
5647                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5648                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5649                                             false);
5650                         kvm_x86_ops->set_irq(vcpu);
5651                 }
5652         }
5653 }
5654
5655 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
5656 {
5657         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
5658                         !vcpu->guest_xcr0_loaded) {
5659                 /* kvm_set_xcr() also depends on this */
5660                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
5661                 vcpu->guest_xcr0_loaded = 1;
5662         }
5663 }
5664
5665 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
5666 {
5667         if (vcpu->guest_xcr0_loaded) {
5668                 if (vcpu->arch.xcr0 != host_xcr0)
5669                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
5670                 vcpu->guest_xcr0_loaded = 0;
5671         }
5672 }
5673
5674 static void process_nmi(struct kvm_vcpu *vcpu)
5675 {
5676         unsigned limit = 2;
5677
5678         /*
5679          * x86 is limited to one NMI running, and one NMI pending after it.
5680          * If an NMI is already in progress, limit further NMIs to just one.
5681          * Otherwise, allow two (and we'll inject the first one immediately).
5682          */
5683         if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
5684                 limit = 1;
5685
5686         vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
5687         vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
5688         kvm_make_request(KVM_REQ_EVENT, vcpu);
5689 }
5690
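/*
 * Run one iteration of the vcpu loop: process deferred requests, inject
 * pending events, enter the guest with interrupts disabled and handle the
 * resulting exit.  Returns > 0 to keep running in the kernel, <= 0 when we
 * have to return to userspace (or propagate an error).
 */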
5691 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5692 {
5693         int r;
5694         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5695                 vcpu->run->request_interrupt_window;
5696
5697         if (vcpu->requests) {
5698                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
5699                         kvm_mmu_unload(vcpu);
5700                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
5701                         __kvm_migrate_timers(vcpu);
5702                 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
5703                         r = kvm_guest_time_update(vcpu);
5704                         if (unlikely(r))
5705                                 goto out;
5706                 }
5707                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
5708                         kvm_mmu_sync_roots(vcpu);
5709                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
5710                         kvm_x86_ops->tlb_flush(vcpu);
5711                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
5712                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
5713                         r = 0;
5714                         goto out;
5715                 }
5716                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
5717                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5718                         r = 0;
5719                         goto out;
5720                 }
5721                 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
5722                         vcpu->fpu_active = 0;
5723                         kvm_x86_ops->fpu_deactivate(vcpu);
5724                 }
5725                 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
5726                         /* Page is swapped out. Do synthetic halt */
5727                         vcpu->arch.apf.halted = true;
5728                         r = 1;
5729                         goto out;
5730                 }
5731                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
5732                         record_steal_time(vcpu);
5733                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
5734                         process_nmi(vcpu);
5735
5736         }
5737
5738         r = kvm_mmu_reload(vcpu);
5739         if (unlikely(r))
5740                 goto out;
5741
5742         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5743                 inject_pending_event(vcpu);
5744
5745                 /* enable NMI/IRQ window open exits if needed */
5746                 if (vcpu->arch.nmi_pending)
5747                         kvm_x86_ops->enable_nmi_window(vcpu);
5748                 if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5749                         kvm_x86_ops->enable_irq_window(vcpu);
5750
5751                 if (kvm_lapic_enabled(vcpu)) {
5752                         update_cr8_intercept(vcpu);
5753                         kvm_lapic_sync_to_vapic(vcpu);
5754                 }
5755         }
5756
5757         preempt_disable();
5758
5759         kvm_x86_ops->prepare_guest_switch(vcpu);
5760         if (vcpu->fpu_active)
5761                 kvm_load_guest_fpu(vcpu);
5762         vcpu->mode = IN_GUEST_MODE;
5763
5764         /* We should set ->mode before checking ->requests,
5765          * see the comment in make_all_cpus_request.
5766          */
5767         smp_mb();
5768
5769         local_irq_disable();
5770
5771         if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
5772             || need_resched() || signal_pending(current)) {
5773                 vcpu->mode = OUTSIDE_GUEST_MODE;
5774                 smp_wmb();
5775                 local_irq_enable();
5776                 preempt_enable();
5777                 kvm_x86_ops->cancel_injection(vcpu);
5778                 r = 1;
5779                 goto out;
5780         }
5781
5782         kvm_load_guest_xcr0(vcpu);
5783
5784         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5785
5786         kvm_guest_enter();
5787
5788         if (unlikely(vcpu->arch.switch_db_regs)) {
5789                 set_debugreg(0, 7);
5790                 set_debugreg(vcpu->arch.eff_db[0], 0);
5791                 set_debugreg(vcpu->arch.eff_db[1], 1);
5792                 set_debugreg(vcpu->arch.eff_db[2], 2);
5793                 set_debugreg(vcpu->arch.eff_db[3], 3);
5794         }
5795
5796         trace_kvm_entry(vcpu->vcpu_id);
5797         kvm_x86_ops->run(vcpu);
5798
5799         /*
5800          * If the guest has used debug registers, at least dr7
5801          * will be disabled while returning to the host.
5802          * If we don't have active breakpoints in the host, we don't
5803          * care about the messed up debug address registers. But if
5804          * we have some of them active, restore the old state.
5805          */
5806         if (hw_breakpoint_active())
5807                 hw_breakpoint_restore();
5808
5809         vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
5810
5811         vcpu->mode = OUTSIDE_GUEST_MODE;
5812         smp_wmb();
5813
5814         kvm_put_guest_xcr0(vcpu);
5815
5816         local_irq_enable();
5817
5818         ++vcpu->stat.exits;
5819
5820         /*
5821          * We must have an instruction between local_irq_enable() and
5822          * kvm_guest_exit(), so the timer interrupt isn't delayed by
5823          * the interrupt shadow.  The stat.exits increment will do nicely.
5824          * But we need to prevent reordering, hence this barrier():
5825          */
5826         barrier();
5827
5828         kvm_guest_exit();
5829
5830         preempt_enable();
5831
5832         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5833
5834         /*
5835          * Profile KVM exit RIPs:
5836          */
5837         if (unlikely(prof_on == KVM_PROFILING)) {
5838                 unsigned long rip = kvm_rip_read(vcpu);
5839                 profile_hit(KVM_PROFILING, (void *)rip);
5840         }
5841
5842
5843         kvm_lapic_sync_from_vapic(vcpu);
5844
5845         r = kvm_x86_ops->handle_exit(vcpu);
5846 out:
5847         return r;
5848 }
5849
5850
5851 static int __vcpu_run(struct kvm_vcpu *vcpu)
5852 {
5853         int r;
5854         struct kvm *kvm = vcpu->kvm;
5855
5856         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
5857                 pr_debug("vcpu %d received sipi with vector # %x\n",
5858                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
5859                 kvm_lapic_reset(vcpu);
5860                 r = kvm_arch_vcpu_reset(vcpu);
5861                 if (r)
5862                         return r;
5863                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5864         }
5865
5866         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5867
5868         r = 1;
5869         while (r > 0) {
5870                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
5871                     !vcpu->arch.apf.halted)
5872                         r = vcpu_enter_guest(vcpu);
5873                 else {
5874                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5875                         kvm_vcpu_block(vcpu);
5876                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5877                         if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
5878                                 switch (vcpu->arch.mp_state) {
5880                                 case KVM_MP_STATE_HALTED:
5881                                         vcpu->arch.mp_state =
5882                                                 KVM_MP_STATE_RUNNABLE;
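                                        /* fall through: a halted vcpu becomes runnable */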
5883                                 case KVM_MP_STATE_RUNNABLE:
5884                                         vcpu->arch.apf.halted = false;
5885                                         break;
5886                                 case KVM_MP_STATE_SIPI_RECEIVED:
5887                                 default:
5888                                         r = -EINTR;
5889                                         break;
5890                                 }
5891                         }
5892                 }
5893
5894                 if (r <= 0)
5895                         break;
5896
5897                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
5898                 if (kvm_cpu_has_pending_timer(vcpu))
5899                         kvm_inject_pending_timer_irqs(vcpu);
5900
5901                 if (dm_request_for_irq_injection(vcpu)) {
5902                         r = -EINTR;
5903                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5904                         ++vcpu->stat.request_irq_exits;
5905                 }
5906
5907                 kvm_check_async_pf_completion(vcpu);
5908
5909                 if (signal_pending(current)) {
5910                         r = -EINTR;
5911                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5912                         ++vcpu->stat.signal_exits;
5913                 }
5914                 if (need_resched()) {
5915                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5916                         kvm_resched(vcpu);
5917                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5918                 }
5919         }
5920
5921         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5922
5923         return r;
5924 }
5925
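     /*
      * Complete an MMIO or PIO access that had to exit to userspace.  For
      * reads, the data userspace supplied is copied back into the vcpu's
      * mmio buffer; if further 8-byte chunks of the access remain, another
      * KVM_EXIT_MMIO is set up and 0 is returned so userspace can handle
      * the next chunk.  Completed writes need no further work; completed
      * reads (and pending PIO) re-enter the emulator to finish the
      * interrupted instruction.
      */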
5926 static int complete_mmio(struct kvm_vcpu *vcpu)
5927 {
5928         struct kvm_run *run = vcpu->run;
5929         int r;
5930
5931         if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
5932                 return 1;
5933
5934         if (vcpu->mmio_needed) {
5935                 vcpu->mmio_needed = 0;
5936                 if (!vcpu->mmio_is_write)
5937                         memcpy(vcpu->mmio_data + vcpu->mmio_index,
5938                                run->mmio.data, 8);
5939                 vcpu->mmio_index += 8;
5940                 if (vcpu->mmio_index < vcpu->mmio_size) {
5941                         run->exit_reason = KVM_EXIT_MMIO;
5942                         run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
5943                         memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
5944                         run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
5945                         run->mmio.is_write = vcpu->mmio_is_write;
5946                         vcpu->mmio_needed = 1;
5947                         return 0;
5948                 }
5949                 if (vcpu->mmio_is_write)
5950                         return 1;
5951                 vcpu->mmio_read_completed = 1;
5952         }
5953         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5954         r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5955         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5956         if (r != EMULATE_DONE)
5957                 return 0;
5958         return 1;
5959 }
5960
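     /*
      * Entry point for the KVM_RUN ioctl.  A typical userspace run loop
      * (rough sketch only, error handling omitted; vcpu_fd is the vcpu file
      * descriptor and run the mmap'ed struct kvm_run) looks like:
      *
      *	for (;;) {
      *		ioctl(vcpu_fd, KVM_RUN, 0);
      *		switch (run->exit_reason) {
      *		case KVM_EXIT_IO:   ...emulate the port access...   break;
      *		case KVM_EXIT_MMIO: ...emulate the memory access... break;
      *		...
      *		}
      *	}
      *
      * On every re-entry any MMIO/PIO access left pending by the previous
      * exit is completed first (complete_mmio) before the guest runs again.
      */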
5961 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5962 {
5963         int r;
5964         sigset_t sigsaved;
5965
5966         if (!tsk_used_math(current) && init_fpu(current))
5967                 return -ENOMEM;
5968
5969         if (vcpu->sigset_active)
5970                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5971
5972         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
5973                 kvm_vcpu_block(vcpu);
5974                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
5975                 r = -EAGAIN;
5976                 goto out;
5977         }
5978
5979         /* re-sync apic's tpr */
5980         if (!irqchip_in_kernel(vcpu->kvm)) {
5981                 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
5982                         r = -EINVAL;
5983                         goto out;
5984                 }
5985         }
5986
5987         r = complete_mmio(vcpu);
5988         if (r <= 0)
5989                 goto out;
5990
5991         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
5992                 kvm_register_write(vcpu, VCPU_REGS_RAX,
5993                                      kvm_run->hypercall.ret);
5994
5995         r = __vcpu_run(vcpu);
5996
5997 out:
5998         post_kvm_run_save(vcpu);
5999         if (vcpu->sigset_active)
6000                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
6001
6002         return r;
6003 }
6004
6005 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6006 {
6007         if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
6008                 /*
6009                  * We are here if userspace calls get_regs() in the middle of
6010                  * instruction emulation. Register state needs to be copied
6011                  * back from the emulation context to the vcpu. Userspace
6012                  * shouldn't usually do that, but some badly designed PV
6013                  * devices (vmware backdoor interface) need this to work.
6014                  */
6015                 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6016                 memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
6017                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6018         }
6019         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
6020         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
6021         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
6022         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
6023         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
6024         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
6025         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
6026         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
6027 #ifdef CONFIG_X86_64
6028         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
6029         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
6030         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
6031         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
6032         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
6033         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
6034         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
6035         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
6036 #endif
6037
6038         regs->rip = kvm_rip_read(vcpu);
6039         regs->rflags = kvm_get_rflags(vcpu);
6040
6041         return 0;
6042 }
6043
6044 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6045 {
6046         vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
6047         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6048
6049         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
6050         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
6051         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
6052         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
6053         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
6054         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
6055         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
6056         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
6057 #ifdef CONFIG_X86_64
6058         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
6059         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
6060         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
6061         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
6062         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
6063         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
6064         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
6065         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
6066 #endif
6067
6068         kvm_rip_write(vcpu, regs->rip);
6069         kvm_set_rflags(vcpu, regs->rflags);
6070
6071         vcpu->arch.exception.pending = false;
6072
6073         kvm_make_request(KVM_REQ_EVENT, vcpu);
6074
6075         return 0;
6076 }
6077
6078 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
6079 {
6080         struct kvm_segment cs;
6081
6082         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
6083         *db = cs.db;
6084         *l = cs.l;
6085 }
6086 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
6087
6088 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
6089                                   struct kvm_sregs *sregs)
6090 {
6091         struct desc_ptr dt;
6092
6093         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6094         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6095         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6096         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6097         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6098         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6099
6100         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6101         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6102
6103         kvm_x86_ops->get_idt(vcpu, &dt);
6104         sregs->idt.limit = dt.size;
6105         sregs->idt.base = dt.address;
6106         kvm_x86_ops->get_gdt(vcpu, &dt);
6107         sregs->gdt.limit = dt.size;
6108         sregs->gdt.base = dt.address;
6109
6110         sregs->cr0 = kvm_read_cr0(vcpu);
6111         sregs->cr2 = vcpu->arch.cr2;
6112         sregs->cr3 = kvm_read_cr3(vcpu);
6113         sregs->cr4 = kvm_read_cr4(vcpu);
6114         sregs->cr8 = kvm_get_cr8(vcpu);
6115         sregs->efer = vcpu->arch.efer;
6116         sregs->apic_base = kvm_get_apic_base(vcpu);
6117
6118         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
6119
6120         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
6121                 set_bit(vcpu->arch.interrupt.nr,
6122                         (unsigned long *)sregs->interrupt_bitmap);
6123
6124         return 0;
6125 }
6126
6127 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
6128                                     struct kvm_mp_state *mp_state)
6129 {
6130         mp_state->mp_state = vcpu->arch.mp_state;
6131         return 0;
6132 }
6133
6134 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
6135                                     struct kvm_mp_state *mp_state)
6136 {
6137         vcpu->arch.mp_state = mp_state->mp_state;
6138         kvm_make_request(KVM_REQ_EVENT, vcpu);
6139         return 0;
6140 }
6141
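     /*
      * Emulate a hardware task switch (e.g. via a task gate or an IRET with
      * NT set) on behalf of the vendor-specific (VMX/SVM) exit handler, then
      * copy the resulting register state back into the vcpu.
      */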
6142 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
6143                     bool has_error_code, u32 error_code)
6144 {
6145         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6146         int ret;
6147
6148         init_emulate_ctxt(vcpu);
6149
6150         ret = emulator_task_switch(ctxt, tss_selector, reason,
6151                                    has_error_code, error_code);
6152
6153         if (ret)
6154                 return EMULATE_FAIL;
6155
6156         memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
6157         kvm_rip_write(vcpu, ctxt->eip);
6158         kvm_set_rflags(vcpu, ctxt->eflags);
6159         kvm_make_request(KVM_REQ_EVENT, vcpu);
6160         return EMULATE_DONE;
6161 }
6162 EXPORT_SYMBOL_GPL(kvm_task_switch);
6163
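     /*
      * Install a complete set of special registers from userspace.  Changes
      * to CR0, CR3, CR4 or EFER force the MMU context to be rebuilt, the
      * PDPTRs are reloaded when the vcpu uses PAE paging outside of long
      * mode, and any interrupt recorded in the interrupt bitmap is re-queued.
      */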
6164 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6165                                   struct kvm_sregs *sregs)
6166 {
6167         int mmu_reset_needed = 0;
6168         int pending_vec, max_bits, idx;
6169         struct desc_ptr dt;
6170
6171         if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
6172                 return -EINVAL;
6173
6174         dt.size = sregs->idt.limit;
6175         dt.address = sregs->idt.base;
6176         kvm_x86_ops->set_idt(vcpu, &dt);
6177         dt.size = sregs->gdt.limit;
6178         dt.address = sregs->gdt.base;
6179         kvm_x86_ops->set_gdt(vcpu, &dt);
6180
6181         vcpu->arch.cr2 = sregs->cr2;
6182         mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
6183         vcpu->arch.cr3 = sregs->cr3;
6184         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
6185
6186         kvm_set_cr8(vcpu, sregs->cr8);
6187
6188         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
6189         kvm_x86_ops->set_efer(vcpu, sregs->efer);
6190         kvm_set_apic_base(vcpu, sregs->apic_base);
6191
6192         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
6193         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
6194         vcpu->arch.cr0 = sregs->cr0;
6195
6196         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
6197         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
6198         if (sregs->cr4 & X86_CR4_OSXSAVE)
6199                 update_cpuid(vcpu);
6200
6201         idx = srcu_read_lock(&vcpu->kvm->srcu);
6202         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
6203                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
6204                 mmu_reset_needed = 1;
6205         }
6206         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6207
6208         if (mmu_reset_needed)
6209                 kvm_mmu_reset_context(vcpu);
6210
6211         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
6212         pending_vec = find_first_bit(
6213                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
6214         if (pending_vec < max_bits) {
6215                 kvm_queue_interrupt(vcpu, pending_vec, false);
6216                 pr_debug("Set back pending irq %d\n", pending_vec);
6217         }
6218
6219         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6220         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6221         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6222         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6223         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6224         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6225
6226         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6227         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6228
6229         update_cr8_intercept(vcpu);
6230
6231         /* Older userspace won't unhalt the vcpu on reset. */
6232         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
6233             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
6234             !is_protmode(vcpu))
6235                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6236
6237         kvm_make_request(KVM_REQ_EVENT, vcpu);
6238
6239         return 0;
6240 }
6241
6242 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
6243                                         struct kvm_guest_debug *dbg)
6244 {
6245         unsigned long rflags;
6246         int i, r;
6247
6248         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
6249                 r = -EBUSY;
6250                 if (vcpu->arch.exception.pending)
6251                         goto out;
6252                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
6253                         kvm_queue_exception(vcpu, DB_VECTOR);
6254                 else
6255                         kvm_queue_exception(vcpu, BP_VECTOR);
6256         }
6257
6258         /*
6259          * Read rflags as long as potentially injected trace flags are still
6260          * filtered out.
6261          */
6262         rflags = kvm_get_rflags(vcpu);
6263
6264         vcpu->guest_debug = dbg->control;
6265         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
6266                 vcpu->guest_debug = 0;
6267
6268         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
6269                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
6270                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
6271                 vcpu->arch.switch_db_regs =
6272                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
6273         } else {
6274                 for (i = 0; i < KVM_NR_DB_REGS; i++)
6275                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
6276                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
6277         }
6278
6279         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6280                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
6281                         get_segment_base(vcpu, VCPU_SREG_CS);
6282
6283         /*
6284          * Trigger an rflags update that will inject or remove the trace
6285          * flags.
6286          */
6287         kvm_set_rflags(vcpu, rflags);
6288
6289         kvm_x86_ops->set_guest_debug(vcpu, dbg);
6290
6291         r = 0;
6292
6293 out:
6294
6295         return r;
6296 }
6297
6298 /*
6299  * Translate a guest virtual address to a guest physical address.
6300  */
6301 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
6302                                     struct kvm_translation *tr)
6303 {
6304         unsigned long vaddr = tr->linear_address;
6305         gpa_t gpa;
6306         int idx;
6307
6308         idx = srcu_read_lock(&vcpu->kvm->srcu);
6309         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
6310         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6311         tr->physical_address = gpa;
6312         tr->valid = gpa != UNMAPPED_GVA;
6313         tr->writeable = 1;
6314         tr->usermode = 0;
6315
6316         return 0;
6317 }
6318
6319 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
6320 {
6321         struct i387_fxsave_struct *fxsave =
6322                         &vcpu->arch.guest_fpu.state->fxsave;
6323
6324         memcpy(fpu->fpr, fxsave->st_space, 128);
6325         fpu->fcw = fxsave->cwd;
6326         fpu->fsw = fxsave->swd;
6327         fpu->ftwx = fxsave->twd;
6328         fpu->last_opcode = fxsave->fop;
6329         fpu->last_ip = fxsave->rip;
6330         fpu->last_dp = fxsave->rdp;
6331         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
6332
6333         return 0;
6334 }
6335
6336 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
6337 {
6338         struct i387_fxsave_struct *fxsave =
6339                         &vcpu->arch.guest_fpu.state->fxsave;
6340
6341         memcpy(fxsave->st_space, fpu->fpr, 128);
6342         fxsave->cwd = fpu->fcw;
6343         fxsave->swd = fpu->fsw;
6344         fxsave->twd = fpu->ftwx;
6345         fxsave->fop = fpu->last_opcode;
6346         fxsave->rip = fpu->last_ip;
6347         fxsave->rdp = fpu->last_dp;
6348         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
6349
6350         return 0;
6351 }
6352
6353 int fx_init(struct kvm_vcpu *vcpu)
6354 {
6355         int err;
6356
6357         err = fpu_alloc(&vcpu->arch.guest_fpu);
6358         if (err)
6359                 return err;
6360
6361         fpu_finit(&vcpu->arch.guest_fpu);
6362
6363         /*
6364          * Ensure guest xcr0 is valid for loading
6365          */
6366         vcpu->arch.xcr0 = XSTATE_FP;
6367
6368         vcpu->arch.cr0 |= X86_CR0_ET;
6369
6370         return 0;
6371 }
6372 EXPORT_SYMBOL_GPL(fx_init);
6373
6374 static void fx_free(struct kvm_vcpu *vcpu)
6375 {
6376         fpu_free(&vcpu->arch.guest_fpu);
6377 }
6378
6379 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
6380 {
6381         if (vcpu->guest_fpu_loaded)
6382                 return;
6383
6384         /*
6385          * Restore all possible FPU states for the guest, and assume the
6386          * host may use all available state bits.  The guest xcr0 will be
6387          * loaded later.
6388          */
6389         vcpu->guest_fpu_loaded = 1;
6390         unlazy_fpu(current);
6391         fpu_restore_checking(&vcpu->arch.guest_fpu);
6392         trace_kvm_fpu(1);
6393 }
6394
6395 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
6396 {
6397         if (!vcpu->guest_fpu_loaded)
6398                 return;
6399
6400         vcpu->guest_fpu_loaded = 0;
6401         fpu_save_init(&vcpu->arch.guest_fpu);
6402         ++vcpu->stat.fpu_reload;
6403         kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
6404         trace_kvm_fpu(0);
6405 }
6406
6407 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
6408 {
6409         kvmclock_reset(vcpu);
6410
6411         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
6412         fx_free(vcpu);
6413         kvm_x86_ops->vcpu_free(vcpu);
6414 }
6415
6416 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
6417                                                 unsigned int id)
6418 {
6419         if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
6420                 printk_once(KERN_WARNING
6421                 "kvm: SMP vm created on host with unstable TSC; "
6422                 "guest TSC will not be reliable\n");
6423         return kvm_x86_ops->vcpu_create(kvm, id);
6424 }
6425
6426 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
6427 {
6428         int r;
6429
6430         vcpu->arch.mtrr_state.have_fixed = 1;
6431         vcpu_load(vcpu);
6432         r = kvm_arch_vcpu_reset(vcpu);
6433         if (r == 0)
6434                 r = kvm_mmu_setup(vcpu);
6435         vcpu_put(vcpu);
6436
6437         return r;
6438 }
6439
6440 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
6441 {
6442         vcpu->arch.apf.msr_val = 0;
6443
6444         vcpu_load(vcpu);
6445         kvm_mmu_unload(vcpu);
6446         vcpu_put(vcpu);
6447
6448         fx_free(vcpu);
6449         kvm_x86_ops->vcpu_free(vcpu);
6450 }
6451
6452 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
6453 {
6454         atomic_set(&vcpu->arch.nmi_queued, 0);
6455         vcpu->arch.nmi_pending = 0;
6456         vcpu->arch.nmi_injected = false;
6457
6458         vcpu->arch.switch_db_regs = 0;
6459         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
6460         vcpu->arch.dr6 = DR6_FIXED_1;
6461         vcpu->arch.dr7 = DR7_FIXED_1;
6462
6463         kvm_make_request(KVM_REQ_EVENT, vcpu);
6464         vcpu->arch.apf.msr_val = 0;
6465         vcpu->arch.st.msr_val = 0;
6466
6467         kvmclock_reset(vcpu);
6468
6469         kvm_clear_async_pf_completion_queue(vcpu);
6470         kvm_async_pf_hash_reset(vcpu);
6471         vcpu->arch.apf.halted = false;
6472
6473         return kvm_x86_ops->vcpu_reset(vcpu);
6474 }
6475
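     /*
      * Re-enable hardware virtualization on this CPU (e.g. after CPU hotplug
      * or resume).  The shared user-return MSR state is refreshed, and every
      * vcpu that last ran on this CPU is asked to update its kvmclock, since
      * the TSC may have changed while virtualization was disabled.
      */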
6476 int kvm_arch_hardware_enable(void *garbage)
6477 {
6478         struct kvm *kvm;
6479         struct kvm_vcpu *vcpu;
6480         int i;
6481
6482         kvm_shared_msr_cpu_online();
6483         list_for_each_entry(kvm, &vm_list, vm_list)
6484                 kvm_for_each_vcpu(i, vcpu, kvm)
6485                         if (vcpu->cpu == smp_processor_id())
6486                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6487         return kvm_x86_ops->hardware_enable(garbage);
6488 }
6489
6490 void kvm_arch_hardware_disable(void *garbage)
6491 {
6492         kvm_x86_ops->hardware_disable(garbage);
6493         drop_user_return_notifiers(garbage);
6494 }
6495
6496 int kvm_arch_hardware_setup(void)
6497 {
6498         return kvm_x86_ops->hardware_setup();
6499 }
6500
6501 void kvm_arch_hardware_unsetup(void)
6502 {
6503         kvm_x86_ops->hardware_unsetup();
6504 }
6505
6506 void kvm_arch_check_processor_compat(void *rtn)
6507 {
6508         kvm_x86_ops->check_processor_compatibility(rtn);
6509 }
6510
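     /*
      * A vcpu is compatible with its VM only if the irqchip configurations
      * match: an in-kernel irqchip requires the vcpu to have an in-kernel
      * local APIC, and vice versa.
      */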
6511 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
6512 {
6513         return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
6514 }
6515
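     /*
      * Allocate the architecture-specific per-vcpu state: the pio bounce
      * page, the MMU, an in-kernel local APIC (only when the VM uses an
      * in-kernel irqchip), the MCE bank array and the wbinvd dirty mask.
      * Failures unwind the allocations in reverse order.
      */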
6516 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6517 {
6518         struct page *page;
6519         struct kvm *kvm;
6520         int r;
6521
6522         BUG_ON(vcpu->kvm == NULL);
6523         kvm = vcpu->kvm;
6524
6525         vcpu->arch.emulate_ctxt.ops = &emulate_ops;
6526         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
6527         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
6528         vcpu->arch.mmu.translate_gpa = translate_gpa;
6529         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
6530         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
6531                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6532         else
6533                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
6534
6535         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
6536         if (!page) {
6537                 r = -ENOMEM;
6538                 goto fail;
6539         }
6540         vcpu->arch.pio_data = page_address(page);
6541
6542         kvm_init_tsc_catchup(vcpu, max_tsc_khz);
6543
6544         r = kvm_mmu_create(vcpu);
6545         if (r < 0)
6546                 goto fail_free_pio_data;
6547
6548         if (irqchip_in_kernel(kvm)) {
6549                 r = kvm_create_lapic(vcpu);
6550                 if (r < 0)
6551                         goto fail_mmu_destroy;
6552         }
6553
6554         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
6555                                        GFP_KERNEL);
6556         if (!vcpu->arch.mce_banks) {
6557                 r = -ENOMEM;
6558                 goto fail_free_lapic;
6559         }
6560         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
6561
6562         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
                     r = -ENOMEM;
6563                 goto fail_free_mce_banks;
             }
6564
6565         vcpu->arch.pv_time_enabled = false;
6566         kvm_async_pf_hash_reset(vcpu);
6567
6568         return 0;
6569 fail_free_mce_banks:
6570         kfree(vcpu->arch.mce_banks);
6571 fail_free_lapic:
6572         kvm_free_lapic(vcpu);
6573 fail_mmu_destroy:
6574         kvm_mmu_destroy(vcpu);
6575 fail_free_pio_data:
6576         free_page((unsigned long)vcpu->arch.pio_data);
6577 fail:
6578         return r;
6579 }
6580
6581 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
6582 {
6583         int idx;
6584
6585         kfree(vcpu->arch.mce_banks);
6586         kvm_free_lapic(vcpu);
6587         idx = srcu_read_lock(&vcpu->kvm->srcu);
6588         kvm_mmu_destroy(vcpu);
6589         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6590         free_page((unsigned long)vcpu->arch.pio_data);
6591 }
6592
6593 int kvm_arch_init_vm(struct kvm *kvm)
6594 {
6595         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6596         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
6597
6598         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
6599         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
6600
6601         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
6602
6603         return 0;
6604 }
6605
6606 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
6607 {
6608         vcpu_load(vcpu);
6609         kvm_mmu_unload(vcpu);
6610         vcpu_put(vcpu);
6611 }
6612
6613 static void kvm_free_vcpus(struct kvm *kvm)
6614 {
6615         unsigned int i;
6616         struct kvm_vcpu *vcpu;
6617
6618         /*
6619          * Unpin any mmu pages first.
6620          */
6621         kvm_for_each_vcpu(i, vcpu, kvm) {
6622                 kvm_clear_async_pf_completion_queue(vcpu);
6623                 kvm_unload_vcpu_mmu(vcpu);
6624         }
6625         kvm_for_each_vcpu(i, vcpu, kvm)
6626                 kvm_arch_vcpu_free(vcpu);
6627
6628         mutex_lock(&kvm->lock);
6629         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
6630                 kvm->vcpus[i] = NULL;
6631
6632         atomic_set(&kvm->online_vcpus, 0);
6633         mutex_unlock(&kvm->lock);
6634 }
6635
6636 void kvm_arch_sync_events(struct kvm *kvm)
6637 {
6638         kvm_free_all_assigned_devices(kvm);
6639         kvm_free_pit(kvm);
6640 }
6641
6642 void kvm_arch_destroy_vm(struct kvm *kvm)
6643 {
6644         kvm_iommu_unmap_guest(kvm);
6645         kfree(kvm->arch.vpic);
6646         kfree(kvm->arch.vioapic);
6647         kvm_free_vcpus(kvm);
6648         if (kvm->arch.apic_access_page)
6649                 put_page(kvm->arch.apic_access_page);
6650         if (kvm->arch.ept_identity_pagetable)
6651                 put_page(kvm->arch.ept_identity_pagetable);
6652 }
6653
6654 int kvm_arch_prepare_memory_region(struct kvm *kvm,
6655                                 struct kvm_memory_slot *memslot,
6656                                 struct kvm_memory_slot old,
6657                                 struct kvm_userspace_memory_region *mem,
6658                                 int user_alloc)
6659 {
6660         int npages = memslot->npages;
6661         int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
6662
6663         /* Prevent internal slot pages from being moved by fork()/COW. */
6664         if (memslot->id >= KVM_MEMORY_SLOTS)
6665                 map_flags = MAP_SHARED | MAP_ANONYMOUS;
6666
6667         /* To keep backward compatibility with older userspace,
6668          * x86 needs to handle the !user_alloc case.
6669          */
6670         if (!user_alloc) {
6671                 if (npages && !old.rmap) {
6672                         unsigned long userspace_addr;
6673
6674                         down_write(&current->mm->mmap_sem);
6675                         userspace_addr = do_mmap(NULL, 0,
6676                                                  npages * PAGE_SIZE,
6677                                                  PROT_READ | PROT_WRITE,
6678                                                  map_flags,
6679                                                  0);
6680                         up_write(&current->mm->mmap_sem);
6681
6682                         if (IS_ERR((void *)userspace_addr))
6683                                 return PTR_ERR((void *)userspace_addr);
6684
6685                         memslot->userspace_addr = userspace_addr;
6686                 }
6687         }
6688
6689
6690         return 0;
6691 }
6692
6693 void kvm_arch_commit_memory_region(struct kvm *kvm,
6694                                 struct kvm_userspace_memory_region *mem,
6695                                 struct kvm_memory_slot old,
6696                                 int user_alloc)
6697 {
6699         int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
6700
6701         if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
6702                 int ret;
6703
6704                 down_write(&current->mm->mmap_sem);
6705                 ret = do_munmap(current->mm, old.userspace_addr,
6706                                 old.npages * PAGE_SIZE);
6707                 up_write(&current->mm->mmap_sem);
6708                 if (ret < 0)
6709                         printk(KERN_WARNING
6710                                "kvm_vm_ioctl_set_memory_region: "
6711                                "failed to munmap memory\n");
6712         }
6713
6714         if (!kvm->arch.n_requested_mmu_pages)
6715                 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
6716
6717         spin_lock(&kvm->mmu_lock);
6718         if (nr_mmu_pages)
6719                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
6720         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
6721         spin_unlock(&kvm->mmu_lock);
6722 }
6723
6724 void kvm_arch_flush_shadow(struct kvm *kvm)
6725 {
6726         kvm_mmu_zap_all(kvm);
6727         kvm_reload_remote_mmus(kvm);
6728 }
6729
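     /*
      * A vcpu is runnable (i.e. should not stay blocked) if it is RUNNABLE
      * and not halted on an async page fault, has completed async page
      * faults to report, has received a SIPI, has an NMI queued, or has an
      * interrupt that can currently be delivered.
      */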
6730 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
6731 {
6732         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6733                 !vcpu->arch.apf.halted)
6734                 || !list_empty_careful(&vcpu->async_pf.done)
6735                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
6736                 || atomic_read(&vcpu->arch.nmi_queued)
6737                 || (kvm_arch_interrupt_allowed(vcpu) &&
6738                     kvm_cpu_has_interrupt(vcpu));
6739 }
6740
6741 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
6742 {
6743         int me;
6744         int cpu = vcpu->cpu;
6745
6746         if (waitqueue_active(&vcpu->wq)) {
6747                 wake_up_interruptible(&vcpu->wq);
6748                 ++vcpu->stat.halt_wakeup;
6749         }
6750
6751         me = get_cpu();
6752         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
6753                 if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
6754                         smp_send_reschedule(cpu);
6755         put_cpu();
6756 }
6757
6758 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
6759 {
6760         return kvm_x86_ops->interrupt_allowed(vcpu);
6761 }
6762
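     /*
      * Compare a previously recorded linear RIP (CS base + RIP) with the
      * vcpu's current one; used to tell whether a single-step location
      * recorded earlier still refers to the current instruction.
      */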
6763 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
6764 {
6765         unsigned long current_rip = kvm_rip_read(vcpu) +
6766                 get_segment_base(vcpu, VCPU_SREG_CS);
6767
6768         return current_rip == linear_rip;
6769 }
6770 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
6771
6772 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
6773 {
6774         unsigned long rflags;
6775
6776         rflags = kvm_x86_ops->get_rflags(vcpu);
6777         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6778                 rflags &= ~X86_EFLAGS_TF;
6779         return rflags;
6780 }
6781 EXPORT_SYMBOL_GPL(kvm_get_rflags);
6782
6783 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6784 {
6785         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
6786             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6787                 rflags |= X86_EFLAGS_TF;
6788         kvm_x86_ops->set_rflags(vcpu, rflags);
6789         kvm_make_request(KVM_REQ_EVENT, vcpu);
6790 }
6791 EXPORT_SYMBOL_GPL(kvm_set_rflags);
6792
6793 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
6794 {
6795         int r;
6796
6797         if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
6798               is_error_page(work->page))
6799                 return;
6800
6801         r = kvm_mmu_reload(vcpu);
6802         if (unlikely(r))
6803                 return;
6804
6805         if (!vcpu->arch.mmu.direct_map &&
6806               work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
6807                 return;
6808
6809         vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
6810 }
6811
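     /*
      * Outstanding async page faults are tracked in a small open-addressed
      * hash table of guest frame numbers (vcpu->arch.apf.gfns), sized to the
      * next power of two of ASYNC_PF_PER_VCPU.  Empty slots hold ~0; lookups
      * and insertions use linear probing, and the deletion helper below
      * backshifts later entries so probe chains stay intact.
      */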
6812 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
6813 {
6814         return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
6815 }
6816
6817 static inline u32 kvm_async_pf_next_probe(u32 key)
6818 {
6819         return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
6820 }
6821
6822 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6823 {
6824         u32 key = kvm_async_pf_hash_fn(gfn);
6825
6826         while (vcpu->arch.apf.gfns[key] != ~0)
6827                 key = kvm_async_pf_next_probe(key);
6828
6829         vcpu->arch.apf.gfns[key] = gfn;
6830 }
6831
6832 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
6833 {
6834         int i;
6835         u32 key = kvm_async_pf_hash_fn(gfn);
6836
6837         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
6838                      (vcpu->arch.apf.gfns[key] != gfn &&
6839                       vcpu->arch.apf.gfns[key] != ~0); i++)
6840                 key = kvm_async_pf_next_probe(key);
6841
6842         return key;
6843 }
6844
6845 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6846 {
6847         return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
6848 }
6849
6850 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6851 {
6852         u32 i, j, k;
6853
6854         i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
6855         while (true) {
6856                 vcpu->arch.apf.gfns[i] = ~0;
6857                 do {
6858                         j = kvm_async_pf_next_probe(j);
6859                         if (vcpu->arch.apf.gfns[j] == ~0)
6860                                 return;
6861                         k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
6862                         /*
6863                          * k lies cyclically in ]i,j]
6864                          * |    i.k.j |
6865                          * |....j i.k.| or  |.k..j i...|
6866                          */
6867                 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
6868                 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
6869                 i = j;
6870         }
6871 }
6872
6873 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
6874 {
6875
6876         return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
6877                                       sizeof(val));
6878 }
6879
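     /*
      * Notify the guest that the page it touched is not yet present.  If the
      * guest has not enabled async page faults, or faults may only be
      * delivered to user mode while the guest is currently at CPL 0, the
      * vcpu is simply halted until the page arrives.  Otherwise the reason
      * code is written to the guest's shared area and a page fault is
      * injected whose address carries the async PF token that the guest will
      * later see completed.
      */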
6880 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
6881                                      struct kvm_async_pf *work)
6882 {
6883         struct x86_exception fault;
6884
6885         trace_kvm_async_pf_not_present(work->arch.token, work->gva);
6886         kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
6887
6888         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
6889             (vcpu->arch.apf.send_user_only &&
6890              kvm_x86_ops->get_cpl(vcpu) == 0))
6891                 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
6892         else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
6893                 fault.vector = PF_VECTOR;
6894                 fault.error_code_valid = true;
6895                 fault.error_code = 0;
6896                 fault.nested_page_fault = false;
6897                 fault.address = work->arch.token;
6898                 kvm_inject_page_fault(vcpu, &fault);
6899         }
6900 }
6901
6902 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
6903                                  struct kvm_async_pf *work)
6904 {
6905         struct x86_exception fault;
6906
6907         trace_kvm_async_pf_ready(work->arch.token, work->gva);
6908         if (is_error_page(work->page))
6909                 work->arch.token = ~0; /* broadcast wakeup */
6910         else
6911                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
6912
6913         if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
6914             !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
6915                 fault.vector = PF_VECTOR;
6916                 fault.error_code_valid = true;
6917                 fault.error_code = 0;
6918                 fault.nested_page_fault = false;
6919                 fault.address = work->arch.token;
6920                 kvm_inject_page_fault(vcpu, &fault);
6921         }
6922         vcpu->arch.apf.halted = false;
6923 }
6924
6925 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
6926 {
6927         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
6928                 return true;
6929         else
6930                 return !kvm_event_needs_reinjection(vcpu) &&
6931                         kvm_x86_ops->interrupt_allowed(vcpu);
6932 }
6933
6934 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
6935 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
6936 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
6937 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
6938 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
6939 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
6940 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
6941 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
6942 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
6943 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
6944 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
6945 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);