#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

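/*
 * Any still-pending exception, interrupt or NMI must be (re)injected
 * before a new event can be delivered to the guest.
 */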
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

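/*
 * #BP and #OF are raised by executing int3/into, so they are "soft"
 * exceptions: injection must account for the length of the raising
 * instruction.
 */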
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

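/*
 * Guest-mode predicates, evaluated against KVM's cached copy of the guest
 * CR0/CR4 and EFER state.
 */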
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}

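/* Masking with 31 keeps the shift count defined for a 32-bit result. */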
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

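/*
 * Cache the gva->gfn translation of the last MMIO access so repeated
 * accesses to the same page can skip the page-table walk during emulation.
 * mmio_gen snapshots the memslot generation; any later memslot update bumps
 * the generation and thereby invalidates the cached info.
 */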
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);

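/* Lower bound enforced on the guest LAPIC timer period, in microseconds. */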
extern unsigned int min_timer_period_us;

#endif