{ "io_exits", &kvm_stat.io_exits },
{ "mmio_exits", &kvm_stat.mmio_exits },
{ "signal_exits", &kvm_stat.signal_exits },
+ { "irq_window", &kvm_stat.irq_window_exits },
+ { "halt_exits", &kvm_stat.halt_exits },
+ { "request_irq", &kvm_stat.request_irq_exits },
{ "irq_exits", &kvm_stat.irq_exits },
{ 0, 0 }
};
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
-struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
-{
- int i;
-
- for (i = 0; i < vcpu->nmsrs; ++i)
- if (vcpu->guest_msrs[i].index == msr)
- return &vcpu->guest_msrs[i];
- return 0;
-}
-EXPORT_SYMBOL_GPL(find_msr_entry);
-
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct segment_descriptor_64 {
struct segment_descriptor s;
}
d = (struct segment_descriptor *)(table_base + (selector & ~7));
v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
if (d->system == 0
&& (d->type == 2 || d->type == 9 || d->type == 11))
v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
}
EXPORT_SYMBOL_GPL(segment_base);
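+/* Reject out-of-range vcpu slot indices supplied by userspace. */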
+static inline int valid_vcpu(int n)
+{
+ return likely(n >= 0 && n < KVM_MAX_VCPUS);
+}
+
int kvm_read_guest(struct kvm_vcpu *vcpu,
gva_t addr,
unsigned long size,
static void vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_arch_ops->vcpu_put(vcpu);
- put_cpu();
mutex_unlock(&vcpu->mutex);
}
if (!dont || free->phys_mem != dont->phys_mem)
if (free->phys_mem) {
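+		/* entries may be NULL; free only pages that were actually allocated */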
for (i = 0; i < free->npages; ++i)
- __free_page(free->phys_mem[i]);
+ if (free->phys_mem[i])
+ __free_page(free->phys_mem[i]);
vfree(free->phys_mem);
}
}
if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
if ((vcpu->shadow_efer & EFER_LME)) {
int cs_db, cs_l;
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
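+	/* refresh vcpu->cr0 from the arch cache before the read-modify-write */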
+ kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);
return;
}
- if (kvm_arch_ops->is_long_mode(vcpu)) {
+ if (is_long_mode(vcpu)) {
if (!(cr4 & CR4_PAE_MASK)) {
printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
"in long mode\n");
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
- if (kvm_arch_ops->is_long_mode(vcpu)) {
+ if (is_long_mode(vcpu)) {
	if (cr3 & CR3_L_MODE_RESEVED_BITS) {
printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
inject_gp(vcpu);
struct kvm_vcpu *vcpu;
r = -EINVAL;
- if (n < 0 || n >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(n))
goto out;
vcpu = &kvm->vcpus[n];
if (r < 0)
goto out_free_vcpus;
- kvm_arch_ops->vcpu_load(vcpu);
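+	/* create the mmu before loading the vcpu; only the setup calls below need it loaded */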
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
- r = kvm_arch_ops->vcpu_setup(vcpu);
+ kvm_arch_ops->vcpu_load(vcpu);
+ r = kvm_mmu_setup(vcpu);
if (r >= 0)
- r = kvm_mmu_init(vcpu);
-
+ r = kvm_arch_ops->vcpu_setup(vcpu);
vcpu_put(vcpu);
if (r < 0)
int emulate_clts(struct kvm_vcpu *vcpu)
{
- unsigned long cr0 = vcpu->cr0;
+ unsigned long cr0;
- cr0 &= ~CR0_TS_MASK;
+ kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+ cr0 = vcpu->cr0 & ~CR0_TS_MASK;
kvm_arch_ops->set_cr0(vcpu, cr0);
return X86EMUL_CONTINUE;
}
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
+ kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
switch (cr) {
case 0:
return vcpu->cr0;
}
}
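+/*
+ * Reads an msr whose handling is shared between the arch backends.
+ * Returns 0 on success, non-0 if the msr is unhandled.
+ */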
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ u64 data;
+
+ switch (msr) {
+ case 0xc0010010: /* SYSCFG */
+ case 0xc0010015: /* HWCR */
+ case MSR_IA32_PLATFORM_ID:
+ case MSR_IA32_P5_MC_ADDR:
+ case MSR_IA32_P5_MC_TYPE:
+ case MSR_IA32_MC0_CTL:
+ case MSR_IA32_MCG_STATUS:
+ case MSR_IA32_MCG_CAP:
+ case MSR_IA32_MC0_MISC:
+ case MSR_IA32_MC0_MISC+4:
+ case MSR_IA32_MC0_MISC+8:
+ case MSR_IA32_MC0_MISC+12:
+ case MSR_IA32_MC0_MISC+16:
+ case MSR_IA32_UCODE_REV:
+ case MSR_IA32_PERF_STATUS:
+ /* MTRR registers */
+ case 0xfe:
+ case 0x200 ... 0x2ff:
+ data = 0;
+ break;
+ case 0xcd: /* fsb frequency */
+ data = 3;
+ break;
+ case MSR_IA32_APICBASE:
+ data = vcpu->apic_base;
+ break;
+#ifdef CONFIG_X86_64
+ case MSR_EFER:
+ data = vcpu->shadow_efer;
+ break;
+#endif
+ default:
+ printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
+ return 1;
+ }
+ *pdata = data;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_msr_common);
+
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
-void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- struct vmx_msr_entry *msr;
-
if (efer & EFER_RESERVED_BITS) {
printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
efer);
return;
}
+ kvm_arch_ops->set_efer(vcpu, efer);
+
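+	/* the guest cannot set EFER.LMA directly; keep the current shadowed bit */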
efer &= ~EFER_LMA;
efer |= vcpu->shadow_efer & EFER_LMA;
vcpu->shadow_efer = efer;
-
- msr = find_msr_entry(vcpu, MSR_EFER);
-
- if (!(efer & EFER_LMA))
- efer &= ~EFER_LME;
- msr->data = efer;
}
-EXPORT_SYMBOL_GPL(set_efer);
#endif
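+/*
+ * Writes an msr whose handling is shared between the arch backends.
+ * Returns 0 on success, non-0 if the msr is unhandled.
+ */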
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ switch (msr) {
+#ifdef CONFIG_X86_64
+ case MSR_EFER:
+ set_efer(vcpu, data);
+ break;
+#endif
+ case MSR_IA32_MC0_STATUS:
+ printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
+		       __func__, data);
+ break;
+ case MSR_IA32_UCODE_REV:
+ case MSR_IA32_UCODE_WRITE:
+ case 0x200 ... 0x2ff: /* MTRRs */
+ break;
+ case MSR_IA32_APICBASE:
+ vcpu->apic_base = data;
+ break;
+ default:
+ printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_msr_common);
+
/*
 * Writes msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
struct kvm_vcpu *vcpu;
int r;
- if (kvm_run->vcpu < 0 || kvm_run->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(kvm_run->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, kvm_run->vcpu);
{
struct kvm_vcpu *vcpu;
- if (regs->vcpu < 0 || regs->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(regs->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, regs->vcpu);
regs->rdi = vcpu->regs[VCPU_REGS_RDI];
regs->rsp = vcpu->regs[VCPU_REGS_RSP];
regs->rbp = vcpu->regs[VCPU_REGS_RBP];
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
regs->r8 = vcpu->regs[VCPU_REGS_R8];
regs->r9 = vcpu->regs[VCPU_REGS_R9];
regs->r10 = vcpu->regs[VCPU_REGS_R10];
{
struct kvm_vcpu *vcpu;
- if (regs->vcpu < 0 || regs->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(regs->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, regs->vcpu);
vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
vcpu->regs[VCPU_REGS_R8] = regs->r8;
vcpu->regs[VCPU_REGS_R9] = regs->r9;
vcpu->regs[VCPU_REGS_R10] = regs->r10;
struct kvm_vcpu *vcpu;
struct descriptor_table dt;
- if (sregs->vcpu < 0 || sregs->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(sregs->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, sregs->vcpu);
if (!vcpu)
sregs->gdt.limit = dt.limit;
sregs->gdt.base = dt.base;
+ kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
sregs->cr0 = vcpu->cr0;
sregs->cr2 = vcpu->cr2;
sregs->cr3 = vcpu->cr3;
int i;
struct descriptor_table dt;
- if (sregs->vcpu < 0 || sregs->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(sregs->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, sregs->vcpu);
if (!vcpu)
vcpu->cr8 = sregs->cr8;
mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
vcpu->apic_base = sregs->apic_base;
+ kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+
mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
+ *
+ * This list is modified at module load time to reflect the
+ * capabilities of the host cpu.
*/
static u32 msrs_to_save[] = {
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_K6_STAR,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
MSR_IA32_TIME_STAMP_COUNTER,
};
+static unsigned num_msrs_to_save;
+
+static __init void kvm_init_msr_list(void)
+{
+ u32 dummy[2];
+ unsigned i, j;
+
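+	/* drop any msr the host cannot read, compacting the list in place */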
+ for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
+ if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+ continue;
+ if (j < i)
+ msrs_to_save[j] = msrs_to_save[i];
+ j++;
+ }
+ num_msrs_to_save = j;
+}
/*
* Adapt set_msr() to msr_io()'s calling convention
struct kvm_vcpu *vcpu;
int i;
- if (msrs->vcpu < 0 || msrs->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(msrs->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, msrs->vcpu);
{
struct kvm_vcpu *vcpu;
- if (irq->vcpu < 0 || irq->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(irq->vcpu))
return -EINVAL;
if (irq->irq < 0 || irq->irq >= 256)
return -EINVAL;
struct kvm_vcpu *vcpu;
int r;
- if (dbg->vcpu < 0 || dbg->vcpu >= KVM_MAX_VCPUS)
+ if (!valid_vcpu(dbg->vcpu))
return -EINVAL;
vcpu = vcpu_load(kvm, dbg->vcpu);
if (!vcpu)
int r = -EINVAL;
switch (ioctl) {
+ case KVM_GET_API_VERSION:
+ r = KVM_API_VERSION;
+ break;
case KVM_CREATE_VCPU: {
r = kvm_dev_ioctl_create_vcpu(kvm, arg);
if (r)
if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
goto out;
r = kvm_dev_ioctl_run(kvm, &kvm_run);
- if (r < 0)
+ if (r < 0 && r != -EINTR)
goto out;
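+		/* an -EINTR exit still carries run state userspace needs to see */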
- r = -EFAULT;
- if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run))
+ if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
+ r = -EFAULT;
goto out;
- r = 0;
+ }
break;
}
case KVM_GET_REGS: {
if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
goto out;
n = msr_list.nmsrs;
- msr_list.nmsrs = ARRAY_SIZE(msrs_to_save);
+ msr_list.nmsrs = num_msrs_to_save;
if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
goto out;
r = -E2BIG;
- if (n < ARRAY_SIZE(msrs_to_save))
+ if (n < num_msrs_to_save)
goto out;
r = -EFAULT;
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
- sizeof msrs_to_save))
+ num_msrs_to_save * sizeof(u32)))
goto out;
r = 0;
}
{
int r;
- kvm_arch_ops = ops;
+ if (kvm_arch_ops) {
+		printk(KERN_ERR "kvm: an arch module is already loaded\n");
+ return -EEXIST;
+ }
- if (!kvm_arch_ops->cpu_has_kvm_support()) {
+ if (!ops->cpu_has_kvm_support()) {
printk(KERN_ERR "kvm: no hardware support\n");
return -EOPNOTSUPP;
}
- if (kvm_arch_ops->disabled_by_bios()) {
+ if (ops->disabled_by_bios()) {
printk(KERN_ERR "kvm: disabled by bios\n");
return -EOPNOTSUPP;
}
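+	/* commit the ops pointer only after the capability checks succeed */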
+ kvm_arch_ops = ops;
+
r = kvm_arch_ops->hardware_setup();
if (r < 0)
return r;
unregister_reboot_notifier(&kvm_reboot_notifier);
on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
kvm_arch_ops->hardware_unsetup();
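+	/* clear the slot so another arch module can register later */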
+ kvm_arch_ops = NULL;
}
static __init int kvm_init(void)
kvm_init_debug();
+ kvm_init_msr_list();
+
if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
r = -ENOMEM;
goto out;