KVM: Rename vcpu->shadow_efer to efer
index 1de2ad7..27af6e3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -428,12 +428,18 @@ out:
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-       if (cr0 & CR0_RESERVED_BITS) {
+       cr0 |= X86_CR0_ET;
+
+#ifdef CONFIG_X86_64
+       if (cr0 & 0xffffffff00000000UL) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, kvm_read_cr0(vcpu));
                kvm_inject_gp(vcpu, 0);
                return;
        }
+#endif
+
+       cr0 &= ~CR0_RESERVED_BITS;
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
@@ -450,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-               if ((vcpu->arch.shadow_efer & EFER_LME)) {
+               if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
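Note: the two hunks above change kvm_set_cr0()'s validation model. CR0.ET
is forced on (it is hardwired to 1 on any CPU KVM supports), a #GP is
injected only when bits 63:32 of the new CR0 value are set on a
CONFIG_X86_64 build, and the remaining reserved low bits are now silently
masked off instead of faulting, which matches what MOV-to-CR0 does on real
hardware. The shadow_efer -> efer accesses here and throughout the file
are a mechanical rename.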
@@ -620,9 +626,11 @@ static inline u32 bit(int bitno)
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    2
+#define KVM_SAVE_MSRS_BEGIN    5
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+       HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+       HV_X64_MSR_APIC_ASSIST_PAGE,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
 #ifdef CONFIG_X86_64
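Note: KVM_SAVE_MSRS_BEGIN counts the kvm-specific entries at the head of
the list, which are exempt from the host-capability filtering done at
module load time; it grows from 2 to 5 to account for the three Hyper-V
MSRs added here. A minimal userspace sketch (ordinary KVM API usage, not
part of this patch) of how the updated list surfaces through
KVM_GET_MSR_INDEX_LIST:

/* Hypothetical probe: dump the MSR indices KVM asks userspace to
 * save/restore. Error handling trimmed for brevity. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        struct kvm_msr_list *list;

        list = malloc(sizeof(*list) + 256 * sizeof(__u32));
        list->nmsrs = 256;
        if (kvm < 0 || ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0)
                return 1;
        for (__u32 i = 0; i < list->nmsrs; i++)
                printf("0x%x\n", list->indices[i]);
        return 0;
}

With this change the dump gains the guest OS ID, hypercall and APIC
assist page indices.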
@@ -647,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        }
 
        if (is_paging(vcpu)
-           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
@@ -678,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
-       efer |= vcpu->arch.shadow_efer & EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
 
-       vcpu->arch.shadow_efer = efer;
+       vcpu->arch.efer = efer;
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
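Note: on EFER writes the LMA bit requested by the guest is discarded and
the current LMA carried over, since long-mode-active is owned by the
processor state (it flips when paging is enabled under EFER.LME) and must
not be toggled directly by a wrmsr.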
@@ -1002,6 +1010,100 @@ out:
        return r;
 }
 
+static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+       return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+       bool r = false;
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+       case HV_X64_MSR_HYPERCALL:
+               r = true;
+               break;
+       }
+
+       return r;
+}
+
+static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+               kvm->arch.hv_guest_os_id = data;
+               /* setting guest os id to zero disables hypercall page */
+               if (!kvm->arch.hv_guest_os_id)
+                       kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+               break;
+       case HV_X64_MSR_HYPERCALL: {
+               u64 gfn;
+               unsigned long addr;
+               u8 instructions[4];
+
+               /* if guest os id is not set hypercall should remain disabled */
+               if (!kvm->arch.hv_guest_os_id)
+                       break;
+               if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+                       kvm->arch.hv_hypercall = data;
+                       break;
+               }
+               gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+               addr = gfn_to_hva(kvm, gfn);
+               if (kvm_is_error_hva(addr))
+                       return 1;
+               kvm_x86_ops->patch_hypercall(vcpu, instructions);
+               ((unsigned char *)instructions)[3] = 0xc3; /* ret */
+               if (copy_to_user((void __user *)addr, instructions, 4))
+                       return 1;
+               kvm->arch.hv_hypercall = data;
+               break;
+       }
+       default:
+               pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+                         "data 0x%llx\n", msr, data);
+               return 1;
+       }
+       return 0;
+}
+
+static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       switch (msr) {
+       case HV_X64_MSR_APIC_ASSIST_PAGE: {
+               unsigned long addr;
+
+               if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+                       vcpu->arch.hv_vapic = data;
+                       break;
+               }
+               addr = gfn_to_hva(vcpu->kvm, data >>
+                                 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+               if (kvm_is_error_hva(addr))
+                       return 1;
+               if (clear_user((void __user *)addr, PAGE_SIZE))
+                       return 1;
+               vcpu->arch.hv_vapic = data;
+               break;
+       }
+       case HV_X64_MSR_EOI:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+       case HV_X64_MSR_ICR:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+       case HV_X64_MSR_TPR:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+       default:
+               pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+                         "data 0x%llx\n", msr, data);
+               return 1;
+       }
+
+       return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
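Note: the hypercall page is guest-allocated. The guest writes a GPA into
HV_X64_MSR_HYPERCALL with the enable bit set, and KVM patches the page
with the vendor-specific hypercall instruction (vmcall or vmmcall, via
kvm_x86_ops->patch_hypercall) followed by a ret, so the same guest image
works on both VMX and SVM. A hedged guest-side sketch of the enable
sequence, following the Hyper-V spec rather than code in this patch:

/* Hypothetical guest code: enable the Hyper-V hypercall page that
 * set_msr_hyperv_pw() above patches. MSR numbers per the Hyper-V spec. */
#include <stdint.h>

#define HV_X64_MSR_GUEST_OS_ID          0x40000000
#define HV_X64_MSR_HYPERCALL            0x40000001
#define HV_X64_MSR_HYPERCALL_ENABLE     0x00000001ULL

static inline void wrmsr(uint32_t msr, uint64_t val)
{
        asm volatile("wrmsr" : :
                     "c"(msr), "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

static void hv_enable_hypercalls(uint64_t page_gpa)
{
        /* A zero guest OS ID keeps the hypercall page disabled. */
        wrmsr(HV_X64_MSR_GUEST_OS_ID, 1ULL << 48);      /* arbitrary id */
        wrmsr(HV_X64_MSR_HYPERCALL, page_gpa | HV_X64_MSR_HYPERCALL_ENABLE);
}

Clearing HV_X64_MSR_GUEST_OS_ID afterwards disables the page again,
matching the handling in set_msr_hyperv_pw() above.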
@@ -1116,6 +1218,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
+       case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+               if (kvm_hv_msr_partition_wide(msr)) {
+                       int r;
+                       mutex_lock(&vcpu->kvm->lock);
+                       r = set_msr_hyperv_pw(vcpu, msr, data);
+                       mutex_unlock(&vcpu->kvm->lock);
+                       return r;
+               } else
+                       return set_msr_hyperv(vcpu, msr, data);
+               break;
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
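Note: the HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15 case range funnels
the whole synthetic-MSR window to the Hyper-V handlers. Partition-wide
MSRs mutate fields of struct kvm shared by all vcpus, so they are
serialized under kvm->lock; the per-vcpu MSRs take the lock-free path.
(The break after the if/else is unreachable, but harmless.)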
@@ -1215,6 +1327,54 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        return 0;
 }
 
+static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       u64 data = 0;
+       struct kvm *kvm = vcpu->kvm;
+
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+               data = kvm->arch.hv_guest_os_id;
+               break;
+       case HV_X64_MSR_HYPERCALL:
+               data = kvm->arch.hv_hypercall;
+               break;
+       default:
+               pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+               return 1;
+       }
+
+       *pdata = data;
+       return 0;
+}
+
+static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       u64 data = 0;
+
+       switch (msr) {
+       case HV_X64_MSR_VP_INDEX: {
+               int r;
+               struct kvm_vcpu *v;
+               kvm_for_each_vcpu(r, v, vcpu->kvm)
+                       if (v == vcpu)
+                               data = r;
+               break;
+       }
+       case HV_X64_MSR_EOI:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+       case HV_X64_MSR_ICR:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+       case HV_X64_MSR_TPR:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+       default:
+               pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+               return 1;
+       }
+       *pdata = data;
+       return 0;
+}
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
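Note: HV_X64_MSR_VP_INDEX is synthesized by walking kvm_for_each_vcpu()
until the current vcpu is found, i.e. the vcpu's position in the vcpu
table is reported as the virtual processor index.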
@@ -1266,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
-               data = vcpu->arch.shadow_efer;
+               data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
@@ -1281,6 +1441,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
+       case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+               if (kvm_hv_msr_partition_wide(msr)) {
+                       int r;
+                       mutex_lock(&vcpu->kvm->lock);
+                       r = get_msr_hyperv_pw(vcpu, msr, pdata);
+                       mutex_unlock(&vcpu->kvm->lock);
+                       return r;
+               } else
+                       return get_msr_hyperv(vcpu, msr, pdata);
+               break;
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -1396,6 +1566,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_XEN_HVM:
        case KVM_CAP_ADJUST_CLOCK:
        case KVM_CAP_VCPU_EVENTS:
+       case KVM_CAP_HYPERV:
+       case KVM_CAP_HYPERV_VAPIC:
+       case KVM_CAP_HYPERV_SPIN:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
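Note: three new capability bits let userspace discover the Hyper-V
support. A minimal probe (ordinary KVM_CHECK_EXTENSION usage, not from
this patch):

/* Hypothetical check on the /dev/kvm fd, e.g. before exposing Hyper-V
 * CPUID leaves to the guest. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int has_hyperv(int kvm_fd)
{
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV) > 0;
}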
@@ -3096,34 +3269,20 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
        kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+       kvm_x86_ops->fpu_activate(vcpu);
        return X86EMUL_CONTINUE;
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-       switch (dr) {
-       case 0 ... 3:
-               *dest = kvm_x86_ops->get_dr(vcpu, dr);
-               return X86EMUL_CONTINUE;
-       default:
-               pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
-               return X86EMUL_UNHANDLEABLE;
-       }
+       return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
-       int exception;
 
-       kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
-       if (exception) {
-               /* FIXME: better handling */
-               return X86EMUL_UNHANDLEABLE;
-       }
-       return X86EMUL_CONTINUE;
+       return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
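Note: the emulator's debug-register accessors are simplified.
kvm_x86_ops->get_dr() and ->set_dr() now take the destination/value and
return the emulation status directly, so the DR range checking and
exception generation that used to be open-coded here (including the FIXME
path) move into the vendor implementations. emulate_clts() also activates
the FPU eagerly, since CLTS signals that the guest is about to use it.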
@@ -3616,11 +3775,76 @@ static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
                return a0 | ((gpa_t)a1 << 32);
 }
 
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+       u64 param, ingpa, outgpa, ret;
+       uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
+       bool fast, longmode;
+       int cs_db, cs_l;
+
+       /*
+        * hypercall generates UD from non zero cpl and real mode
+        * per HYPER-V spec
+        */
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 0;
+       }
+
+       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       longmode = is_long_mode(vcpu) && cs_l == 1;
+
+       if (!longmode) {
+               param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
+               ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
+               outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+       }
+#ifdef CONFIG_X86_64
+       else {
+               param = kvm_register_read(vcpu, VCPU_REGS_RCX);
+               ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
+               outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+       }
+#endif
+
+       code = param & 0xffff;
+       fast = (param >> 16) & 0x1;
+       rep_cnt = (param >> 32) & 0xfff;
+       rep_idx = (param >> 48) & 0xfff;
+
+       trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+
+       switch (code) {
+       case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+               kvm_vcpu_on_spin(vcpu);
+               break;
+       default:
+               res = HV_STATUS_INVALID_HYPERCALL_CODE;
+               break;
+       }
+
+       ret = res | (((u64)rep_done & 0xfff) << 32);
+       if (longmode) {
+               kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+       } else {
+               kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
+               kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
+       }
+
+       return 1;
+}
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
        int r = 1;
 
+       if (kvm_hv_hypercall_enabled(vcpu->kvm))
+               return kvm_hv_hypercall(vcpu);
+
        nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
        a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
        a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
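Note: kvm_hv_hypercall() decodes its input per the Hyper-V calling
convention: 64-bit guests pass the parameter, input GPA and output GPA in
RCX, RDX and R8; 32-bit guests pass them in EDX:EAX, EBX:ECX and EDI:ESI
and receive the result in EDX:EAX. Within the parameter, bits 15:0 are
the call code, bit 16 the fast-call flag, bits 43:32 the rep count and
bits 59:48 the rep start index; e.g. HvNotifyLongSpinWait (0x0008) issued
as a fast call arrives as param == 0x10008. Only that spin-wait
notification is handled (mapped onto kvm_vcpu_on_spin()); everything else
returns HV_STATUS_INVALID_HYPERCALL_CODE, with the reps-completed count
folded into bits 43:32 of the result.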
@@ -4015,7 +4239,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       kvm_load_guest_fpu(vcpu);
+       if (vcpu->fpu_active)
+               kvm_load_guest_fpu(vcpu);
 
        local_irq_disable();
 
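Note: guest FPU state is now loaded lazily. vcpu_enter_guest() restores
it only while fpu_active is set, emulate_clts() (further up) activates it
when the guest signals intent to use the FPU, and kvm_put_guest_fpu()
queues KVM_REQ_DEACTIVATE_FPU so the vendor code can deactivate it again,
typically by re-arming the CR0.TS/#NM intercept.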
@@ -4344,7 +4569,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
-       sregs->efer = vcpu->arch.shadow_efer;
+       sregs->efer = vcpu->arch.efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
        memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4525,7 +4750,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 {
        struct kvm_segment kvm_seg;
 
-       if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+       if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
                return kvm_load_realmode_segment(vcpu, selector, seg);
        if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
                return 1;
@@ -4834,7 +5059,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
-       mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+       mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
@@ -4877,7 +5102,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        /* Older userspace won't unhalt the vcpu on reset. */
        if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
-           !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+           !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
        vcpu_put(vcpu);
@@ -5061,14 +5286,13 @@ EXPORT_SYMBOL_GPL(fx_init);
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+       if (vcpu->guest_fpu_loaded)
                return;
 
        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
@@ -5081,7 +5305,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        ++vcpu->stat.fpu_reload;
        set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
 }
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
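Note: the EXPORT_SYMBOL_GPL lines for kvm_load_guest_fpu() and
kvm_put_guest_fpu() are dropped, presumably because the vendor modules no
longer call these helpers directly; FPU load/unload is now driven from
vcpu_enter_guest() and the deactivate request above.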
@@ -5312,6 +5535,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
+       cleanup_srcu_struct(&kvm->srcu);
        kfree(kvm->arch.aliases);
        kfree(kvm);
 }
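Note: the cleanup_srcu_struct() call pairs with the init_srcu_struct()
done at VM creation (part of the SRCU conversion of the memslot
handling); without it, the per-VM SRCU state would leak on destruction.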