KVM: x86: check for cr3 validity in ioctl_set_sregs
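
kvm_arch_vcpu_ioctl_set_sregs lacks validity checking for the cr3 value
supplied by userspace: KVM_SET_SREGS accepts any value, and a cr3 that
points outside guest memory is only discovered when KVM next tries to
use the page-table root. Check the new cr3 against the memslots before
loading it, and request a triple fault for the vcpu if it does not fall
inside an existing slot.

A minimal userspace sketch of the trigger (illustrative only: the chosen
address is arbitrary and error handling is omitted):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm = ioctl(kvm, KVM_CREATE_VM, 0);
            int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
            struct kvm_sregs sregs;

            ioctl(vcpu, KVM_GET_SREGS, &sregs);
            sregs.cr3 = 0xdeadbee000ULL;    /* no memslot backs this gpa */
            ioctl(vcpu, KVM_SET_SREGS, &sregs);     /* accepted unchecked */
            return 0;
    }

---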
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3944e91..2bad49b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,7 +108,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
-       { "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
@@ -234,7 +233,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
-               if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
+               if (is_present_pte(pdpte[i]) &&
+                   (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
@@ -321,7 +321,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;
 
-       kvm_mmu_sync_global(vcpu);
        kvm_mmu_reset_context(vcpu);
        return;
 }
@@ -370,7 +369,6 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
-       kvm_mmu_sync_global(vcpu);
        kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -523,6 +521,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        efer |= vcpu->arch.shadow_efer & EFER_LMA;
 
        vcpu->arch.shadow_efer = efer;
+
+       vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
+       kvm_mmu_reset_context(vcpu);
 }
 
 void kvm_enable_efer_bits(u64 mask)
@@ -630,14 +631,19 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
+       unsigned long this_tsc_khz;
 
        if ((!vcpu->time_page))
                return;
 
-       if (unlikely(vcpu->hv_clock_tsc_khz != __get_cpu_var(cpu_tsc_khz))) {
-               kvm_set_time_scale(__get_cpu_var(cpu_tsc_khz), &vcpu->hv_clock);
-               vcpu->hv_clock_tsc_khz = __get_cpu_var(cpu_tsc_khz);
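+       /* get_cpu_var() disables preemption, so the frequency read and
+        * the rescale below happen on one stable cpu */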
+       this_tsc_khz = get_cpu_var(cpu_tsc_khz);
+       if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
+               kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
+               vcpu->hv_clock_tsc_khz = this_tsc_khz;
        }
+       put_cpu_var(cpu_tsc_khz);
 
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
@@ -893,6 +897,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_VM_HSAVE_PA:
+       case MSR_P6_EVNTSEL0:
+       case MSR_P6_EVNTSEL1:
                data = 0;
                break;
        case MSR_MTRRcap:
@@ -1024,6 +1030,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
+       case KVM_CAP_ASSIGN_DEV_IRQ:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -1584,8 +1591,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
        }
 out:
-       if (lapic)
-               kfree(lapic);
+       kfree(lapic);
        return r;
 }
 
@@ -2408,6 +2414,13 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                }
        }
 
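+       /* skip-only emulation: move rip past the decoded instruction
+        * without executing it */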
+       if (emulation_type & EMULTYPE_SKIP) {
+               kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
+               return EMULATE_DONE;
+       }
+
        r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
 
        if (vcpu->arch.pio.string)
@@ -3012,6 +3023,17 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
        return best;
 }
 
+int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+       if (best)
+               return best->eax & 0xff;
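+       /* no CPUID leaf 0x80000008: assume the architectural default
+        * of 36 physical address bits */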
+       return 36;
+}
+
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
        u32 function, index;
@@ -3128,9 +3149,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
        }
 
-       clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
-       kvm_inject_pending_timer_irqs(vcpu);
-
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -3230,6 +3248,7 @@ out:
        return r;
 }
 
+
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
@@ -3256,29 +3275,42 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        kvm_vcpu_block(vcpu);
                        down_read(&vcpu->kvm->slots_lock);
                        if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
-                               if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+                       {
+                               switch (vcpu->arch.mp_state) {
+                               case KVM_MP_STATE_HALTED:
                                        vcpu->arch.mp_state =
-                                                       KVM_MP_STATE_RUNNABLE;
-                       if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
-                               r = -EINTR;
+                                               KVM_MP_STATE_RUNNABLE;
+                               case KVM_MP_STATE_RUNNABLE:
+                                       break;
+                               case KVM_MP_STATE_SIPI_RECEIVED:
+                               default:
+                                       r = -EINTR;
+                                       break;
+                               }
+                       }
                }
 
-               if (r > 0) {
-                       if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                               r = -EINTR;
-                               kvm_run->exit_reason = KVM_EXIT_INTR;
-                               ++vcpu->stat.request_irq_exits;
-                       }
-                       if (signal_pending(current)) {
-                               r = -EINTR;
-                               kvm_run->exit_reason = KVM_EXIT_INTR;
-                               ++vcpu->stat.signal_exits;
-                       }
-                       if (need_resched()) {
-                               up_read(&vcpu->kvm->slots_lock);
-                               kvm_resched(vcpu);
-                               down_read(&vcpu->kvm->slots_lock);
-                       }
+               if (r <= 0)
+                       break;
+
+               clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+               if (kvm_cpu_has_pending_timer(vcpu))
+                       kvm_inject_pending_timer_irqs(vcpu);
+
+               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.request_irq_exits;
+               }
+               if (signal_pending(current)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.signal_exits;
+               }
+               if (need_resched()) {
+                       up_read(&vcpu->kvm->slots_lock);
+                       kvm_resched(vcpu);
+                       down_read(&vcpu->kvm->slots_lock);
                }
        }
 
@@ -3688,7 +3720,6 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
        tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
        tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
        tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-       tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
 }
 
 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
@@ -3785,8 +3816,8 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      u32 old_tss_base,
-                      struct desc_struct *nseg_desc)
+                             u16 old_tss_sel, u32 old_tss_base,
+                             struct desc_struct *nseg_desc)
 {
        struct tss_segment_16 tss_segment_16;
        int ret = 0;
@@ -3805,6 +3836,16 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                           &tss_segment_16, sizeof tss_segment_16))
                goto out;
 
+       if (old_tss_sel != 0xffff) {
+               tss_segment_16.prev_task_link = old_tss_sel;
+
+               if (kvm_write_guest(vcpu->kvm,
+                                   get_tss_base_addr(vcpu, nseg_desc),
+                                   &tss_segment_16.prev_task_link,
+                                   sizeof tss_segment_16.prev_task_link))
+                       goto out;
+       }
+
        if (load_state_from_tss16(vcpu, &tss_segment_16))
                goto out;
 
@@ -3814,7 +3855,7 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-                      u32 old_tss_base,
+                      u16 old_tss_sel, u32 old_tss_base,
                       struct desc_struct *nseg_desc)
 {
        struct tss_segment_32 tss_segment_32;
@@ -3834,6 +3875,16 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;
 
+       if (old_tss_sel != 0xffff) {
+               tss_segment_32.prev_task_link = old_tss_sel;
+
+               if (kvm_write_guest(vcpu->kvm,
+                                   get_tss_base_addr(vcpu, nseg_desc),
+                                   &tss_segment_32.prev_task_link,
+                                   sizeof tss_segment_32.prev_task_link))
+                       goto out;
+       }
+
        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;
 
@@ -3887,14 +3938,17 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
        }
 
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       /* set back link to prev task only if NT bit is set in eflags;
+          note that old_tss_sel is not used after this point */
+       if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
+               old_tss_sel = 0xffff;
 
        if (nseg_desc.type & 8)
-               ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
-                                        &nseg_desc);
+               ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
+                                        old_tss_base, &nseg_desc);
        else
-               ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
-                                        &nseg_desc);
+               ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
+                                        old_tss_base, &nseg_desc);
 
        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
@@ -3934,7 +3993,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        vcpu->arch.cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
-       vcpu->arch.cr3 = sregs->cr3;
+
+       down_read(&vcpu->kvm->slots_lock);
+       if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
+               vcpu->arch.cr3 = sregs->cr3;
+       else
+               set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+       up_read(&vcpu->kvm->slots_lock);
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -4308,7 +4373,6 @@ struct  kvm *kvm_arch_create_vm(void)
                return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-       INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
@@ -4459,3 +4523,8 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
        put_cpu();
 }
+
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+       return kvm_x86_ops->interrupt_allowed(vcpu);
+}