KVM: SVM: Annotate nested_svm_map with might_sleep()
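
nested_svm_map() calls gfn_to_page(), which may sleep, so it must never
be reached from atomic context.  Annotate the helper with might_sleep()
and switch the mapping from kmap_atomic()/kunmap_atomic() to
kmap()/kunmap(); the mapped struct page is handed back through an
out-parameter so that callers can later pass it to nested_svm_unmap().

A condensed sketch of the resulting helper pair, abbreviated from the
hunks below (the full error path and the surrounding svm.c context are
omitted):

    /* Map a guest-physical page for nested SVM; may sleep. */
    static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa,
                                struct page **_page)
    {
            struct page *page;

            might_sleep();          /* gfn_to_page() may sleep */

            page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
            if (is_error_page(page)) {
                    kvm_release_page_clean(page);
                    return NULL;    /* error handling abbreviated */
            }

            *_page = page;
            return kmap(page);      /* sleeping map, no KM_* slot needed */
    }

    static void nested_svm_unmap(struct page *page)
    {
            kunmap(page);
            kvm_release_page_dirty(page);
    }

Callers (see the vmload/vmsave and #vmexit hunks below) now declare a
local struct page *page, pass &page to nested_svm_map(), and hand page
rather than the mapped address to nested_svm_unmap().
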
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 445c594..4bc0183 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -129,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
+static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
@@ -319,7 +320,7 @@ static int svm_hardware_enable(void *garbage)
 
        struct svm_cpu_data *sd;
        uint64_t efer;
-       struct descriptor_table gdt_descr;
+       struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();
 
@@ -345,7 +346,7 @@ static int svm_hardware_enable(void *garbage)
        sd->next_asid = sd->max_asid + 1;
 
        kvm_get_gdt(&gdt_descr);
-       gdt = (struct desc_struct *)gdt_descr.base;
+       gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
        wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -706,29 +707,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_svm;
 
+       err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
-       if (!page) {
-               err = -ENOMEM;
+       if (!page)
                goto uninit;
-       }
 
-       err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
-               goto uninit;
+               goto free_page1;
 
        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
-               goto uninit;
-
-       svm->msrpm = page_address(msrpm_pages);
-       svm_vcpu_init_msrpm(svm->msrpm);
+               goto free_page2;
 
        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
-               goto uninit;
+               goto free_page3;
+
        svm->nested.hsave = page_address(hsave_page);
 
+       svm->msrpm = page_address(msrpm_pages);
+       svm_vcpu_init_msrpm(svm->msrpm);
+
        svm->nested.msrpm = page_address(nested_msrpm_pages);
 
        svm->vmcb = page_address(page);
@@ -744,6 +744,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        return &svm->vcpu;
 
+free_page3:
+       __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+       __free_page(page);
 uninit:
        kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
@@ -931,36 +937,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
        return save->cpl;
 }
 
-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       dt->limit = svm->vmcb->save.idtr.limit;
-       dt->base = svm->vmcb->save.idtr.base;
+       dt->size = svm->vmcb->save.idtr.limit;
+       dt->address = svm->vmcb->save.idtr.base;
 }
 
-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.idtr.limit = dt->limit;
-       svm->vmcb->save.idtr.base = dt->base ;
+       svm->vmcb->save.idtr.limit = dt->size;
+       svm->vmcb->save.idtr.base = dt->address ;
 }
 
-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       dt->limit = svm->vmcb->save.gdtr.limit;
-       dt->base = svm->vmcb->save.gdtr.base;
+       dt->size = svm->vmcb->save.gdtr.limit;
+       dt->address = svm->vmcb->save.gdtr.base;
 }
 
-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       svm->vmcb->save.gdtr.limit = dt->limit;
-       svm->vmcb->save.gdtr.base = dt->base ;
+       svm->vmcb->save.gdtr.limit = dt->size;
+       svm->vmcb->save.gdtr.base = dt->address ;
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -1379,6 +1385,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
 {
+       int vmexit;
+
        if (!is_nested(svm))
                return 0;
 
@@ -1387,7 +1395,11 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
 
-       return nested_svm_exit_handled(svm);
+       vmexit = nested_svm_intercept(svm);
+       if (vmexit == NESTED_EXIT_DONE)
+               svm->nested.exit_required = true;
+
+       return vmexit;
 }
 
 static inline int nested_svm_intr(struct vcpu_svm *svm)
@@ -1418,15 +1430,19 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
        return 0;
 }
 
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
 {
        struct page *page;
 
+       might_sleep();
+
        page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;
 
-       return kmap_atomic(page, idx);
+       *_page = page;
+
+       return kmap(page);
 
 error:
        kvm_release_page_clean(page);
@@ -1435,16 +1451,9 @@ error:
        return NULL;
 }
 
-static void nested_svm_unmap(void *addr, enum km_type idx)
+static void nested_svm_unmap(struct page *page)
 {
-       struct page *page;
-
-       if (!addr)
-               return;
-
-       page = kmap_atomic_to_page(addr);
-
-       kunmap_atomic(addr, idx);
+       kunmap(page);
        kvm_release_page_dirty(page);
 }
 
@@ -1452,6 +1461,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 {
        u32 param = svm->vmcb->control.exit_info_1 & 1;
        u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+       struct page *page;
        bool ret = false;
        u32 t0, t1;
        u8 *msrpm;
@@ -1459,7 +1469,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return false;
 
-       msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+       msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
 
        if (!msrpm)
                goto out;
@@ -1487,7 +1497,7 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
        ret = msrpm[t1] & ((1 << param) << t0);
 
 out:
-       nested_svm_unmap(msrpm, KM_USER0);
+       nested_svm_unmap(page);
 
        return ret;
 }
@@ -1520,7 +1530,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
 /*
  * If this function returns true, this #vmexit was already handled
  */
-static int nested_svm_exit_handled(struct vcpu_svm *svm)
+static int nested_svm_intercept(struct vcpu_svm *svm)
 {
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;
@@ -1566,9 +1576,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
        }
        }
 
-       if (vmexit == NESTED_EXIT_DONE) {
+       return vmexit;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm)
+{
+       int vmexit;
+
+       vmexit = nested_svm_intercept(svm);
+
+       if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);
-       }
 
        return vmexit;
 }
@@ -1610,6 +1628,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
 
        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                       vmcb->control.exit_info_1,
@@ -1617,7 +1636,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
                                       vmcb->control.exit_int_info,
                                       vmcb->control.exit_int_info_err);
 
-       nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
        if (!nested_vmcb)
                return 1;
 
@@ -1630,9 +1649,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        nested_vmcb->save.ds     = vmcb->save.ds;
        nested_vmcb->save.gdtr   = vmcb->save.gdtr;
        nested_vmcb->save.idtr   = vmcb->save.idtr;
+       nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
        if (npt_enabled)
                nested_vmcb->save.cr3    = vmcb->save.cr3;
+       else
+               nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
        nested_vmcb->save.cr2    = vmcb->save.cr2;
+       nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = vmcb->save.rflags;
        nested_vmcb->save.rip    = vmcb->save.rip;
        nested_vmcb->save.rsp    = vmcb->save.rsp;
@@ -1707,7 +1730,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        /* Exit nested SVM mode */
        svm->nested.vmcb = 0;
 
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);
@@ -1718,9 +1741,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 {
        u32 *nested_msrpm;
+       struct page *page;
        int i;
 
-       nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+       nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
        if (!nested_msrpm)
                return false;
 
@@ -1729,7 +1753,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 
        svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
 
-       nested_svm_unmap(nested_msrpm, KM_USER0);
+       nested_svm_unmap(page);
 
        return true;
 }
@@ -1739,8 +1763,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
 
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return false;
 
@@ -1852,7 +1877,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
 
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        enable_gif(svm);
 
@@ -1878,6 +1903,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 static int vmload_interception(struct vcpu_svm *svm)
 {
        struct vmcb *nested_vmcb;
+       struct page *page;
 
        if (nested_svm_check_permissions(svm))
                return 1;
@@ -1885,12 +1911,12 @@ static int vmload_interception(struct vcpu_svm *svm)
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
 
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return 1;
 
        nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        return 1;
 }
@@ -1898,6 +1924,7 @@ static int vmload_interception(struct vcpu_svm *svm)
 static int vmsave_interception(struct vcpu_svm *svm)
 {
        struct vmcb *nested_vmcb;
+       struct page *page;
 
        if (nested_svm_check_permissions(svm))
                return 1;
@@ -1905,12 +1932,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
 
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
        if (!nested_vmcb)
                return 1;
 
        nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
-       nested_svm_unmap(nested_vmcb, KM_USER0);
+       nested_svm_unmap(page);
 
        return 1;
 }