Merge branch 'connlimit' of git://dev.medozas.de/linux
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d359500..f02b8ed 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -477,46 +477,46 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
 }
 
 /*
- * Return the pointer to the largepage write count for a given
- * gfn, handling slots that are not large page aligned.
+ * Return the pointer to the large page information for a given gfn,
+ * handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn,
-                              struct kvm_memory_slot *slot,
-                              int level)
+static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
+                                             struct kvm_memory_slot *slot,
+                                             int level)
 {
        unsigned long idx;
 
        idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
              (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-       return &slot->lpage_info[level - 2][idx].write_count;
+       return &slot->lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_memory_slot *slot;
-       int *write_count;
+       struct kvm_lpage_info *linfo;
        int i;
 
        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               write_count   = slot_largepage_idx(gfn, slot, i);
-               *write_count += 1;
+               linfo = lpage_info_slot(gfn, slot, i);
+               linfo->write_count += 1;
        }
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_memory_slot *slot;
-       int *write_count;
+       struct kvm_lpage_info *linfo;
        int i;
 
        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               write_count   = slot_largepage_idx(gfn, slot, i);
-               *write_count -= 1;
-               WARN_ON(*write_count < 0);
+               linfo = lpage_info_slot(gfn, slot, i);
+               linfo->write_count -= 1;
+               WARN_ON(linfo->write_count < 0);
        }
 }
 
@@ -525,12 +525,12 @@ static int has_wrprotected_page(struct kvm *kvm,
                                int level)
 {
        struct kvm_memory_slot *slot;
-       int *largepage_idx;
+       struct kvm_lpage_info *linfo;
 
        slot = gfn_to_memslot(kvm, gfn);
        if (slot) {
-               largepage_idx = slot_largepage_idx(gfn, slot, level);
-               return *largepage_idx;
+               linfo = lpage_info_slot(gfn, slot, level);
+               return linfo->write_count;
        }
 
        return 1;
@@ -554,14 +554,18 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
        return ret;
 }
 
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
        struct kvm_memory_slot *slot;
-       int host_level, level, max_level;
-
        slot = gfn_to_memslot(vcpu->kvm, large_gfn);
        if (slot && slot->dirty_bitmap)
-               return PT_PAGE_TABLE_LEVEL;
+               return true;
+       return false;
+}
+
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+       int host_level, level, max_level;
 
        host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
@@ -585,16 +589,15 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
        struct kvm_memory_slot *slot;
-       unsigned long idx;
+       struct kvm_lpage_info *linfo;
 
        slot = gfn_to_memslot(kvm, gfn);
        if (likely(level == PT_PAGE_TABLE_LEVEL))
                return &slot->rmap[gfn - slot->base_gfn];
 
-       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+       linfo = lpage_info_slot(gfn, slot, level);
 
-       return &slot->lpage_info[level - 2][idx].rmap_pde;
+       return &linfo->rmap_pde;
 }
 
 /*
@@ -882,19 +885,16 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                       gfn_t gfn = memslot->base_gfn + gfn_offset;
 
                        ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
                        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-                               unsigned long idx;
-                               int sh;
-
-                               sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
-                               idx = ((memslot->base_gfn+gfn_offset) >> sh) -
-                                       (memslot->base_gfn >> sh);
-                               ret |= handler(kvm,
-                                       &memslot->lpage_info[j][idx].rmap_pde,
-                                       data);
+                               struct kvm_lpage_info *linfo;
+
+                               linfo = lpage_info_slot(gfn, memslot,
+                                                       PT_DIRECTORY_LEVEL + j);
+                               ret |= handler(kvm, &linfo->rmap_pde, data);
                        }
                        trace_kvm_age_page(hva, memslot, ret);
                        retval |= ret;
@@ -945,6 +945,35 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        return young;
 }
 
+static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+                             unsigned long data)
+{
+       u64 *spte;
+       int young = 0;
+
+       /*
+        * If there's no access bit in the secondary pte set by the
+        * hardware, it's up to gup-fast/gup to set the access bit in
+        * the primary pte or in the page structure.
+        */
+       if (!shadow_accessed_mask)
+               goto out;
+
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
+               u64 _spte = *spte;
+               BUG_ON(!(_spte & PT_PRESENT_MASK));
+               young = _spte & PT_ACCESSED_MASK;
+               if (young) {
+                       young = 1;
+                       break;
+               }
+               spte = rmap_next(kvm, rmapp, spte);
+       }
+out:
+       return young;
+}
+
 #define RMAP_RECYCLE_THRESHOLD 1000
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -965,6 +994,11 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva)
        return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
+}
+
 #ifdef MMU_DEBUG
 static int is_empty_shadow_page(u64 *spt)
 {
@@ -1987,6 +2021,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
+       else
+               pte_access &= ~ACC_WRITE_MASK;
 
        spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -2214,7 +2250,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-                       int map_writable, int level, gfn_t gfn, pfn_t pfn)
+                       int map_writable, int level, gfn_t gfn, pfn_t pfn,
+                       bool prefault)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
@@ -2225,11 +2262,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                if (iterator.level == level) {
                        unsigned pte_access = ACC_ALL;
 
-                       if (!map_writable)
-                               pte_access &= ~ACC_WRITE_MASK;
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
                                     0, write, 1, &pt_write,
-                                    level, gfn, pfn, false, map_writable);
+                                    level, gfn, pfn, prefault, map_writable);
                        direct_pte_prefetch(vcpu, iterator.sptep);
                        ++vcpu->stat.pf_fixed;
                        break;
@@ -2284,33 +2319,80 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
        return 1;
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+                                       gfn_t *gfnp, pfn_t *pfnp, int *levelp)
+{
+       pfn_t pfn = *pfnp;
+       gfn_t gfn = *gfnp;
+       int level = *levelp;
+
+       /*
+        * Check if it's a transparent hugepage. If this were a
+        * hugetlbfs page, level wouldn't be set to
+        * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
+        * here.
+        */
+       if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+           level == PT_PAGE_TABLE_LEVEL &&
+           PageTransCompound(pfn_to_page(pfn)) &&
+           !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
+               unsigned long mask;
+               /*
+                * mmu_notifier_retry was successful and we hold the
+                * mmu_lock here, so the pmd can't start splitting
+                * from under us, and in turn
+                * __split_huge_page_refcount() can't run from under
+                * us, and we can safely transfer the refcount from
+                * PG_tail to PG_head as we switch the pfn from tail
+                * to head.
+                */
+               *levelp = level = PT_DIRECTORY_LEVEL;
+               mask = KVM_PAGES_PER_HPAGE(level) - 1;
+               VM_BUG_ON((gfn & mask) != (pfn & mask));
+               if (pfn & mask) {
+                       gfn &= ~mask;
+                       *gfnp = gfn;
+                       kvm_release_pfn_clean(pfn);
+                       pfn &= ~mask;
+                       if (!get_page_unless_zero(pfn_to_page(pfn)))
+                               BUG();
+                       *pfnp = pfn;
+               }
+       }
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
-                        bool no_apf)
+                        bool prefault)
 {
        int r;
        int level;
+       int force_pt_level;
        pfn_t pfn;
        unsigned long mmu_seq;
        bool map_writable;
 
-       level = mapping_level(vcpu, gfn);
-
-       /*
-        * This path builds a PAE pagetable - so we can map 2mb pages at
-        * maximum. Therefore check if the level is larger than that.
-        */
-       if (level > PT_DIRECTORY_LEVEL)
-               level = PT_DIRECTORY_LEVEL;
+       force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+       if (likely(!force_pt_level)) {
+               level = mapping_level(vcpu, gfn);
+               /*
+                * This path builds a PAE pagetable - so we can map
+                * 2mb pages at maximum. Therefore check if the level
+                * is larger than that.
+                */
+               if (level > PT_DIRECTORY_LEVEL)
+                       level = PT_DIRECTORY_LEVEL;
 
-       gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+               gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+       } else
+               level = PT_PAGE_TABLE_LEVEL;
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, no_apf, gfn, v, &pfn, write, &map_writable))
+       if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
                return 0;
 
        /* mmio */
@@ -2321,7 +2403,10 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
-       r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn);
+       if (likely(!force_pt_level))
+               transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+       r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
+                        prefault);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
 
@@ -2584,7 +2669,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-                               u32 error_code, bool no_apf)
+                               u32 error_code, bool prefault)
 {
        gfn_t gfn;
        int r;
@@ -2600,15 +2685,17 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        gfn = gva >> PAGE_SHIFT;
 
        return nonpaging_map(vcpu, gva & PAGE_MASK,
-                            error_code & PFERR_WRITE_MASK, gfn, no_apf);
+                            error_code & PFERR_WRITE_MASK, gfn, prefault);
 }
 
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 {
        struct kvm_arch_async_pf arch;
+
        arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu.direct_map;
+       arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
        return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
 }
@@ -2622,7 +2709,7 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
        return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gva_t gva, pfn_t *pfn, bool write, bool *writable)
 {
        bool async;
@@ -2634,7 +2721,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 
        put_page(pfn_to_page(*pfn));
 
-       if (!no_apf && can_do_async_pf(vcpu)) {
+       if (!prefault && can_do_async_pf(vcpu)) {
                trace_kvm_try_async_get_page(gva, gfn);
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
                        trace_kvm_async_pf_doublefault(gva, gfn);
@@ -2650,11 +2737,12 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 }
 
 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
-                         bool no_apf)
+                         bool prefault)
 {
        pfn_t pfn;
        int r;
        int level;
+       int force_pt_level;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        unsigned long mmu_seq;
        int write = error_code & PFERR_WRITE_MASK;
@@ -2667,14 +2755,17 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        if (r)
                return r;
 
-       level = mapping_level(vcpu, gfn);
-
-       gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+       force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+       if (likely(!force_pt_level)) {
+               level = mapping_level(vcpu, gfn);
+               gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+       } else
+               level = PT_PAGE_TABLE_LEVEL;
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn, write, &map_writable))
+       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
                return 0;
 
        /* mmio */
@@ -2684,8 +2775,10 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
+       if (likely(!force_pt_level))
+               transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, gpa, write, map_writable,
-                        level, gfn, pfn);
+                        level, gfn, pfn, prefault);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return r;
@@ -2727,13 +2820,13 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-       pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
+       pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
        mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.cr3;
+       return kvm_read_cr3(vcpu);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -2885,6 +2978,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
+       context->base_role.word = 0;
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = tdp_page_fault;
        context->free = nonpaging_free;
@@ -3330,7 +3424,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
        }
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+                      void *insn, int insn_len)
 {
        int r;
        enum emulation_result er;
@@ -3348,7 +3443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
        if (r)
                goto out;
 
-       er = emulate_instruction(vcpu, cr2, error_code, 0);
+       er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
        switch (er) {
        case EMULATE_DONE:
@@ -3443,11 +3538,14 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                if (!test_bit(slot, sp->slot_bitmap))
                        continue;
 
+               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+                       continue;
+
                pt = sp->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (is_writable_pte(pt[i]))
-                               pt[i] &= ~PT_WRITABLE_MASK;
+                               update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
        }
        kvm_flush_remote_tlbs(kvm);
 }
@@ -3529,13 +3627,6 @@ static void mmu_destroy_caches(void)
                kmem_cache_destroy(mmu_page_header_cache);
 }
 
-void kvm_mmu_module_exit(void)
-{
-       mmu_destroy_caches();
-       percpu_counter_destroy(&kvm_total_used_mmu_pages);
-       unregister_shrinker(&mmu_shrinker);
-}
-
 int kvm_mmu_module_init(void)
 {
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -3632,7 +3723,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       (void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
+       (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
        return 1;
 }
 
@@ -3728,12 +3819,6 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
@@ -3741,5 +3826,18 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
+}
+
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void mmu_audit_disable(void) { }
+#endif
+
+void kvm_mmu_module_exit(void)
+{
+       mmu_destroy_caches();
+       percpu_counter_destroy(&kvm_total_used_mmu_pages);
+       unregister_shrinker(&mmu_shrinker);
        mmu_audit_disable();
 }