KVM: MMU: add tracepoint for kvm_mmu_invalidate_zap_all_pages
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 004cc87..3fd060a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1511,6 +1511,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        if (!direct)
                sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+       /*
+        * The active_mmu_pages list is a FIFO list; do not move a page
+        * until it is zapped. kvm_zap_obsolete_pages() depends on this
+        * invariant; see the comment in kvm_zap_obsolete_pages().
+        */
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        sp->parent_ptes = 0;
        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
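
The comment just added relies on an ordering invariant: shadow pages are
always inserted at the head of active_mmu_pages and never moved while
valid, so a reverse walk visits them oldest-first. A minimal userspace
sketch of that property (struct node and its helpers below are
illustrative stand-ins, not kernel code):

    #include <stdio.h>

    struct node {
            struct node *prev, *next;
            int id;
    };

    static void list_init(struct node *head)
    {
            head->prev = head->next = head;
    }

    /* Insert at the head, like list_add(&sp->link, &active_mmu_pages). */
    static void list_add_head(struct node *n, struct node *head)
    {
            n->next = head->next;
            n->prev = head;
            head->next->prev = n;
            head->next = n;
    }

    int main(void)
    {
            struct node head, pages[4];
            struct node *n;
            int i;

            list_init(&head);
            for (i = 0; i < 4; i++) {
                    pages[i].id = i;        /* creation order: 0, 1, 2, 3 */
                    list_add_head(&pages[i], &head);
            }

            /* Reverse walk, as list_for_each_entry_safe_reverse() does:
             * oldest page first. */
            for (n = head.prev; n != &head; n = n->prev)
                    printf("visit page %d\n", n->id);
            return 0;
    }
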
@@ -1838,6 +1844,11 @@ static void clear_sp_write_flooding_count(u64 *spte)
        __clear_sp_write_flooding_count(sp);
 }
 
+static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+}
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
@@ -1900,6 +1911,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
                account_shadowed(vcpu->kvm, gfn);
        }
+       sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
        init_shadow_page_table(sp);
        trace_kvm_mmu_get_page(sp, true);
        return sp;
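
With sp->mmu_valid_gen stamped at creation and checked by is_obsolete_sp(),
a single bump of kvm->arch.mmu_valid_gen marks every existing shadow page
obsolete in O(1), without touching the pages themselves. A standalone
sketch of the scheme (vm_demo and page_demo are illustrative types, not
KVM's):

    #include <stdbool.h>
    #include <stdio.h>

    struct vm_demo   { unsigned long mmu_valid_gen; };
    struct page_demo { unsigned long mmu_valid_gen; };

    static bool is_obsolete(struct vm_demo *vm, struct page_demo *p)
    {
            return p->mmu_valid_gen != vm->mmu_valid_gen;
    }

    int main(void)
    {
            struct vm_demo vm = { .mmu_valid_gen = 0 };
            /* Stamp the page at creation, as kvm_mmu_get_page() now does. */
            struct page_demo p = { .mmu_valid_gen = vm.mmu_valid_gen };

            printf("obsolete before bump: %d\n", is_obsolete(&vm, &p));
            vm.mmu_valid_gen++;   /* one increment invalidates everything */
            printf("obsolete after bump:  %d\n", is_obsolete(&vm, &p));
            return 0;
    }
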
@@ -2070,8 +2082,10 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
        ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
+
        if (!sp->role.invalid && !sp->role.direct)
                unaccount_shadowed(kvm, sp->gfn);
+
        if (sp->unsync)
                kvm_unlink_unsync_page(kvm, sp);
        if (!sp->root_count) {
@@ -2869,22 +2883,25 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
-       spin_lock(&vcpu->kvm->mmu_lock);
+
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
            (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
             vcpu->arch.mmu.direct_map)) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
 
+               spin_lock(&vcpu->kvm->mmu_lock);
                sp = page_header(root);
                --sp->root_count;
                if (!sp->root_count && sp->role.invalid) {
                        kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                }
-               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                spin_unlock(&vcpu->kvm->mmu_lock);
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                return;
        }
+
+       spin_lock(&vcpu->kvm->mmu_lock);
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
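The hunk above narrows the mmu_lock critical sections: the VALID_PAGE()
check already ran lock-free, but each path now takes the lock only around
the shadow-page manipulation itself, and the root_hpa reset (treated by
this patch as a vcpu-local write) moves outside the lock. The pattern in
schematic form (a pthread-based userspace sketch; root_valid and the
drop-reference step are hypothetical stand-ins):

    #include <pthread.h>

    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
    static int root_valid = 1;   /* stands in for VALID_PAGE(root_hpa) */

    static void free_roots_narrow(void)
    {
            if (!root_valid)     /* vcpu-private check: no lock needed */
                    return;

            pthread_mutex_lock(&mmu_lock);
            /* ... drop the reference on the shared root page here ... */
            pthread_mutex_unlock(&mmu_lock);

            root_valid = 0;      /* vcpu-private write: no lock needed */
    }

    int main(void)
    {
            free_roots_narrow();
            return 0;
    }
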
@@ -3764,9 +3781,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        r = mmu_alloc_roots(vcpu);
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_sync_roots(vcpu);
-       spin_unlock(&vcpu->kvm->mmu_lock);
+       kvm_mmu_sync_roots(vcpu);
        if (r)
                goto out;
        /* set_cr3() should ensure TLB has been flushed */
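
The three replaced lines fold into kvm_mmu_sync_roots(). Its definition is
not part of this diff, but the removed lines suggest it simply wraps
mmu_sync_roots() in the same lock; an inferred sketch, not the verbatim
definition:

    void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
    {
            spin_lock(&vcpu->kvm->mmu_lock);
            mmu_sync_roots(vcpu);
            spin_unlock(&vcpu->kvm->mmu_lock);
    }
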
@@ -4179,18 +4194,80 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
        spin_unlock(&kvm->mmu_lock);
 }
 
-void kvm_mmu_zap_all(struct kvm *kvm)
+static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);
 
-       spin_lock(&kvm->mmu_lock);
 restart:
-       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
+       list_for_each_entry_safe_reverse(sp, node,
+             &kvm->arch.active_mmu_pages, link) {
+               /*
+                * active_mmu_pages is a FIFO list, so no obsolete page
+                * precedes a newly created page; the reverse walk can
+                * stop at the first non-obsolete page.
+                */
+               if (!is_obsolete_sp(kvm, sp))
+                       break;
+
+               /*
+                * Do not repeatedly zap a root page, to avoid sending
+                * unnecessary KVM_REQ_MMU_RELOAD requests; otherwise we
+                * may be unable to make progress:
+                *    vcpu 0                        vcpu 1
+                *                         call vcpu_enter_guest():
+                *                            1): handle KVM_REQ_MMU_RELOAD
+                *                                and wait for mmu-lock to
+                *                                load the mmu
+                * repeat:
+                *    1): zap root page and
+                *        send KVM_REQ_MMU_RELOAD
+                *
+                *    2): if (cond_resched_lock(mmu-lock))
+                *
+                *                            2): hold mmu-lock and load mmu
+                *
+                *                            3): see that KVM_REQ_MMU_RELOAD
+                *                                is set in vcpu->requests,
+                *                                then return 1 to call
+                *                                vcpu_enter_guest() again.
+                *            goto repeat;
+                *
+                * Since we walk the list in reverse and invalid pages are
+                * moved to the head of the list, skipping invalid pages
+                * lets us avoid walking the list forever.
+                */
+               if (sp->role.invalid)
+                       continue;
+
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       cond_resched_lock(&kvm->mmu_lock);
+                       goto restart;
+               }
+
                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
                        goto restart;
+       }
 
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
+}
+
+/*
+ * Fast-invalidate all shadow pages, using the lock-break technique
+ * to zap the obsolete pages.
+ *
+ * This is required when a memslot is being deleted or the VM is
+ * being destroyed: in those cases we must ensure that, once this
+ * function returns, the KVM MMU no longer uses any resource of the
+ * slot being deleted (or of any slot, when the VM is destroyed).
+ */
+void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
+{
+       spin_lock(&kvm->mmu_lock);
+       trace_kvm_mmu_invalidate_zap_all_pages(kvm);
+       kvm->arch.mmu_valid_gen++;
+
+       kvm_zap_obsolete_pages(kvm);
        spin_unlock(&kvm->mmu_lock);
 }
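
Putting the pieces together: kvm_mmu_invalidate_zap_all_pages() obsoletes
every shadow page with one generation bump, and kvm_zap_obsolete_pages()
reclaims them under the lock-break discipline seen in its
need_resched()/spin_needbreak() block: commit the prepared zaps, drop
mmu_lock so contenders can run, then restart the walk. A schematic
userspace analogue of that discipline (process_one_item() and
others_waiting() are hypothetical stand-ins for the real work and
contention checks):

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int work_left = 1000;

    static int process_one_item(void) { return --work_left > 0; }
    static int others_waiting(void)   { return work_left % 100 == 0; }

    static void zap_with_lock_break(void)
    {
            pthread_mutex_lock(&lock);
    restart:
            while (process_one_item()) {
                    if (others_waiting()) {
                            /* Commit pending state first, as
                             * kvm_mmu_commit_zap_page() does, so nothing
                             * is left half-done while the lock is down. */
                            pthread_mutex_unlock(&lock);
                            sched_yield();
                            pthread_mutex_lock(&lock);
                            goto restart;   /* the list may have changed */
                    }
            }
            pthread_mutex_unlock(&lock);
            printf("done, work_left = %d\n", work_left);
    }

    int main(void)
    {
            zap_with_lock_break();
            return 0;
    }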