kvm: free resources after canceling async_pf
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 60df9e0..1758e32 100644
@@ -101,8 +101,11 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                                   typeof(*work), queue);
                cancel_work_sync(&work->work);
                list_del(&work->queue);
-               if (!work->done) /* work was canceled */
+               if (!work->done) { /* work was canceled */
+                       mmdrop(work->mm);
+                       kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
+               }
        }
 
        spin_lock(&vcpu->async_pf.lock);
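
This hunk plugs a reference leak on the cancel path: kvm_setup_async_pf() pins both the faulting task's mm and the VM for every queued work item, and async_pf_execute() drops them once the work has run. When the work is canceled before it ever ran (work->done is still false), nothing released those references, so the mm and the struct kvm leaked. For context, a sketch of the acquiring side in kvm_setup_async_pf() from this generation of the file (not part of this patch):

        work->mm = current->mm;
        atomic_inc(&work->mm->mm_count);  /* released by the mmdrop() added above */
        kvm_get_kvm(work->vcpu->kvm);     /* released by the kvm_put_kvm() added above */
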
@@ -124,24 +127,24 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
        struct kvm_async_pf *work;
 
-       if (list_empty_careful(&vcpu->async_pf.done) ||
-           !kvm_arch_can_inject_async_page_present(vcpu))
-               return;
-
-       spin_lock(&vcpu->async_pf.lock);
-       work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
-       list_del(&work->link);
-       spin_unlock(&vcpu->async_pf.lock);
+       while (!list_empty_careful(&vcpu->async_pf.done) &&
+             kvm_arch_can_inject_async_page_present(vcpu)) {
+               spin_lock(&vcpu->async_pf.lock);
+               work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+                                             link);
+               list_del(&work->link);
+               spin_unlock(&vcpu->async_pf.lock);
 
-       if (work->page)
-               kvm_arch_async_page_ready(vcpu, work);
-       kvm_arch_async_page_present(vcpu, work);
+               if (work->page)
+                       kvm_arch_async_page_ready(vcpu, work);
+               kvm_arch_async_page_present(vcpu, work);
 
-       list_del(&work->queue);
-       vcpu->async_pf.queued--;
-       if (work->page)
-               put_page(work->page);
-       kmem_cache_free(async_pf_cache, work);
+               list_del(&work->queue);
+               vcpu->async_pf.queued--;
+               if (work->page)
+                       put_page(work->page);
+               kmem_cache_free(async_pf_cache, work);
+       }
 }
 
 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
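
This hunk turns single-shot completion delivery into a drain loop: rather than handling at most one completed async page fault per call, kvm_check_async_pf_completion() keeps consuming entries for as long as the done list is non-empty and the architecture still allows injecting a page-present notification. Note the locking shape: the spinlock covers only the list manipulation, while the arch callbacks and the final cleanup run unlocked. A minimal sketch of the pattern with hypothetical names (head, lock, can_consume() and consume() are placeholders, not from this file):

        while (!list_empty_careful(&head) && can_consume()) {
                spin_lock(&lock);
                item = list_first_entry(&head, typeof(*item), link);
                list_del(&item->link);
                spin_unlock(&lock);
                consume(item);  /* may sleep, so the lock must be dropped first */
        }
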
@@ -196,7 +199,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
        struct kvm_async_pf *work;
 
-       if (!list_empty(&vcpu->async_pf.done))
+       if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;
 
        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
@@ -207,7 +210,10 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
        get_page(bad_page);
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
+       spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
+       spin_unlock(&vcpu->async_pf.lock);
+
        vcpu->async_pf.queued++;
        return 0;
 }
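
The last two hunks harden kvm_async_pf_wakeup_all() against the workqueue side: the emptiness test becomes list_empty_careful(), which is intended for lockless checks, and the insertion into vcpu->async_pf.done now takes async_pf.lock. That matters because the completion path in async_pf_execute() (same file, not shown in this patch) already appends to the same list under that lock, roughly:

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->page = page;
        apf->done = true;
        spin_unlock(&vcpu->async_pf.lock);

Without the added lock/unlock pair, an unsynchronized list_add_tail() here could race with a completing work item and corrupt the done list.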