 			   typeof(*work), queue);
 		cancel_work_sync(&work->work);
 		list_del(&work->queue);
-		if (!work->done) /* work was canceled */
+		if (!work->done) { /* work was canceled */
+			mmdrop(work->mm);
+			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
 			kmem_cache_free(async_pf_cache, work);
+		}
 	}
 
 	spin_lock(&vcpu->async_pf.lock);
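This first hunk is in kvm_clear_async_pf_completion_queue(): when a queued item is canceled before its work function ever ran, the mm and kvm references it still holds are now dropped before the item is freed, instead of leaking with it. A minimal sketch of the cancel path after the change (reassembled from the lines above; the enclosing loop that walks vcpu->async_pf.queue is unchanged context, and the note about where the references were taken is an assumption):

	cancel_work_sync(&work->work);
	list_del(&work->queue);
	if (!work->done) {
		/* canceled before the work function ran: drop the
		 * references the item still holds (presumably taken when
		 * the fault was queued), then free it */
		mmdrop(work->mm);
		kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
		kmem_cache_free(async_pf_cache, work);
	}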
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
 
-	if (list_empty_careful(&vcpu->async_pf.done) ||
-	    !kvm_arch_can_inject_async_page_present(vcpu))
-		return;
-
-	spin_lock(&vcpu->async_pf.lock);
-	work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
-	list_del(&work->link);
-	spin_unlock(&vcpu->async_pf.lock);
+	while (!list_empty_careful(&vcpu->async_pf.done) &&
+	       kvm_arch_can_inject_async_page_present(vcpu)) {
+		spin_lock(&vcpu->async_pf.lock);
+		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+					link);
+		list_del(&work->link);
+		spin_unlock(&vcpu->async_pf.lock);
-	if (work->page)
-		kvm_arch_async_page_ready(vcpu, work);
-	kvm_arch_async_page_present(vcpu, work);
+		if (work->page)
+			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_present(vcpu, work);
-	list_del(&work->queue);
-	vcpu->async_pf.queued--;
-	if (work->page)
-		put_page(work->page);
-	kmem_cache_free(async_pf_cache, work);
+		list_del(&work->queue);
+		vcpu->async_pf.queued--;
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
 }
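Put together from the added lines, kvm_check_async_pf_completion() after this hunk reads roughly as below: instead of delivering at most one completed async page fault per call, it keeps draining vcpu->async_pf.done for as long as the architecture code reports that a "page present" event can still be injected.

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_inject_async_page_present(vcpu)) {
		/* pop the oldest completed fault under the lock */
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		if (work->page)
			kvm_arch_async_page_ready(vcpu, work); /* let arch code map it */
		kvm_arch_async_page_present(vcpu, work);       /* notify the guest */

		/* retire the item from the per-vcpu queue and free it */
		list_del(&work->queue);
		vcpu->async_pf.queued--;
		if (work->page)
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);
	}
}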
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
 
-	if (!list_empty(&vcpu->async_pf.done))
+	if (!list_empty_careful(&vcpu->async_pf.done))
 		return 0;
 
 	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
 	get_page(bad_page);
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
+	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&work->link, &vcpu->async_pf.done);
+	spin_unlock(&vcpu->async_pf.lock);
+
 	vcpu->async_pf.queued++;
 	return 0;
 }
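These last hunks touch kvm_async_pf_wakeup_all(): the unlocked emptiness check becomes list_empty_careful(), the variant intended for testing a list without holding the lock that protects it, and the dummy "wake everyone up" entry is now added to vcpu->async_pf.done under async_pf.lock, the same lock the paths above hold while removing entries. A sketch of the tail of the function after the change (reassembled from the lines above; the allocation-failure handling between kmem_cache_zalloc() and get_page() is not shown in the hunks and is left out here as well):

	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	/* insert under async_pf.lock, matching the consumers above */
	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;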