Diffstat (limited to 'virt/kvm/async_pf.c')
-rw-r--r--  virt/kvm/async_pf.c  22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8631d9c..8a39dda 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,6 +56,7 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
static void async_pf_execute(struct work_struct *work)
{
+ struct page *page = NULL;
struct kvm_async_pf *apf =
container_of(work, struct kvm_async_pf, work);
struct mm_struct *mm = apf->mm;
@@ -67,12 +68,14 @@ static void async_pf_execute(struct work_struct *work)
use_mm(mm);
down_read(&mm->mmap_sem);
- get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
+ get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
up_read(&mm->mmap_sem);
unuse_mm(mm);
spin_lock(&vcpu->async_pf.lock);
list_add_tail(&apf->link, &vcpu->async_pf.done);
+ apf->page = page;
+ apf->done = true;
spin_unlock(&vcpu->async_pf.lock);
/*
@@ -80,7 +83,7 @@ static void async_pf_execute(struct work_struct *work)
* this point
*/
- trace_kvm_async_pf_completed(addr, gva);
+ trace_kvm_async_pf_completed(addr, page, gva);
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
@@ -96,8 +99,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
struct kvm_async_pf *work =
list_entry(vcpu->async_pf.queue.next,
typeof(*work), queue);
+ cancel_work_sync(&work->work);
list_del(&work->queue);
- if (cancel_work_sync(&work->work)) {
+ if (!work->done) { /* work was canceled */
mmdrop(work->mm);
kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
@@ -110,6 +114,8 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
list_entry(vcpu->async_pf.done.next,
typeof(*work), link);
list_del(&work->link);
+ if (!is_error_page(work->page))
+ kvm_release_page_clean(work->page);
kmem_cache_free(async_pf_cache, work);
}
spin_unlock(&vcpu->async_pf.lock);
@@ -129,11 +135,14 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->link);
spin_unlock(&vcpu->async_pf.lock);
- kvm_arch_async_page_ready(vcpu, work);
+ if (work->page)
+ kvm_arch_async_page_ready(vcpu, work);
kvm_arch_async_page_present(vcpu, work);
list_del(&work->queue);
vcpu->async_pf.queued--;
+ if (!is_error_page(work->page))
+ kvm_release_page_clean(work->page);
kmem_cache_free(async_pf_cache, work);
}
}
@@ -156,7 +165,8 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
if (!work)
return 0;
- work->wakeup_all = false;
+ work->page = NULL;
+ work->done = false;
work->vcpu = vcpu;
work->gva = gva;
work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -196,7 +206,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
if (!work)
return -ENOMEM;
- work->wakeup_all = true;
+ work->page = KVM_ERR_PTR_BAD_PAGE;
INIT_LIST_HEAD(&work->queue); /* for list_del to work */
spin_lock(&vcpu->async_pf.lock);
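
A minimal userspace sketch of the bookkeeping pattern in the diff above, for illustration only: rather than trusting the return value of the cancellation call, the worker publishes both its result (the pinned page) and an explicit done flag under the completion lock, and the canceling side inspects done to decide who still owes cleanup. Everything below (struct fake_apf, fake_execute, the pthread plumbing) is hypothetical and only models the flow; it is not the KVM implementation and uses pthreads in place of the kernel workqueue.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_apf {
	pthread_mutex_t lock;
	void *page;	/* stands in for the pinned struct page */
	bool done;	/* set only after the "fault" really completed */
};

static void *fake_execute(void *arg)
{
	struct fake_apf *apf = arg;
	void *page = malloc(64);	/* stands in for get_user_pages() */

	pthread_mutex_lock(&apf->lock);
	apf->page = page;		/* publish the result ...            */
	apf->done = true;		/* ... and completion, under the lock */
	pthread_mutex_unlock(&apf->lock);
	return NULL;
}

int main(int argc, char **argv)
{
	struct fake_apf apf = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t worker;
	bool started = argc > 1;	/* pass any argument to let the "fault" run */

	if (started) {
		pthread_create(&worker, NULL, fake_execute, &apf);
		pthread_join(&worker, NULL);	/* wait, as cancel_work_sync() would */
	}

	pthread_mutex_lock(&apf.lock);
	if (!apf.done) {
		/* work never ran: nothing was pinned, canceller does the cleanup */
		printf("canceled before completion\n");
	} else {
		/* work ran: release the page it recorded */
		free(apf.page);
		printf("completed, page released\n");
	}
	pthread_mutex_unlock(&apf.lock);
	return 0;
}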