author	chai wen <chaiw.fnst@cn.fujitsu.com>	2013-10-14 10:22:33 -0400
committer	Gleb Natapov <gleb@redhat.com>	2013-10-15 06:43:37 -0400
commit	f2e106692d5189303997ad7b96de8d8123aa5613 (patch)
tree	be15cad31bc5701570a571b3be04e299bb4ac547 /virt
parent	a7efdf6bec34f1a693a926ebd08de6ba6e700dff (diff)
KVM: Drop FOLL_GET in GUP when doing async page fault
Page pinning is not mandatory in KVM async page fault processing: after the
async page fault event is delivered to the guest, the guest accesses the page
again and does its own GUP. Drop the FOLL_GET flag from the GUP call in the
async_pf code, and simplify the completion check/clear processing accordingly.

Suggested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Gu zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: chai wen <chaiw.fnst@cn.fujitsu.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
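For context, a minimal sketch of the mechanism (assuming the 3.12-era
get_user_pages() prototype that matches the call site in the diff below): GUP
only sets FOLL_GET internally when the caller passes a non-NULL pages array,
so calling it with pages == NULL faults the page in without taking a
reference on it:

	/*
	 * Sketch, not part of this patch: the prototype matching the call
	 * get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL) in
	 * async_pf_execute() below.
	 */
	long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    int write, int force,
			    struct page **pages,  /* NULL => FOLL_GET not set, no pin */
			    struct vm_area_struct **vmas);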
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/async_pf.c	17
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index b197950ac4d5..8631d9c14320 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-	struct page *page = NULL;
 	struct kvm_async_pf *apf =
 		container_of(work, struct kvm_async_pf, work);
 	struct mm_struct *mm = apf->mm;
@@ -68,13 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
 	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
-	apf->page = page;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -82,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, page, gva);
+	trace_kvm_async_pf_completed(addr, gva);
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 		list_del(&work->link);
 		spin_unlock(&vcpu->async_pf.lock);
 
-		if (work->page)
-			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_ready(vcpu, work);
 		kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 }
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	if (!work)
 		return 0;
 
-	work->page = NULL;
+	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
 	work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = KVM_ERR_PTR_BAD_PAGE;
+	work->wakeup_all = true;
 	INIT_LIST_HEAD(&work->queue);	/* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
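The wakeup_all flag written above replaces the old work->page sentinel (NULL
for a normal async fault, KVM_ERR_PTR_BAD_PAGE for the broadcast wakeup
injected by kvm_async_pf_wakeup_all()). The arch-side consumers are outside
this virt-only diffstat, so the following is only a hypothetical sketch of
how a consumer can distinguish the broadcast entry now that the bad-page
sentinel is gone:

	/* hypothetical sketch; the actual arch code is not part of this diff */
	void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
				       struct kvm_async_pf *work)
	{
		if (work->wakeup_all)	/* broadcast wakeup: no real page to map */
			return;
		/* ... prefault work->gva now that the page is resident ... */
	}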