author     chai wen <chaiw.fnst@cn.fujitsu.com>  2013-10-14 10:22:33 -0400
committer  Gleb Natapov <gleb@redhat.com>        2013-10-15 06:43:37 -0400
commit     f2e106692d5189303997ad7b96de8d8123aa5613 (patch)
tree       be15cad31bc5701570a571b3be04e299bb4ac547
parent     a7efdf6bec34f1a693a926ebd08de6ba6e700dff (diff)
KVM: Drop FOLL_GET in GUP when doing async page fault
Page pinning is not mandatory in KVM async page fault processing: after the async page fault event is delivered to the guest, the guest accesses the page once again and does its own GUP. Drop the FOLL_GET flag from the GUP call in the async_pf code, and simplify the check/clear processing accordingly.

Suggested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Gu zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: chai wen <chaiw.fnst@cn.fujitsu.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
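For context, a minimal sketch of the core change in the async_pf worker, assuming the 2013-era get_user_pages() signature (task, mm, start, nr_pages, write, force, pages, vmas); passing NULL for the pages array makes GUP fault the page in without taking a reference, since FOLL_GET is only set when the caller asks for the pages back:

    /* Before: fault the page in and pin it; the pin forces the
     * completion paths to release the page later.
     */
    struct page *page = NULL;
    get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
    /* ... on completion ... */
    if (!is_error_page(page))
            kvm_release_page_clean(page);

    /* After: only fault the page in. Once the async page fault is
     * delivered, the guest re-accesses the page and does its own GUP,
     * so no pin (and no release) is needed here.
     */
    get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);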
-rw-r--r--  arch/x86/kvm/x86.c          4
-rw-r--r--  include/linux/kvm_host.h    2
-rw-r--r--  include/trace/events/kvm.h  10
-rw-r--r--  virt/kvm/async_pf.c         17
4 files changed, 12 insertions, 21 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c951c71dc80b..edf2a07df3a3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7298,7 +7298,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	int r;
 
 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
-	      is_error_page(work->page))
+	      work->wakeup_all)
 		return;
 
 	r = kvm_mmu_reload(vcpu);
@@ -7408,7 +7408,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	struct x86_exception fault;
 
 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
-	if (is_error_page(work->page))
+	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f6dccde755f6..c9d4236ab442 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -189,7 +189,7 @@ struct kvm_async_pf {
 	gva_t gva;
 	unsigned long addr;
 	struct kvm_arch_async_pf arch;
-	struct page *page;
+	bool wakeup_all;
 };
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 7005d1109ec9..131a0bda7aec 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -296,23 +296,21 @@ DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
 
 TRACE_EVENT(
 	kvm_async_pf_completed,
-	TP_PROTO(unsigned long address, struct page *page, u64 gva),
-	TP_ARGS(address, page, gva),
+	TP_PROTO(unsigned long address, u64 gva),
+	TP_ARGS(address, gva),
 
 	TP_STRUCT__entry(
 		__field(unsigned long, address)
-		__field(pfn_t, pfn)
 		__field(u64, gva)
 		),
 
 	TP_fast_assign(
 		__entry->address = address;
-		__entry->pfn = page ? page_to_pfn(page) : 0;
 		__entry->gva = gva;
 		),
 
-	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
-		  __entry->address, __entry->pfn)
+	TP_printk("gva %#llx address %#lx", __entry->gva,
+		  __entry->address)
 );
 
 #endif
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index b197950ac4d5..8631d9c14320 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-	struct page *page = NULL;
 	struct kvm_async_pf *apf =
 		container_of(work, struct kvm_async_pf, work);
 	struct mm_struct *mm = apf->mm;
@@ -68,13 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
 	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
-	apf->page = page;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -82,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, page, gva);
+	trace_kvm_async_pf_completed(addr, gva);
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 		list_del(&work->link);
 		spin_unlock(&vcpu->async_pf.lock);
 
-		if (work->page)
-			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_ready(vcpu, work);
 		kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 }
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	if (!work)
 		return 0;
 
-	work->page = NULL;
+	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
 	work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = KVM_ERR_PTR_BAD_PAGE;
+	work->wakeup_all = true;
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);