author		Gleb Natapov <gleb@redhat.com>		2010-10-14 05:22:53 -0400
committer	Avi Kivity <avi@redhat.com>		2011-01-12 04:23:17 -0500
commit		7c90705bf2a373aa238661bdb6446f27299ef489
tree		d3d00b4413b0d33254d53bbb3285be82444494d9
parent		631bc4878220932fe67fc46fc7cf7cccdb1ec597
KVM: Inject asynchronous page fault into a PV guest if page is swapped out.
Send an async page fault to a PV guest if it accesses swapped-out
memory. The guest will choose another task to run upon receiving the
fault.

Allow async page fault injection only when the guest is in user mode,
since otherwise the guest may be in a non-sleepable context and unable
to reschedule.

The vcpu is halted if the guest faults on the same page again or if the
vcpu executes kernel code.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
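For orientation: the token attached to each async fault (see the mmu.c
hunk below) packs a per-vcpu sequence number and the vcpu id into a
single 32-bit value, so the guest can later match a PAGE_READY wakeup
to the PAGE_NOT_PRESENT notification it received. A minimal standalone
sketch of the encoding — plain userspace C, not part of the patch, and
assuming vcpu ids fit in the low 12 bits:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch (not part of the patch) of the token construction used in
 * the mmu.c hunk below:
 *
 *	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 *
 * The low 12 bits carry the vcpu id (assumed here to fit in 12 bits);
 * the upper bits carry a per-vcpu sequence number, so each outstanding
 * fault gets a token the guest can match wakeups against.
 */
static uint32_t make_token(uint32_t *id, uint32_t vcpu_id)
{
	return ((*id)++ << 12) | vcpu_id;
}

int main(void)
{
	uint32_t id = 1;
	uint32_t token = make_token(&id, 3);

	printf("token %#x -> vcpu %u, seq %u\n",
	       token, token & 0xfff, token >> 12);
	/* ~0 never collides with a real token; the x86.c hunk below
	 * reserves it as a "broadcast wakeup" for error pages. */
	printf("broadcast token %#x\n", ~0u);
	return 0;
}

Run, this prints "token 0x1003 -> vcpu 3, seq 1". Because a real token
always combines a bounded vcpu id with an incrementing sequence, the
all-ones value is safe to reserve for broadcast.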
Diffstat
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  3 +++
-rw-r--r--  arch/x86/kvm/mmu.c              |  1 +
-rw-r--r--  arch/x86/kvm/x86.c              | 43 +++++++++++++++++++++-----
-rw-r--r--  include/trace/events/kvm.h      | 17 +++++++++-----
-rw-r--r--  virt/kvm/async_pf.c             |  3 ++-
5 files changed, 55 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0d7039804b4..167375cc49f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -421,6 +421,7 @@ struct kvm_vcpu_arch {
 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
 		struct gfn_to_hva_cache data;
 		u64 msr_val;
+		u32 id;
 	} apf;
 };
 
@@ -596,6 +597,7 @@ struct kvm_x86_ops {
 };
 
 struct kvm_arch_async_pf {
+	u32 token;
 	gfn_t gfn;
 };
 
@@ -819,6 +821,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work);
 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 			       struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b2c60986a7c..64f90db369f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2592,6 +2592,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 {
 	struct kvm_arch_async_pf arch;
+	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 	arch.gfn = gfn;
 
 	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 063c0729676..ac4c368afd4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6248,20 +6248,53 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 	}
 }
 
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+				      sizeof(val));
+}
+
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
-	trace_kvm_async_pf_not_present(work->gva);
-
-	kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+	    kvm_x86_ops->get_cpl(vcpu) == 0)
+		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+		vcpu->arch.fault.error_code = 0;
+		vcpu->arch.fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu);
+	}
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work)
 {
-	trace_kvm_async_pf_ready(work->gva);
-	kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+	if (is_error_page(work->page))
+		work->arch.token = ~0; /* broadcast wakeup */
+	else
+		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+		vcpu->arch.fault.error_code = 0;
+		vcpu->arch.fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu);
+	}
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+		return true;
+	else
+		return !kvm_event_needs_reinjection(vcpu) &&
+			kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
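For context, the guest half of this protocol lives in separate guest
patches, not in this commit: the guest registers a per-cpu word through
the async-PF MSR, apf_put_user() above writes the reason code into it,
and the injected page fault delivers the token as the fault address. A
hypothetical, simplified sketch of how a guest might consume those
values (names and structure are illustrative, not the actual guest
code):

#include <stdint.h>

/* Values written by the host's apf_put_user() above. */
#define KVM_PV_REASON_PAGE_NOT_PRESENT	1
#define KVM_PV_REASON_PAGE_READY	2

/* Per-cpu word whose address the guest registered via the async-PF
 * MSR; kvm_write_guest_cached() on the host stores the reason here. */
static volatile uint32_t apf_reason;

static uint32_t read_and_reset_apf_reason(void)
{
	uint32_t reason = apf_reason;

	apf_reason = 0;
	return reason;
}

/*
 * Hypothetical guest #PF entry point: 'token' is the fault address
 * (vcpu->arch.fault.address on the host side).  Returns nonzero if the
 * fault was an async-PF notification rather than a real page fault.
 */
int handle_async_pf(uint32_t token)
{
	switch (read_and_reset_apf_reason()) {
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* Page is being swapped in; sleep this task, keyed by
		 * 'token', and schedule another one. */
		return 1;
	case KVM_PV_REASON_PAGE_READY:
		/* Wake the task sleeping on 'token'; a token of ~0
		 * means "wake everyone" (the error-page case above). */
		return 1;
	default:
		return 0;	/* ordinary page fault */
	}
}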
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index a78a5e57463..9c2cc6a96e8 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -204,34 +204,39 @@ TRACE_EVENT(
 
 TRACE_EVENT(
 	kvm_async_pf_not_present,
-	TP_PROTO(u64 gva),
-	TP_ARGS(gva),
+	TP_PROTO(u64 token, u64 gva),
+	TP_ARGS(token, gva),
 
 	TP_STRUCT__entry(
+		__field(__u64, token)
 		__field(__u64, gva)
 	),
 
 	TP_fast_assign(
+		__entry->token = token;
 		__entry->gva = gva;
 	),
 
-	TP_printk("gva %#llx not present", __entry->gva)
+	TP_printk("token %#llx gva %#llx not present", __entry->token,
+		__entry->gva)
 );
 
 TRACE_EVENT(
 	kvm_async_pf_ready,
-	TP_PROTO(u64 gva),
-	TP_ARGS(gva),
+	TP_PROTO(u64 token, u64 gva),
+	TP_ARGS(token, gva),
 
 	TP_STRUCT__entry(
+		__field(__u64, token)
 		__field(__u64, gva)
 	),
 
 	TP_fast_assign(
+		__entry->token = token;
 		__entry->gva = gva;
 	),
 
-	TP_printk("gva %#llx ready", __entry->gva)
+	TP_printk("token %#llx gva %#llx ready", __entry->token, __entry->gva)
 );
 
 TRACE_EVENT(
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 1f59498561b..60df9e059e6 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -124,7 +124,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
 
-	if (list_empty_careful(&vcpu->async_pf.done))
+	if (list_empty_careful(&vcpu->async_pf.done) ||
+	    !kvm_arch_can_inject_async_page_present(vcpu))
 		return;
 
 	spin_lock(&vcpu->async_pf.lock);