diff options
author | Gleb Natapov <gleb@redhat.com> | 2010-10-14 05:22:53 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-01-12 04:23:17 -0500 |
commit | 7c90705bf2a373aa238661bdb6446f27299ef489 (patch) | |
tree | d3d00b4413b0d33254d53bbb3285be82444494d9 /arch/x86/kvm/x86.c | |
parent | 631bc4878220932fe67fc46fc7cf7cccdb1ec597 (diff) |
KVM: Inject asynchronous page fault into a PV guest if page is swapped out.
Send an async page fault to a PV guest if it accesses swapped-out memory.
Guest will choose another task to run upon receiving the fault.
Allow async page fault injection only when the guest is in user mode,
since otherwise the guest may be in a non-sleepable context and will
not be able to reschedule.
The vcpu will be halted if the guest faults on the same page again or if
it executes kernel code.
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 43 |
1 files changed, 38 insertions, 5 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 063c07296764..ac4c368afd40 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -6248,20 +6248,53 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
6248 | } | 6248 | } |
6249 | } | 6249 | } |
6250 | 6250 | ||
6251 | static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) | ||
6252 | { | ||
6253 | |||
6254 | return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, | ||
6255 | sizeof(val)); | ||
6256 | } | ||
6257 | |||
6251 | void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, | 6258 | void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, |
6252 | struct kvm_async_pf *work) | 6259 | struct kvm_async_pf *work) |
6253 | { | 6260 | { |
6254 | trace_kvm_async_pf_not_present(work->gva); | 6261 | trace_kvm_async_pf_not_present(work->arch.token, work->gva); |
6255 | |||
6256 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); | ||
6257 | kvm_add_async_pf_gfn(vcpu, work->arch.gfn); | 6262 | kvm_add_async_pf_gfn(vcpu, work->arch.gfn); |
6263 | |||
6264 | if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || | ||
6265 | kvm_x86_ops->get_cpl(vcpu) == 0) | ||
6266 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); | ||
6267 | else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { | ||
6268 | vcpu->arch.fault.error_code = 0; | ||
6269 | vcpu->arch.fault.address = work->arch.token; | ||
6270 | kvm_inject_page_fault(vcpu); | ||
6271 | } | ||
6258 | } | 6272 | } |
6259 | 6273 | ||
6260 | void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, | 6274 | void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, |
6261 | struct kvm_async_pf *work) | 6275 | struct kvm_async_pf *work) |
6262 | { | 6276 | { |
6263 | trace_kvm_async_pf_ready(work->gva); | 6277 | trace_kvm_async_pf_ready(work->arch.token, work->gva); |
6264 | kvm_del_async_pf_gfn(vcpu, work->arch.gfn); | 6278 | if (is_error_page(work->page)) |
6279 | work->arch.token = ~0; /* broadcast wakeup */ | ||
6280 | else | ||
6281 | kvm_del_async_pf_gfn(vcpu, work->arch.gfn); | ||
6282 | |||
6283 | if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && | ||
6284 | !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { | ||
6285 | vcpu->arch.fault.error_code = 0; | ||
6286 | vcpu->arch.fault.address = work->arch.token; | ||
6287 | kvm_inject_page_fault(vcpu); | ||
6288 | } | ||
6289 | } | ||
6290 | |||
6291 | bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) | ||
6292 | { | ||
6293 | if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) | ||
6294 | return true; | ||
6295 | else | ||
6296 | return !kvm_event_needs_reinjection(vcpu) && | ||
6297 | kvm_x86_ops->interrupt_allowed(vcpu); | ||
6265 | } | 6298 | } |
6266 | 6299 | ||
6267 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); | 6300 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); |