| author | Avi Kivity <avi@redhat.com> | 2012-05-14 11:07:56 -0400 |
|---|---|---|
| committer | Marcelo Tosatti <mtosatti@redhat.com> | 2012-05-16 17:09:26 -0400 | |
| commit | d8368af8b46b904def42a0f341d2f4f29001fa77 (patch) | |
| tree | 00ae5723342936821b855356544bef08ac967b3d /arch/x86/kvm/x86.c | |
| parent | c142786c6291189b5c85f53d91743e1eefbd8fe0 (diff) | |
KVM: Fix mmu_reload() clash with nested vmx event injection
Currently the inject_pending_event() call during guest entry happens after
kvm_mmu_reload(). This is for historical reasons: we used to call
inject_pending_event() in atomic context, while kvm_mmu_reload() needs task
context.
A problem is that nested vmx can cause the mmu context to be reset if event
injection is intercepted and causes a nested #VMEXIT instead (the #VMEXIT
resets CR0/CR3/CR4). If this happens we end up with an invalid root_hpa, and
since kvm_mmu_reload() has already run, nothing will fix it up and we enter
the guest with a stale mmu root.
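For illustration, the pre-patch shape of vcpu_enter_guest() looked like this in simplified form (the calls match the diff below; the comments are editorial annotations, not part of the original code):

```c
	/* Old ordering in vcpu_enter_guest(), simplified: */
	r = kvm_mmu_reload(vcpu);	/* root_hpa is made valid here */
	if (unlikely(r))
		goto out;

	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
		inject_pending_event(vcpu);	/* if L1 intercepts the event,
						 * the resulting #VMEXIT resets
						 * CR0/CR3/CR4 and invalidates
						 * root_hpa; nothing reloads
						 * the MMU again before guest
						 * entry */
		/* ... interrupt window handling elided ... */
	}
```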
Fix by reordering event injection so that it happens before kvm_mmu_reload(),
and use ->cancel_injection() to undo the injection if kvm_mmu_reload() fails.
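With the patch applied, the sequence becomes the following (again simplified from the diff below, with editorial comments):

```c
	/* New ordering: inject first, reload second. */
	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
		inject_pending_event(vcpu);	/* any nested-#VMEXIT mmu
						 * context reset happens
						 * before the reload */
		/* ... interrupt window handling elided ... */
	}

	r = kvm_mmu_reload(vcpu);	/* runs after any reset, so it can
					 * rebuild a valid root_hpa */
	if (unlikely(r)) {
		/* The reload failed after an event was already queued;
		 * back the injection out so the event is not lost and
		 * can be re-injected on the next entry attempt. */
		kvm_x86_ops->cancel_injection(vcpu);
		goto out;
	}
```

The ->cancel_injection() call matters because by this point the event has already been programmed into the VMCS/VMCB; simply aborting the entry would drop it.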
https://bugzilla.kernel.org/show_bug.cgi?id=42980
Reported-by: Luke-Jr <luke-jr+linuxbugs@utopios.org>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4de705cdcafd..b78f89d34242 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5279,10 +5279,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_deliver_pmi(vcpu);
 	}
 
-	r = kvm_mmu_reload(vcpu);
-	if (unlikely(r))
-		goto out;
-
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
 		inject_pending_event(vcpu);
 
@@ -5298,6 +5294,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r)) {
+		kvm_x86_ops->cancel_injection(vcpu);
+		goto out;
+	}
+
 	preempt_disable();
 
 	kvm_x86_ops->prepare_guest_switch(vcpu);