| author    | Bandan Das <bsd@redhat.com>                      | 2014-07-08 00:30:23 -0400 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Paolo Bonzini <pbonzini@redhat.com>              | 2014-07-08 04:06:42 -0400 |
| commit    | 9242b5b60df8b13b469bc6b7be08ff6ebb551ad3 (patch) |                           |
| tree      | c490ce887713f72c5ba0df7c214693abf7fa60f4 /arch   |                           |
| parent    | cd3de83f147601356395b57a8673e9c5ff1e59d1 (diff)  |                           |
KVM: x86: Check for nested events if there is an injectable interrupt
With commit b6b8a1451fc40412c57d1 that introduced
vmx_check_nested_events, checks for injectable interrupts happen
at different points in time for L1 and L2, which could potentially
cause a race. The regression occurs because KVM_REQ_EVENT is always
set when nested_run_pending is set, even if there is no pending interrupt.
Consequently, there is a small window in which check_nested_events
returns without exiting to L1, but an interrupt comes through soon
after and incorrectly gets injected into L2 by inject_pending_event.
Fix this by also checking for nested events when the check for an
injectable interrupt returns true.
Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/x86.c | 12 ++++++++++++
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6449334ec45..ef432f891d30 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			kvm_x86_ops->set_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+		/*
+		 * Because interrupts can be injected asynchronously, we are
+		 * calling check_nested_events again here to avoid a race condition.
+		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+		 * proposal and current concerns. Perhaps we should be setting
+		 * KVM_REQ_EVENT only on certain events and not unconditionally?
+		 */
+		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+			if (r != 0)
+				return r;
+		}
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
 					    false);
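
As a reading aid only, the sketch below is a hypothetical userspace mock of the ordering this patch establishes: when the vCPU is in guest mode and an injectable interrupt is pending, the nested-events check runs before the interrupt is queued, so the interrupt can be turned into a vmexit to L1 instead of being injected into L2. All *_mock names and the l1_wants_intr_exit flag are invented for illustration and do not exist in KVM; only the decision order mirrors the patch.

```c
/*
 * Hypothetical userspace mock -- NOT kernel code.  Every *_mock name is
 * invented for illustration; only the decision order mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the vCPU state the real code consults. */
struct vcpu_mock {
	bool guest_mode;         /* is_guest_mode(vcpu): vCPU is running L2   */
	bool injectable_intr;    /* kvm_cpu_has_injectable_intr(vcpu)         */
	bool intr_allowed;       /* kvm_x86_ops->interrupt_allowed(vcpu)      */
	bool l1_wants_intr_exit; /* L1 intercepts external interrupts (mock)  */
};

/* Mock of check_nested_events(): nonzero means "exit to L1 now". */
static int check_nested_events_mock(struct vcpu_mock *v, bool req_int_win)
{
	(void)req_int_win;
	return (v->injectable_intr && v->l1_wants_intr_exit) ? 1 : 0;
}

/* Mirrors the interrupt branch of inject_pending_event() after the patch. */
static int inject_pending_intr_mock(struct vcpu_mock *v, bool req_int_win)
{
	if (!v->injectable_intr)
		return 0;

	/*
	 * The patch's addition: consult the nested-events logic first, so a
	 * late-arriving interrupt triggers a vmexit to L1 instead of being
	 * injected into L2.
	 */
	if (v->guest_mode) {
		int r = check_nested_events_mock(v, req_int_win);
		if (r != 0) {
			printf("vmexit to L1 instead of injecting into L2\n");
			return r;
		}
	}

	/* Pre-existing path: queue the interrupt for the current guest. */
	if (v->intr_allowed)
		printf("interrupt queued for the currently running guest\n");
	return 0;
}

int main(void)
{
	struct vcpu_mock v = {
		.guest_mode = true,
		.injectable_intr = true,
		.intr_allowed = true,
		.l1_wants_intr_exit = true,
	};

	/* With the patched ordering, the interrupt is routed to L1, not L2. */
	inject_pending_intr_mock(&v, true);
	return 0;
}
```

The only behavioral point the mock captures is the reordering: once the nested-events check can veto injection from this spot, the window described in the commit message, where an interrupt arrives just after check_nested_events has declined to exit, can no longer result in the interrupt being delivered to L2.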