author	Yuki Shibuya <shibuya.yk@ncos.nec.co.jp>	2016-03-24 01:17:03 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-04-01 06:10:09 -0400
commit	321c5658c5e9192dea0d58ab67cf1791e45b2b26 (patch)
tree	3c5f9beb62f2a937adeaf511e74388cf7127ddc9
parent	c26e5f303a1e10a76dab81cd4c93dfec3a8bff6b (diff)
KVM: x86: Inject pending interrupt even if a pending NMI exists
Non-maskable interrupts (NMIs) take priority over interrupts in the current implementation. If an NMI is pending but blocked according to nmi_allowed(), the pending interrupt is not injected and enable_irq_window() is not executed, even though interrupt injection would be allowed.

In old guest kernels (e.g. 2.6.32), schedule() is often called in NMI context. In that case an interrupt is needed to reach the iret that ends the NMI. However, the flag blocking new NMIs is not cleared until the guest executes that iret, and interrupts remain blocked by the pending NMI. As a result, the iret is never reached and the guest starves until the block is cleared by some other event (e.g. cancelling the injection).

This patch injects a pending interrupt, when injection is allowed, even if an NMI is blocked. In addition, if an interrupt is still pending after inject_pending_event(), enable_irq_window() is executed regardless of the NMI pending counter.

Cc: stable@vger.kernel.org
Signed-off-by: Yuki Shibuya <shibuya.yk@ncos.nec.co.jp>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/x86.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
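Before the diff itself, a minimal standalone sketch of the decision this patch alters. The struct and function names below are illustrative stand-ins, not kernel code: the stub booleans represent vcpu->arch.nmi_pending, the result of ->nmi_allowed(), and kvm_cpu_has_injectable_intr().

/*
 * Sketch only: compares the old and new injection policy.
 * Old: a pending-but-blocked NMI also blocks interrupt injection.
 * New: only an NMI that is actually allowed takes priority.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	bool nmi_pending;     /* an NMI is queued for injection         */
	bool nmi_allowed;     /* ->nmi_allowed(): NMI is not blocked     */
	bool intr_injectable; /* an interrupt is ready to be injected    */
};

/* Old behaviour: the pending NMI swallows the interrupt even when blocked. */
static const char *inject_old(const struct vcpu_state *v)
{
	if (v->nmi_pending) {
		if (v->nmi_allowed)
			return "inject NMI";
		return "inject nothing (interrupt starved)";
	}
	if (v->intr_injectable)
		return "inject interrupt";
	return "inject nothing";
}

/* New behaviour: a blocked NMI no longer prevents interrupt injection. */
static const char *inject_new(const struct vcpu_state *v)
{
	if (v->nmi_pending && v->nmi_allowed)
		return "inject NMI";
	if (v->intr_injectable)
		return "inject interrupt";
	return "inject nothing";
}

int main(void)
{
	/* The problematic case from the commit message: an NMI is pending
	 * but blocked until iret, and the guest needs an interrupt to
	 * reach that iret. */
	struct vcpu_state v = { true, false, true };

	printf("old: %s\n", inject_old(&v)); /* interrupt starved   */
	printf("new: %s\n", inject_new(&v)); /* interrupt injected  */
	return 0;
}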
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 742d0f7d3556..0a2c70e43bc8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	}
 
 	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
-			--vcpu->arch.nmi_pending;
-			vcpu->arch.nmi_injected = true;
-			kvm_x86_ops->set_nmi(vcpu);
-		}
+	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+		--vcpu->arch.nmi_pending;
+		vcpu->arch.nmi_injected = true;
+		kvm_x86_ops->set_nmi(vcpu);
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
 		/*
 		 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (inject_pending_event(vcpu, req_int_win) != 0)
 			req_immediate_exit = true;
 		/* enable NMI/IRQ window open exits if needed */
-		else if (vcpu->arch.nmi_pending)
-			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
+		else {
+			if (vcpu->arch.nmi_pending)
+				kvm_x86_ops->enable_nmi_window(vcpu);
+			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+				kvm_x86_ops->enable_irq_window(vcpu);
+		}
 
 		if (kvm_lapic_enabled(vcpu)) {
 			update_cr8_intercept(vcpu);