diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2013-04-28 04:50:52 -0400 |
---|---|---|
committer | Gleb Natapov <gleb@redhat.com> | 2013-04-28 05:44:18 -0400 |
commit | 730dca42c1d363c939da18c1499c7327c66e2b37 (patch) | |
tree | f94998cccc58814642c4160da1bb75181af6ba68 /arch/x86/kvm/x86.c | |
parent | 6614c7d042eb1096d4eba253b4952bec349f8593 (diff) |
KVM: x86: Rework request for immediate exit
The VMX implementation of enable_irq_window raised
KVM_REQ_IMMEDIATE_EXIT after we checked it in vcpu_enter_guest. This
caused infinite loops on vmentry. Fix it by letting enable_irq_window
signal the need for an immediate exit via its return value and drop
KVM_REQ_IMMEDIATE_EXIT.
This issue only affects nested VMX scenarios.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 7 |
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2a434bf3918d..c522260b5bbf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5692,7 +5692,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
5692 | int r; | 5692 | int r; |
5693 | bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && | 5693 | bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && |
5694 | vcpu->run->request_interrupt_window; | 5694 | vcpu->run->request_interrupt_window; |
5695 | bool req_immediate_exit = 0; | 5695 | bool req_immediate_exit = false; |
5696 | 5696 | ||
5697 | if (vcpu->requests) { | 5697 | if (vcpu->requests) { |
5698 | if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) | 5698 | if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) |
@@ -5734,8 +5734,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
5734 | record_steal_time(vcpu); | 5734 | record_steal_time(vcpu); |
5735 | if (kvm_check_request(KVM_REQ_NMI, vcpu)) | 5735 | if (kvm_check_request(KVM_REQ_NMI, vcpu)) |
5736 | process_nmi(vcpu); | 5736 | process_nmi(vcpu); |
5737 | req_immediate_exit = | ||
5738 | kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu); | ||
5739 | if (kvm_check_request(KVM_REQ_PMU, vcpu)) | 5737 | if (kvm_check_request(KVM_REQ_PMU, vcpu)) |
5740 | kvm_handle_pmu_event(vcpu); | 5738 | kvm_handle_pmu_event(vcpu); |
5741 | if (kvm_check_request(KVM_REQ_PMI, vcpu)) | 5739 | if (kvm_check_request(KVM_REQ_PMI, vcpu)) |
@@ -5757,7 +5755,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
5757 | if (vcpu->arch.nmi_pending) | 5755 | if (vcpu->arch.nmi_pending) |
5758 | kvm_x86_ops->enable_nmi_window(vcpu); | 5756 | kvm_x86_ops->enable_nmi_window(vcpu); |
5759 | else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) | 5757 | else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) |
5760 | kvm_x86_ops->enable_irq_window(vcpu); | 5758 | req_immediate_exit = |
5759 | kvm_x86_ops->enable_irq_window(vcpu) != 0; | ||
5761 | 5760 | ||
5762 | if (kvm_lapic_enabled(vcpu)) { | 5761 | if (kvm_lapic_enabled(vcpu)) { |
5763 | /* | 5762 | /* |