diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2013-04-28 04:50:52 -0400 |
---|---|---|
committer | Gleb Natapov <gleb@redhat.com> | 2013-04-28 05:44:18 -0400 |
commit | 730dca42c1d363c939da18c1499c7327c66e2b37 (patch) | |
tree | f94998cccc58814642c4160da1bb75181af6ba68 /arch/x86/kvm | |
parent | 6614c7d042eb1096d4eba253b4952bec349f8593 (diff) |
KVM: x86: Rework request for immediate exit
The VMX implementation of enable_irq_window raised
KVM_REQ_IMMEDIATE_EXIT after we checked it in vcpu_enter_guest. This
caused infinite loops on vmentry. Fix it by letting enable_irq_window
signal the need for an immediate exit via its return value and by
dropping KVM_REQ_IMMEDIATE_EXIT.
This issue only affects nested VMX scenarios.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/svm.c | 3 | ||||
-rw-r--r-- | arch/x86/kvm/vmx.c | 15 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 7 |
3 files changed, 13 insertions, 12 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 15c9cccd716b..7f896cbe717f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3632,7 +3632,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) | |||
3632 | return ret; | 3632 | return ret; |
3633 | } | 3633 | } |
3634 | 3634 | ||
3635 | static void enable_irq_window(struct kvm_vcpu *vcpu) | 3635 | static int enable_irq_window(struct kvm_vcpu *vcpu) |
3636 | { | 3636 | { |
3637 | struct vcpu_svm *svm = to_svm(vcpu); | 3637 | struct vcpu_svm *svm = to_svm(vcpu); |
3638 | 3638 | ||
@@ -3646,6 +3646,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) | |||
3646 | svm_set_vintr(svm); | 3646 | svm_set_vintr(svm); |
3647 | svm_inject_irq(svm, 0x0); | 3647 | svm_inject_irq(svm, 0x0); |
3648 | } | 3648 | } |
3649 | return 0; | ||
3649 | } | 3650 | } |
3650 | 3651 | ||
3651 | static void enable_nmi_window(struct kvm_vcpu *vcpu) | 3652 | static void enable_nmi_window(struct kvm_vcpu *vcpu) |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0f0cb3110626..74c525e2c608 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -4398,22 +4398,23 @@ static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) | |||
4398 | PIN_BASED_NMI_EXITING; | 4398 | PIN_BASED_NMI_EXITING; |
4399 | } | 4399 | } |
4400 | 4400 | ||
4401 | static void enable_irq_window(struct kvm_vcpu *vcpu) | 4401 | static int enable_irq_window(struct kvm_vcpu *vcpu) |
4402 | { | 4402 | { |
4403 | u32 cpu_based_vm_exec_control; | 4403 | u32 cpu_based_vm_exec_control; |
4404 | if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) { | 4404 | |
4405 | if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) | ||
4405 | /* | 4406 | /* |
4406 | * We get here if vmx_interrupt_allowed() said we can't | 4407 | * We get here if vmx_interrupt_allowed() said we can't |
4407 | * inject to L1 now because L2 must run. Ask L2 to exit | 4408 | * inject to L1 now because L2 must run. The caller will have |
4408 | * right after entry, so we can inject to L1 more promptly. | 4409 | * to make L2 exit right after entry, so we can inject to L1 |
4410 | * more promptly. | ||
4409 | */ | 4411 | */ |
4410 | kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu); | 4412 | return -EBUSY; |
4411 | return; | ||
4412 | } | ||
4413 | 4413 | ||
4414 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | 4414 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
4415 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | 4415 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; |
4416 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | 4416 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
4417 | return 0; | ||
4417 | } | 4418 | } |
4418 | 4419 | ||
4419 | static void enable_nmi_window(struct kvm_vcpu *vcpu) | 4420 | static void enable_nmi_window(struct kvm_vcpu *vcpu) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2a434bf3918d..c522260b5bbf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5692,7 +5692,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
5692 | int r; | 5692 | int r; |
5693 | bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && | 5693 | bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && |
5694 | vcpu->run->request_interrupt_window; | 5694 | vcpu->run->request_interrupt_window; |
5695 | bool req_immediate_exit = 0; | 5695 | bool req_immediate_exit = false; |
5696 | 5696 | ||
5697 | if (vcpu->requests) { | 5697 | if (vcpu->requests) { |
5698 | if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) | 5698 | if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) |
@@ -5734,8 +5734,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
5734 | record_steal_time(vcpu); | 5734 | record_steal_time(vcpu); |
5735 | if (kvm_check_request(KVM_REQ_NMI, vcpu)) | 5735 | if (kvm_check_request(KVM_REQ_NMI, vcpu)) |
5736 | process_nmi(vcpu); | 5736 | process_nmi(vcpu); |
5737 | req_immediate_exit = | ||
5738 | kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu); | ||
5739 | if (kvm_check_request(KVM_REQ_PMU, vcpu)) | 5737 | if (kvm_check_request(KVM_REQ_PMU, vcpu)) |
5740 | kvm_handle_pmu_event(vcpu); | 5738 | kvm_handle_pmu_event(vcpu); |
5741 | if (kvm_check_request(KVM_REQ_PMI, vcpu)) | 5739 | if (kvm_check_request(KVM_REQ_PMI, vcpu)) |
@@ -5757,7 +5755,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
5757 | if (vcpu->arch.nmi_pending) | 5755 | if (vcpu->arch.nmi_pending) |
5758 | kvm_x86_ops->enable_nmi_window(vcpu); | 5756 | kvm_x86_ops->enable_nmi_window(vcpu); |
5759 | else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) | 5757 | else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) |
5760 | kvm_x86_ops->enable_irq_window(vcpu); | 5758 | req_immediate_exit = |
5759 | kvm_x86_ops->enable_irq_window(vcpu) != 0; | ||
5761 | 5760 | ||
5762 | if (kvm_lapic_enabled(vcpu)) { | 5761 | if (kvm_lapic_enabled(vcpu)) { |
5763 | /* | 5762 | /* |