author     Jan Kiszka <jan.kiszka@siemens.com>    2013-04-28 04:50:52 -0400
committer  Gleb Natapov <gleb@redhat.com>         2013-04-28 05:44:18 -0400
commit     730dca42c1d363c939da18c1499c7327c66e2b37
tree       f94998cccc58814642c4160da1bb75181af6ba68
parent     6614c7d042eb1096d4eba253b4952bec349f8593
KVM: x86: Rework request for immediate exit
The VMX implementation of enable_irq_window raised
KVM_REQ_IMMEDIATE_EXIT after we checked it in vcpu_enter_guest. This
caused infinite loops on vmentry. Fix it by letting enable_irq_window
signal the need for an immediate exit via its return value and drop
KVM_REQ_IMMEDIATE_EXIT.
This issue only affects nested VMX scenarios.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
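In outline, the reworked contract looks like this (a condensed sketch of the hunks below, not the literal kernel code; the SVM variant and the VMCS programming details are elided):

	/* enable_irq_window() now reports whether an immediate exit is needed. */
	static int enable_irq_window(struct kvm_vcpu *vcpu)
	{
		if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
			return -EBUSY;	/* caller must make L2 exit right after entry */

		/* otherwise arm CPU_BASED_VIRTUAL_INTR_PENDING in the VMCS */
		return 0;
	}

	/* In vcpu_enter_guest(), the return value replaces the request bit: */
	req_immediate_exit = kvm_x86_ops->enable_irq_window(vcpu) != 0;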
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/svm.c              |  3
-rw-r--r--  arch/x86/kvm/vmx.c              | 15
-rw-r--r--  arch/x86/kvm/x86.c              |  7
-rw-r--r--  include/linux/kvm_host.h        | 15
5 files changed, 21 insertions, 21 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 18635ae42a8e..111b4a0c3907 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -694,7 +694,7 @@ struct kvm_x86_ops {
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
-	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+	int (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
 	int (*vm_has_apicv)(struct kvm *kvm);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 15c9cccd716b..7f896cbe717f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3632,7 +3632,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -3646,6 +3646,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
 	}
+	return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0f0cb3110626..74c525e2c608 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4398,22 +4398,23 @@ static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
 		PIN_BASED_NMI_EXITING;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
-	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+
+	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
 		/*
 		 * We get here if vmx_interrupt_allowed() said we can't
-		 * inject to L1 now because L2 must run. Ask L2 to exit
-		 * right after entry, so we can inject to L1 more promptly.
+		 * inject to L1 now because L2 must run. The caller will have
+		 * to make L2 exit right after entry, so we can inject to L1
+		 * more promptly.
 		 */
-		kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
-		return;
-	}
+		return -EBUSY;
 
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2a434bf3918d..c522260b5bbf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5692,7 +5692,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
-	bool req_immediate_exit = 0;
+	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5734,8 +5734,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			record_steal_time(vcpu);
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
-		req_immediate_exit =
-			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
 			kvm_handle_pmu_event(vcpu);
 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
@@ -5757,7 +5755,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.nmi_pending)
 			kvm_x86_ops->enable_nmi_window(vcpu);
 		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
+			req_immediate_exit =
+				kvm_x86_ops->enable_irq_window(vcpu) != 0;
 
 	if (kvm_lapic_enabled(vcpu)) {
 		/*
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 93a50054d46c..7bde42470e37 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -119,14 +119,13 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_APF_HALT          12
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
-#define KVM_REQ_IMMEDIATE_EXIT    15
-#define KVM_REQ_PMU               16
-#define KVM_REQ_PMI               17
-#define KVM_REQ_WATCHDOG          18
-#define KVM_REQ_MASTERCLOCK_UPDATE 19
-#define KVM_REQ_MCLOCK_INPROGRESS 20
-#define KVM_REQ_EPR_EXIT          21
-#define KVM_REQ_SCAN_IOAPIC       22
+#define KVM_REQ_PMU               15
+#define KVM_REQ_PMI               16
+#define KVM_REQ_WATCHDOG          17
+#define KVM_REQ_MASTERCLOCK_UPDATE 18
+#define KVM_REQ_MCLOCK_INPROGRESS 19
+#define KVM_REQ_EPR_EXIT          20
+#define KVM_REQ_SCAN_IOAPIC       21
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1