 arch/x86/include/asm/kvm_host.h |  2 +
 arch/x86/kvm/vmx.c              | 67 +++++++++++++++++++-----------
 arch/x86/kvm/x86.c              | 26 ++++++++---
 3 files changed, 59 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 85be627ef5de..461d00a554e0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -767,6 +767,8 @@ struct kvm_x86_ops {
 			       enum x86_intercept_stage stage);
 	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 	bool (*mpx_supported)(void);
+
+	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 };
 
 struct kvm_arch_async_pf {
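
The new check_nested_events op is optional: only VMX wires it up in this patch, and every caller in the common x86 code below NULL-checks the pointer before invoking it. A minimal standalone sketch of that optional-callback pattern, with hypothetical names (not kernel code):

#include <stdbool.h>
#include <stddef.h>

struct vcpu;	/* opaque for this sketch */

struct x86_ops {
	/* optional: backends without nested-event support leave this NULL */
	int (*check_nested_events)(struct vcpu *vcpu, bool external_intr);
};

static int check_nested_events_if_any(const struct x86_ops *ops,
				      struct vcpu *vcpu, bool external_intr)
{
	if (ops->check_nested_events == NULL)
		return 0;	/* nothing to do for this backend */
	return ops->check_nested_events(vcpu, external_intr);
}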
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 53c324f3cc5e..11718b44a62d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4631,22 +4631,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_nmi(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
-					  NMI_VECTOR | INTR_TYPE_NMI_INTR |
-					  INTR_INFO_VALID_MASK, 0);
-			/*
-			 * The NMI-triggered VM exit counts as injection:
-			 * clear this one and block further NMIs.
-			 */
-			vcpu->arch.nmi_pending = 0;
-			vmx_set_nmi_mask(vcpu, true);
-			return 0;
-		}
-	}
+	if (to_vmx(vcpu)->nested.nested_run_pending)
+		return 0;
 
 	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
 		return 0;
@@ -4658,19 +4644,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_intr(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
-					  0, 0);
-			/*
-			 * fall through to normal code, but now in L1, not L2
-			 */
-		}
-	}
-
-	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+	return (!to_vmx(vcpu)->nested.nested_run_pending &&
+		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
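
With the nested_vmx_vmexit() calls moved out, vmx_nmi_allowed() and vmx_interrupt_allowed() become side-effect-free queries: they report whether injection is currently possible and never perform the L2-to-L1 exit themselves. Roughly, as a standalone model (illustrative only, hypothetical field names):

#include <stdbool.h>

struct intr_state {
	bool nested_run_pending;	/* a VMLAUNCH/VMRESUME is in flight */
	bool rflags_if;			/* guest RFLAGS.IF is set */
	bool sti_or_mov_ss_blocked;	/* interruptibility-state blocking */
};

/* Mirrors the shape of the new vmx_interrupt_allowed(): a pure query. */
static bool interrupt_allowed(const struct intr_state *s)
{
	return !s->nested_run_pending && s->rflags_if &&
	       !s->sti_or_mov_ss_blocked;
}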
@@ -8172,6 +8147,35 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
 	}
 }
 
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
+				  INTR_INFO_VALID_MASK, 0);
+		/*
+		 * The NMI-triggered VM exit counts as injection:
+		 * clear this one and block further NMIs.
+		 */
+		vcpu->arch.nmi_pending = 0;
+		vmx_set_nmi_mask(vcpu, true);
+		return 0;
+	}
+
+	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+	    nested_exit_on_intr(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+	}
+
+	return 0;
+}
+
 /*
  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
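
vmx_check_nested_events() gives a pending NMI priority over external interrupts, and returns -EBUSY when a nested VM entry is still in flight so the exit is retried once the entry completes. A standalone model of that decision logic (hypothetical types, illustrative only):

#include <errno.h>
#include <stdbool.h>

struct nested_state {
	bool nested_run_pending;
	bool nmi_pending;
	bool intr_pending;
	bool exit_on_nmi;	/* L1 asked to intercept NMIs */
	bool exit_on_intr;	/* L1 asked to intercept external interrupts */
	bool exited_to_l1;	/* set when we emulate an L2->L1 VM exit */
};

static int check_nested_events_model(struct nested_state *s)
{
	if (s->nmi_pending && s->exit_on_nmi) {
		if (s->nested_run_pending)
			return -EBUSY;	/* retry once the entry completes */
		s->exited_to_l1 = true;
		s->nmi_pending = false;	/* the VM exit counts as injection */
		return 0;
	}

	if (s->intr_pending && s->exit_on_intr) {
		if (s->nested_run_pending)
			return -EBUSY;
		s->exited_to_l1 = true;
	}

	return 0;
}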
@@ -8512,6 +8516,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	nested_vmx_succeed(vcpu);
 	if (enable_shadow_vmcs)
 		vmx->nested.sync_shadow_vmcs = true;
+
+	/* in case we halted in L2 */
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 /*
@@ -8652,6 +8659,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.check_intercept = vmx_check_intercept,
 	.handle_external_intr = vmx_handle_external_intr,
 	.mpx_supported = vmx_mpx_supported,
+
+	.check_nested_events = vmx_check_nested_events,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a45bcac45645..738262595706 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5821,8 +5821,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 {
+	int r;
+
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.pending) {
 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
@@ -5832,17 +5834,23 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 					vcpu->arch.exception.has_error_code,
 					vcpu->arch.exception.error_code,
 					vcpu->arch.exception.reinject);
-		return;
+		return 0;
 	}
 
 	if (vcpu->arch.nmi_injected) {
 		kvm_x86_ops->set_nmi(vcpu);
-		return;
+		return 0;
 	}
 
 	if (vcpu->arch.interrupt.pending) {
 		kvm_x86_ops->set_irq(vcpu);
-		return;
+		return 0;
+	}
+
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+		if (r != 0)
+			return r;
 	}
 
 	/* try to inject new event if pending */
@@ -5859,6 +5867,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 			kvm_x86_ops->set_irq(vcpu);
 		}
 	}
+	return 0;
 }
 
 static void process_nmi(struct kvm_vcpu *vcpu)
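
The reworked inject_pending_event() keeps the old priority order (re-inject an in-progress event first, then fresh events) but lets a pending nested L2-to-L1 exit preempt new injection, propagating a nonzero return to the caller. A condensed, compilable model of the control flow (hypothetical helpers, not the kernel function):

#include <stdbool.h>

struct vcpu_model {
	bool exception_pending, nmi_injected, interrupt_pending, in_guest_mode;
	int (*check_nested_events)(struct vcpu_model *v, bool external_intr);
};

static void requeue_exception(struct vcpu_model *v) { (void)v; }
static void set_nmi(struct vcpu_model *v) { (void)v; }
static void set_irq(struct vcpu_model *v) { (void)v; }

/* Returns 0 if entry may proceed, or a negative errno (e.g. -EBUSY)
 * if a nested VM exit is wanted but cannot be delivered yet. */
static int inject_pending_event_model(struct vcpu_model *v, bool req_int_win)
{
	/* 1. Re-inject an event whose delivery was already started. */
	if (v->exception_pending) { requeue_exception(v); return 0; }
	if (v->nmi_injected)      { set_nmi(v);           return 0; }
	if (v->interrupt_pending) { set_irq(v);           return 0; }

	/* 2. New in this patch: a pending nested exit takes priority
	 *    over injecting a brand-new event into L2. */
	if (v->in_guest_mode && v->check_nested_events) {
		int r = v->check_nested_events(v, req_int_win);
		if (r != 0)
			return r;
	}

	/* 3. ... otherwise fall through to injecting new NMIs/IRQs. */
	return 0;
}

In vcpu_enter_guest() (next hunk), a nonzero return does not fail the entry: it sets req_immediate_exit, so the pending nested exit is retried as soon as the in-flight VM entry completes and nested_run_pending is cleared.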
@@ -5963,10 +5972,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			goto out;
 		}
 
-		inject_pending_event(vcpu);
-
+		if (inject_pending_event(vcpu, req_int_win) != 0)
+			req_immediate_exit = true;
 		/* enable NMI/IRQ window open exits if needed */
-		if (vcpu->arch.nmi_pending)
+		else if (vcpu->arch.nmi_pending)
 			req_immediate_exit =
 				kvm_x86_ops->enable_nmi_window(vcpu) != 0;
 		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
@@ -7296,6 +7305,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
+		kvm_x86_ops->check_nested_events(vcpu, false);
+
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
 		!vcpu->arch.apf.halted)
 		|| !list_empty_careful(&vcpu->async_pf.done)
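
Calling check_nested_events() from kvm_arch_vcpu_runnable(), together with forcing KVM_MP_STATE_RUNNABLE in nested_vmx_vmexit() above, covers a vCPU that executed HLT in L2: a pending event that L1 intercepts triggers the emulated L2-to-L1 exit, which makes the vCPU runnable again. A toy model of that wake-up path (illustrative only, hypothetical names):

#include <stdbool.h>

enum mp_state { MP_STATE_RUNNABLE, MP_STATE_HALTED };

struct halted_vcpu_model {
	enum mp_state mp_state;
	bool in_guest_mode;
	bool nmi_pending;
	bool exit_on_nmi;
};

/* Emulated L2->L1 exit: the halt belonged to L2, so mark the vCPU
 * runnable again (mirrors the new code in nested_vmx_vmexit()). */
static void nested_vmexit_model(struct halted_vcpu_model *v)
{
	v->in_guest_mode = false;
	v->mp_state = MP_STATE_RUNNABLE;
}

static bool vcpu_runnable_model(struct halted_vcpu_model *v)
{
	/* New: give nested events a chance to end L2 (and its halt) first. */
	if (v->in_guest_mode && v->nmi_pending && v->exit_on_nmi)
		nested_vmexit_model(v);

	return v->mp_state == MP_STATE_RUNNABLE;
}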