Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/vmx.c   11
-rw-r--r--   arch/x86/kvm/x86.c    7
2 files changed, 13 insertions, 5 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 579a0b51696a..d75d91465246 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
         u32 cpu_based_vm_exec_control;
-        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-                /* We can get here when nested_run_pending caused
-                 * vmx_interrupt_allowed() to return false. In this case, do
-                 * nothing - the interrupt will be injected later.
+        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+                /*
+                 * We get here if vmx_interrupt_allowed() said we can't
+                 * inject to L1 now because L2 must run. Ask L2 to exit
+                 * right after entry, so we can inject to L1 more promptly.
                  */
+                kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                 return;
+        }
 
         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
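
The definition of the new request bit lives outside arch/x86/kvm (in include/linux/kvm_host.h), so the hunk adding it is excluded by this diffstat. A minimal sketch of what that definition presumably looks like; the bit value 15 is an assumption, continuing the numbering after the existing KVM_REQ_NMI:

/* include/linux/kvm_host.h: assumed hunk, not shown in this diff;
 * the bit value is a guess based on the request bits of this era. */
#define KVM_REQ_IMMEDIATE_EXIT  15
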
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c938da2ba00..e24edbc7f2ec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5648,6 +5648,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         int r;
         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                 vcpu->run->request_interrupt_window;
+        bool req_immediate_exit = 0;
 
         if (vcpu->requests) {
                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5687,7 +5688,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                         record_steal_time(vcpu);
                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
                         process_nmi(vcpu);
-
+                req_immediate_exit =
+                        kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
         }
 
         r = kvm_mmu_reload(vcpu);
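
Note that the result of kvm_check_request() is latched into the local req_immediate_exit instead of being acted on inside the requests block: the helper both tests and clears the bit, and the actual kick has to happen later, once host interrupts are disabled. A sketch of the helper's semantics, assuming the generic definition of this era:

/* Sketch of kvm_check_request() as assumed here: a test-and-clear of
 * the per-vcpu request bit, so callers must remember the result. */
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        }
        return false;
}
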
@@ -5738,6 +5740,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
+        if (req_immediate_exit)
+                smp_send_reschedule(vcpu->cpu);
+
         kvm_guest_enter();
 
         if (unlikely(vcpu->arch.switch_db_regs)) {
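
The reason a reschedule IPI to the vcpu's own CPU yields an "immediate" exit is ordering: smp_send_reschedule() runs after host interrupts have been disabled for the entry path, so the IPI stays pending across VM entry and is delivered as an external interrupt as soon as the guest starts running, forcing a VM exit right away; enable_irq_window() then gets another chance to inject into L1. A condensed sketch of that ordering, with illustrative structure and helper names assumed from vcpu_enter_guest() of this era, not the literal upstream code:

/* Condensed, illustrative sketch of the entry ordering. */
static void enter_guest_sketch(struct kvm_vcpu *vcpu, bool req_immediate_exit)
{
        local_irq_disable();                    /* host IRQs off until VM entry */

        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu); /* self-IPI; pends while IRQs are off */

        kvm_guest_enter();
        kvm_x86_ops->run(vcpu);                 /* pending IPI fires right after entry,
                                                 * forcing the immediate VM exit */
        local_irq_enable();
}
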