Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/vmx.c	24
-rw-r--r--	arch/x86/kvm/x86.c	28
2 files changed, 4 insertions, 48 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 487e1dcdce33..6259d7467648 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2498,15 +2498,13 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	}
 	if (vcpu->arch.nmi_injected) {
 		vmx_inject_nmi(vcpu);
-		if (vcpu->arch.nmi_pending || kvm_run->request_nmi_window)
+		if (vcpu->arch.nmi_pending)
 			enable_nmi_window(vcpu);
 		else if (vcpu->arch.irq_summary
 			 || kvm_run->request_interrupt_window)
 			enable_irq_window(vcpu);
 		return;
 	}
-	if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window)
-		enable_nmi_window(vcpu);
 
 	if (vcpu->arch.interrupt_window_open) {
 		if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
@@ -3040,14 +3038,6 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 	++vcpu->stat.nmi_window_exits;
 
-	/*
-	 * If the user space waits to inject a NMI, exit as soon as possible
-	 */
-	if (kvm_run->request_nmi_window && !vcpu->arch.nmi_pending) {
-		kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
-		return 0;
-	}
-
 	return 1;
 }
 
@@ -3162,7 +3152,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			vmx->soft_vnmi_blocked = 0;
 			vcpu->arch.nmi_window_open = 1;
 		} else if (vmx->vnmi_blocked_time > 1000000000LL &&
-			   (kvm_run->request_nmi_window || vcpu->arch.nmi_pending)) {
+			   vcpu->arch.nmi_pending) {
 			/*
 			 * This CPU don't support us in finding the end of an
 			 * NMI-blocked window if the guest runs with IRQs
@@ -3175,16 +3165,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			vmx->soft_vnmi_blocked = 0;
 			vmx->vcpu.arch.nmi_window_open = 1;
 		}
-
-		/*
-		 * If the user space waits to inject an NNI, exit ASAP
-		 */
-		if (vcpu->arch.nmi_window_open && kvm_run->request_nmi_window
-		    && !vcpu->arch.nmi_pending) {
-			kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
-			++vcpu->stat.nmi_window_exits;
-			return 0;
-		}
 	}
 
 	if (exit_reason < kvm_vmx_max_exit_handlers
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 10302d3bd415..0e6aa8141dcd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2887,37 +2887,18 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
 		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
 }
 
-/*
- * Check if userspace requested a NMI window, and that the NMI window
- * is open.
- *
- * No need to exit to userspace if we already have a NMI queued.
- */
-static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu,
-					struct kvm_run *kvm_run)
-{
-	return (!vcpu->arch.nmi_pending &&
-		kvm_run->request_nmi_window &&
-		vcpu->arch.nmi_window_open);
-}
-
 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
 			      struct kvm_run *kvm_run)
 {
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (irqchip_in_kernel(vcpu->kvm)) {
+	if (irqchip_in_kernel(vcpu->kvm))
 		kvm_run->ready_for_interrupt_injection = 1;
-		kvm_run->ready_for_nmi_injection = 1;
-	} else {
+	else
 		kvm_run->ready_for_interrupt_injection =
 			(vcpu->arch.interrupt_window_open &&
 			 vcpu->arch.irq_summary == 0);
-		kvm_run->ready_for_nmi_injection =
-			(vcpu->arch.nmi_window_open &&
-			 vcpu->arch.nmi_pending == 0);
-	}
 }
 
 static void vapic_enter(struct kvm_vcpu *vcpu)
@@ -3093,11 +3074,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 
 		if (r > 0) {
-			if (dm_request_for_nmi_injection(vcpu, kvm_run)) {
-				r = -EINTR;
-				kvm_run->exit_reason = KVM_EXIT_NMI;
-				++vcpu->stat.request_nmi_exits;
-			}
 			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
 				r = -EINTR;
 				kvm_run->exit_reason = KVM_EXIT_INTR;