-rw-r--r--   arch/x86/kvm/vmx.c   | 24
-rw-r--r--   arch/x86/kvm/x86.c   | 28
-rw-r--r--   include/linux/kvm.h  | 11
3 files changed, 9 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 487e1dcdce33..6259d7467648 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2498,15 +2498,13 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	}
 	if (vcpu->arch.nmi_injected) {
 		vmx_inject_nmi(vcpu);
-		if (vcpu->arch.nmi_pending || kvm_run->request_nmi_window)
+		if (vcpu->arch.nmi_pending)
 			enable_nmi_window(vcpu);
 		else if (vcpu->arch.irq_summary
 			 || kvm_run->request_interrupt_window)
 			enable_irq_window(vcpu);
 		return;
 	}
-	if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window)
-		enable_nmi_window(vcpu);
 
 	if (vcpu->arch.interrupt_window_open) {
 		if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
@@ -3040,14 +3038,6 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 	++vcpu->stat.nmi_window_exits;
 
-	/*
-	 * If the user space waits to inject a NMI, exit as soon as possible
-	 */
-	if (kvm_run->request_nmi_window && !vcpu->arch.nmi_pending) {
-		kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
-		return 0;
-	}
-
 	return 1;
 }
 
@@ -3162,7 +3152,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			vmx->soft_vnmi_blocked = 0;
 			vcpu->arch.nmi_window_open = 1;
 		} else if (vmx->vnmi_blocked_time > 1000000000LL &&
-			   (kvm_run->request_nmi_window || vcpu->arch.nmi_pending)) {
+			   vcpu->arch.nmi_pending) {
 			/*
 			 * This CPU don't support us in finding the end of an
 			 * NMI-blocked window if the guest runs with IRQs
@@ -3175,16 +3165,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			vmx->soft_vnmi_blocked = 0;
 			vmx->vcpu.arch.nmi_window_open = 1;
 		}
-
-		/*
-		 * If the user space waits to inject an NNI, exit ASAP
-		 */
-		if (vcpu->arch.nmi_window_open && kvm_run->request_nmi_window
-		    && !vcpu->arch.nmi_pending) {
-			kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
-			++vcpu->stat.nmi_window_exits;
-			return 0;
-		}
 	}
 
 	if (exit_reason < kvm_vmx_max_exit_handlers
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 10302d3bd415..0e6aa8141dcd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2887,37 +2887,18 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
 		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
 }
 
-/*
- * Check if userspace requested a NMI window, and that the NMI window
- * is open.
- *
- * No need to exit to userspace if we already have a NMI queued.
- */
-static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu,
-					struct kvm_run *kvm_run)
-{
-	return (!vcpu->arch.nmi_pending &&
-		kvm_run->request_nmi_window &&
-		vcpu->arch.nmi_window_open);
-}
-
 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
 			      struct kvm_run *kvm_run)
 {
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (irqchip_in_kernel(vcpu->kvm)) {
+	if (irqchip_in_kernel(vcpu->kvm))
 		kvm_run->ready_for_interrupt_injection = 1;
-		kvm_run->ready_for_nmi_injection = 1;
-	} else {
+	else
 		kvm_run->ready_for_interrupt_injection =
 			(vcpu->arch.interrupt_window_open &&
 			 vcpu->arch.irq_summary == 0);
-		kvm_run->ready_for_nmi_injection =
-			(vcpu->arch.nmi_window_open &&
-			 vcpu->arch.nmi_pending == 0);
-	}
 }
 
 static void vapic_enter(struct kvm_vcpu *vcpu)
@@ -3093,11 +3074,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (r > 0) {
-		if (dm_request_for_nmi_injection(vcpu, kvm_run)) {
-			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_NMI;
-			++vcpu->stat.request_nmi_exits;
-		}
 		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
 			r = -EINTR;
 			kvm_run->exit_reason = KVM_EXIT_INTR;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 48807767e726..35525ac63337 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -84,21 +84,18 @@ struct kvm_irqchip {
 #define KVM_EXIT_S390_RESET 14
 #define KVM_EXIT_DCR 15
 #define KVM_EXIT_NMI 16
-#define KVM_EXIT_NMI_WINDOW_OPEN 17
 
 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
 struct kvm_run {
 	/* in */
 	__u8 request_interrupt_window;
-	__u8 request_nmi_window;
-	__u8 padding1[6];
+	__u8 padding1[7];
 
 	/* out */
 	__u32 exit_reason;
 	__u8 ready_for_interrupt_injection;
 	__u8 if_flag;
-	__u8 ready_for_nmi_injection;
-	__u8 padding2;
+	__u8 padding2[2];
 
 	/* in (pre_kvm_run), out (post_kvm_run) */
 	__u64 cr8;
@@ -391,12 +388,14 @@ struct kvm_trace_rec {
 #define KVM_CAP_DEVICE_ASSIGNMENT 17
 #endif
 #define KVM_CAP_IOMMU 18
-#define KVM_CAP_NMI 19
 #if defined(CONFIG_X86)
 #define KVM_CAP_DEVICE_MSI 20
 #endif
 /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
 #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
+#if defined(CONFIG_X86)
+#define KVM_CAP_USER_NMI 22
+#endif
 
 /*
  * ioctls for VM fds
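
With the NMI-window request interface gone, the userspace path that remains is direct injection: the KVM_CAP_USER_NMI capability defined above advertises the KVM_NMI vcpu ioctl (not touched by this diff). The following is a minimal, hypothetical userspace sketch of that path; the fd names, helper function, and error handling are illustrative and not part of this patch:

/*
 * Hypothetical usage sketch (not part of this patch): probe KVM_CAP_USER_NMI
 * on the /dev/kvm fd, then queue an NMI on a vcpu fd with the KVM_NMI ioctl.
 */
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int inject_user_nmi(int kvm_fd, int vcpu_fd)
{
	/* KVM_CHECK_EXTENSION returns a positive value if the cap is present. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_NMI) <= 0) {
		fprintf(stderr, "KVM_CAP_USER_NMI not supported\n");
		return -1;
	}

	/* Queue the NMI; the kernel injects it once an NMI window opens. */
	return ioctl(vcpu_fd, KVM_NMI);
}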