author		Jan Kiszka <jan.kiszka@siemens.com>	2008-09-26 03:30:56 -0400
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:51:43 -0500
commit		487b391d6ea9b1d0e2e0440466fb3130e78c98d9 (patch)
tree		a0fd9693edbcbff5cfdef94849679e583229c781 /arch
parent		c4abb7c9cde24b7351a47328ef866e6a2bbb1ad0 (diff)
KVM: VMX: Provide support for user space injected NMIs
This patch adds the required bits to the VMX side for user space injected NMIs. As with the preexisting in-kernel irqchip support, the CPU must provide the "virtual NMI" feature for proper tracking of the NMI blocking state.

Based on the original patch by Sheng Yang.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
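The user-space entry point itself is not part of this diff. For orientation only, below is a minimal sketch of how a VMM could request an NMI injection from user space, assuming the companion patches of this series expose the feature through the KVM_NMI vcpu ioctl; the ioctl call and surrounding setup are assumptions here, only the VMX-side injection path in the diff below comes from this patch.

/*
 * Hedged sketch, not part of this patch: ask KVM to queue an NMI for a
 * vcpu from user space.  Assumes the KVM_NMI vcpu ioctl provided by the
 * companion patches of this series; vcpu_fd is an already-created vcpu
 * file descriptor.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int inject_guest_nmi(int vcpu_fd)
{
	/* The VMX code added below performs the actual injection on the
	 * next guest entry, once NMI blocking allows it. */
	if (ioctl(vcpu_fd, KVM_NMI) < 0) {
		perror("KVM_NMI");
		return -1;
	}
	return 0;
}

Whether the injection happens immediately or is deferred until the NMI window opens is decided by the in-kernel logic this patch adds to do_interrupt_requests().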
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/vmx.c	| 33
1 files changed, 33 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 38d138566617..f16a62c79267 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2360,6 +2360,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	++vcpu->stat.nmi_injections;
 	if (vcpu->arch.rmode.active) {
 		vmx->rmode.irq.pending = true;
 		vmx->rmode.irq.vector = NMI_VECTOR;
@@ -2428,6 +2429,30 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 {
 	vmx_update_window_states(vcpu);
 
+	if (cpu_has_virtual_nmis()) {
+		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
+			if (vcpu->arch.nmi_window_open) {
+				vcpu->arch.nmi_pending = false;
+				vcpu->arch.nmi_injected = true;
+			} else {
+				enable_nmi_window(vcpu);
+				return;
+			}
+		}
+		if (vcpu->arch.nmi_injected) {
+			vmx_inject_nmi(vcpu);
+			if (vcpu->arch.nmi_pending
+			    || kvm_run->request_nmi_window)
+				enable_nmi_window(vcpu);
+			else if (vcpu->arch.irq_summary
+				 || kvm_run->request_interrupt_window)
+				enable_irq_window(vcpu);
+			return;
+		}
+		if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window)
+			enable_nmi_window(vcpu);
+	}
+
 	if (vcpu->arch.interrupt_window_open) {
 		if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
 			kvm_do_inject_irq(vcpu);
@@ -2959,6 +2984,14 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 	++vcpu->stat.nmi_window_exits;
 
+	/*
+	 * If the user space waits to inject a NMI, exit as soon as possible
+	 */
+	if (kvm_run->request_nmi_window && !vcpu->arch.nmi_pending) {
+		kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
+		return 0;
+	}
+
 	return 1;
 }
 
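For context on the handle_nmi_window() hunk above: when user space has asked to be notified, the exit reason KVM_EXIT_NMI_WINDOW_OPEN hands control back as soon as NMI blocking clears. The following is a hedged sketch of what the user-space side of that handshake could look like, assuming the kvm_run members referenced in this diff (request_nmi_window and the KVM_EXIT_NMI_WINDOW_OPEN exit reason) are defined by companion patches of the series and that the actual injection uses the KVM_NMI ioctl; none of this user-space code is part of the patch itself.

/*
 * Hedged sketch, not part of this patch: drive one KVM_RUN iteration and
 * react to the NMI-window exit added above.  run points at the mmap'ed
 * struct kvm_run of the vcpu; want_nmi says an NMI is waiting to be
 * delivered from user space.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

void run_vcpu_once(int vcpu_fd, struct kvm_run *run, int want_nmi)
{
	/* Ask the kernel to exit as soon as NMIs can be injected
	 * (field provided by companion patches of this series). */
	run->request_nmi_window = want_nmi;

	ioctl(vcpu_fd, KVM_RUN, 0);

	switch (run->exit_reason) {
	case KVM_EXIT_NMI_WINDOW_OPEN:
		/* NMI blocking has cleared: inject the pending NMI now,
		 * e.g. via the KVM_NMI ioctl, and drop the request. */
		ioctl(vcpu_fd, KVM_NMI);
		run->request_nmi_window = 0;
		break;
	default:
		/* Other exits (MMIO, I/O, ...) handled elsewhere. */
		break;
	}
}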