author		Gleb Natapov <gleb@redhat.com>	2009-04-21 10:44:57 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 04:48:45 -0400
commit		863e8e658ee9ac6e5931b295eb7428456e450a0f (patch)
tree		96198a74491bd96ba81d05e3773b19951f050744 /arch/x86/kvm
parent		8061823a25218174f30c3dd943989e1d72f7d06e (diff)
KVM: VMX: Consolidate userspace and kernel interrupt injection for VMX
Use the same callback to inject irq/nmi events no matter what irqchip is
in use. Only from VMX for now.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	 2
-rw-r--r--	arch/x86/kvm/vmx.c	71
-rw-r--r--	arch/x86/kvm/x86.c	 2
3 files changed, 18 insertions, 57 deletions
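
For orientation, here is the post-patch vmx_intr_assist() reassembled from the hunks below. The code between vmx_update_window_states() and the nmi_injected check is untouched by this patch and therefore not visible in the diff, so it is elided with a comment; identifiers are those of the kernel tree at this commit. This is a readability sketch, not a substitute for the diff itself.

static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/* Userspace's interrupt-window request only matters when the
	 * irqchip is emulated in userspace. */
	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
		kvm_run->request_interrupt_window;

	update_tpr_threshold(vcpu);

	vmx_update_window_states(vcpu);

	/* ... NMI-pending arbitration, unchanged by this patch and not
	 * shown in the hunks below ... */

	if (vcpu->arch.nmi_injected) {
		vmx_inject_nmi(vcpu);
		goto out;
	}

	if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
		if (vcpu->arch.interrupt_window_open)
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
	}

	if (vcpu->arch.interrupt.pending)
		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);

out:
	/* Re-arm window exits so still-pending events are delivered later. */
	if (vcpu->arch.nmi_pending)
		enable_nmi_window(vcpu);
	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
		enable_irq_window(vcpu);
}

Because the userspace-irqchip state is folded into req_int_win, the same function can back both the inject_pending_irq and inject_pending_vectors callbacks, which is what the vmx_x86_ops hunk below does.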
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6eef6d22e87e..f2933abc9691 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2298,7 +2298,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 		(svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
-static void svm_intr_assist(struct kvm_vcpu *vcpu)
+static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *vmcb = svm->vmcb;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b3292c1ea2f2..06252f7465d6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2510,48 +2510,6 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 	return vcpu->arch.interrupt_window_open;
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
-				  struct kvm_run *kvm_run)
-{
-	vmx_update_window_states(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-				GUEST_INTR_STATE_STI |
-				GUEST_INTR_STATE_MOV_SS);
-
-	if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-		if (vcpu->arch.interrupt.pending) {
-			enable_nmi_window(vcpu);
-		} else if (vcpu->arch.nmi_window_open) {
-			vcpu->arch.nmi_pending = false;
-			vcpu->arch.nmi_injected = true;
-		} else {
-			enable_nmi_window(vcpu);
-			return;
-		}
-	}
-	if (vcpu->arch.nmi_injected) {
-		vmx_inject_nmi(vcpu);
-		if (vcpu->arch.nmi_pending)
-			enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu) ||
-			 kvm_run->request_interrupt_window)
-			enable_irq_window(vcpu);
-		return;
-	}
-
-	if (vcpu->arch.interrupt_window_open) {
-		if (kvm_cpu_has_interrupt(vcpu) && !vcpu->arch.interrupt.pending)
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-
-		if (vcpu->arch.interrupt.pending)
-			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-	} else if(kvm_cpu_has_interrupt(vcpu) ||
-		  kvm_run->request_interrupt_window)
-		enable_irq_window(vcpu);
-}
-
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
@@ -3351,8 +3309,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	}
 }
 
-static void vmx_intr_assist(struct kvm_vcpu *vcpu)
+static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+		kvm_run->request_interrupt_window;
+
 	update_tpr_threshold(vcpu);
 
 	vmx_update_window_states(vcpu);
@@ -3373,25 +3334,25 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 			return;
 		}
 	}
+
 	if (vcpu->arch.nmi_injected) {
 		vmx_inject_nmi(vcpu);
-		if (vcpu->arch.nmi_pending)
-			enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu))
-			enable_irq_window(vcpu);
-		return;
+		goto out;
 	}
+
 	if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
 		if (vcpu->arch.interrupt_window_open)
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-		else
-			enable_irq_window(vcpu);
 	}
-	if (vcpu->arch.interrupt.pending) {
+
+	if (vcpu->arch.interrupt.pending)
 		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		if (kvm_cpu_has_interrupt(vcpu))
-			enable_irq_window(vcpu);
-	}
+
+out:
+	if (vcpu->arch.nmi_pending)
+		enable_nmi_window(vcpu);
+	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		enable_irq_window(vcpu);
 }
 
 /*
@@ -3733,7 +3694,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.queue_exception = vmx_queue_exception,
 	.exception_injected = vmx_exception_injected,
 	.inject_pending_irq = vmx_intr_assist,
-	.inject_pending_vectors = do_interrupt_requests,
+	.inject_pending_vectors = vmx_intr_assist,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c2eb7c0e1fb..a84c96a7ea5e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3173,7 +3173,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->arch.exception.pending)
 		__queue_exception(vcpu);
 	else if (irqchip_in_kernel(vcpu->kvm))
-		kvm_x86_ops->inject_pending_irq(vcpu);
+		kvm_x86_ops->inject_pending_irq(vcpu, kvm_run);
 	else
 		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
 