author		Gleb Natapov <gleb@redhat.com>	2009-04-21 10:45:08 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 04:48:48 -0400
commit		95ba82731374eb1c2af4dd442526c4b314f0e8b6 (patch)
tree		a8b8e23285686761694ee214c6de85e83f52652b /arch/x86/kvm/vmx.c
parent		c4282df98ae0993983924c00ed76428a6609d68b (diff)
KVM: SVM: Add NMI injection support
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	79
1 file changed, 19 insertions, 60 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 116eac01a9f0..bad2413fbd51 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1314,6 +1314,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
+	if (!cpu_has_vmx_tpr_shadow())
+		kvm_x86_ops->update_cr8_intercept = NULL;
+
 	return alloc_kvm_area();
 }
 
@@ -2404,6 +2407,12 @@ out:
 	return ret;
 }
 
+void vmx_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+			GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
+}
+
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
@@ -3214,21 +3223,14 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void update_tpr_threshold(struct kvm_vcpu *vcpu)
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
-	int max_irr, tpr;
-
-	if (!vm_need_tpr_shadow(vcpu->kvm))
-		return;
-
-	if (!kvm_lapic_enabled(vcpu) ||
-	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+	if (irr == -1 || tpr < irr) {
 		vmcs_write32(TPR_THRESHOLD, 0);
 		return;
 	}
 
-	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
-	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
+	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
@@ -3300,55 +3302,6 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	}
 }
 
-static void vmx_intr_inject(struct kvm_vcpu *vcpu)
-{
-	/* try to reinject previous events if any */
-	if (vcpu->arch.nmi_injected) {
-		vmx_inject_nmi(vcpu);
-		return;
-	}
-
-	if (vcpu->arch.interrupt.pending) {
-		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		return;
-	}
-
-	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (vmx_nmi_allowed(vcpu)) {
-			vcpu->arch.nmi_pending = false;
-			vcpu->arch.nmi_injected = true;
-			vmx_inject_nmi(vcpu);
-		}
-	} else if (kvm_cpu_has_interrupt(vcpu)) {
-		if (vmx_interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
-			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
-		}
-	}
-}
-
-static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
-
-	update_tpr_threshold(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-				GUEST_INTR_STATE_STI |
-				GUEST_INTR_STATE_MOV_SS);
-
-	vmx_intr_inject(vcpu);
-
-	/* enable NMI/IRQ window open exits if needed */
-	if (vcpu->arch.nmi_pending)
-		enable_nmi_window(vcpu);
-	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-		enable_irq_window(vcpu);
-}
-
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
@@ -3683,9 +3636,15 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.patch_hypercall = vmx_patch_hypercall,
 	.get_irq = vmx_get_irq,
 	.set_irq = vmx_inject_irq,
+	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
-	.inject_pending_irq = vmx_intr_assist,
 	.interrupt_allowed = vmx_interrupt_allowed,
+	.nmi_allowed = vmx_nmi_allowed,
+	.enable_nmi_window = enable_nmi_window,
+	.enable_irq_window = enable_irq_window,
+	.update_cr8_intercept = update_cr8_intercept,
+	.drop_interrupt_shadow = vmx_drop_interrupt_shadow,
+
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask_shift = vmx_get_mt_mask_shift,
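
Note: the VMX-specific injection loop removed above is not lost; its sequencing is expected to move into generic x86 code that drives the new kvm_x86_ops callbacks exported here. Below is a minimal sketch of such a generic injector, reconstructed from the removed vmx_intr_inject()/vmx_intr_assist() logic. The function name inject_pending_event, its exact placement in arch/x86/kvm/x86.c, and its caller are assumptions for illustration only and are not shown by this vmx.c-limited diff.

/*
 * Sketch only: a generic injector driving the new kvm_x86_ops hooks.
 * Mirrors the sequencing removed from vmx_intr_inject()/vmx_intr_assist();
 * name and call site are assumed, not part of this diff.
 */
static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
		kvm_run->request_interrupt_window;

	/* do not let an interrupt shadow block single-step debugging */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		kvm_x86_ops->drop_interrupt_shadow(vcpu);

	if (vcpu->arch.nmi_injected) {
		/* reinject an NMI whose previous delivery did not complete */
		kvm_x86_ops->set_nmi(vcpu);
	} else if (vcpu->arch.interrupt.pending) {
		/* reinject a previously queued external interrupt */
		kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
	} else if (vcpu->arch.nmi_pending) {
		/* inject a new NMI if the vendor code allows it right now */
		if (kvm_x86_ops->nmi_allowed(vcpu)) {
			vcpu->arch.nmi_pending = false;
			vcpu->arch.nmi_injected = true;
			kvm_x86_ops->set_nmi(vcpu);
		}
	} else if (kvm_cpu_has_interrupt(vcpu)) {
		/* inject a new interrupt from the in-kernel APIC/PIC */
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
			kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
		}
	}

	/* ask for an NMI/IRQ window exit if an event is still pending */
	if (vcpu->arch.nmi_pending)
		kvm_x86_ops->enable_nmi_window(vcpu);
	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
		kvm_x86_ops->enable_irq_window(vcpu);
}

The TPR threshold update is presumably moved out the same way: a generic caller would compute the guest TPR and highest pending IRR and pass them to ->update_cr8_intercept(), which in the new VMX implementation only programs TPR_THRESHOLD (and is cleared from kvm_x86_ops when the CPU lacks a TPR shadow, per the hardware_setup() hunk above).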