about summary refs log tree commit diff stats
path: root/arch/x86/kvm/vmx.c
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2009-04-21 10:45:07 -0400
committerAvi Kivity <avi@redhat.com>2009-06-10 04:48:48 -0400
commitc4282df98ae0993983924c00ed76428a6609d68b (patch)
tree8f09653f40a996fd0f5070d8e060aa6129eec096 /arch/x86/kvm/vmx.c
parent0a5fff192388d2a74aa9ab5e0d394b745df9f225 (diff)
KVM: Get rid of arch.interrupt_window_open & arch.nmi_window_open
They are recalculated before each use anyway. Signed-off-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--arch/x86/kvm/vmx.c35
1 file changed, 11 insertions, 24 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 51f804c8fe79..116eac01a9f0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -753,7 +753,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
753 if (interruptibility & 3) 753 if (interruptibility & 3)
754 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 754 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
755 interruptibility & ~3); 755 interruptibility & ~3);
756 vcpu->arch.interrupt_window_open = 1;
757} 756}
758 757
759static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, 758static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -2482,27 +2481,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2482 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 2481 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
2483} 2482}
2484 2483
2485static void vmx_update_window_states(struct kvm_vcpu *vcpu) 2484static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
2486{ 2485{
2487 u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2488
2489 vcpu->arch.nmi_window_open =
2490 !(guest_intr & (GUEST_INTR_STATE_STI |
2491 GUEST_INTR_STATE_MOV_SS |
2492 GUEST_INTR_STATE_NMI));
2493 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) 2486 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
2494 vcpu->arch.nmi_window_open = 0; 2487 return 0;
2495 2488
2496 vcpu->arch.interrupt_window_open = 2489 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2497 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && 2490 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
2498 !(guest_intr & (GUEST_INTR_STATE_STI | 2491 GUEST_INTR_STATE_NMI));
2499 GUEST_INTR_STATE_MOV_SS)));
2500} 2492}
2501 2493
2502static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) 2494static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
2503{ 2495{
2504 vmx_update_window_states(vcpu); 2496 return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2505 return vcpu->arch.interrupt_window_open; 2497 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2498 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
2506} 2499}
2507 2500
2508static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 2501static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -3194,9 +3187,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3194 __func__, vectoring_info, exit_reason); 3187 __func__, vectoring_info, exit_reason);
3195 3188
3196 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) { 3189 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
3197 if (vcpu->arch.interrupt_window_open) { 3190 if (vmx_interrupt_allowed(vcpu)) {
3198 vmx->soft_vnmi_blocked = 0; 3191 vmx->soft_vnmi_blocked = 0;
3199 vcpu->arch.nmi_window_open = 1;
3200 } else if (vmx->vnmi_blocked_time > 1000000000LL && 3192 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
3201 vcpu->arch.nmi_pending) { 3193 vcpu->arch.nmi_pending) {
3202 /* 3194 /*
@@ -3209,7 +3201,6 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3209 "state on VCPU %d after 1 s timeout\n", 3201 "state on VCPU %d after 1 s timeout\n",
3210 __func__, vcpu->vcpu_id); 3202 __func__, vcpu->vcpu_id);
3211 vmx->soft_vnmi_blocked = 0; 3203 vmx->soft_vnmi_blocked = 0;
3212 vmx->vcpu.arch.nmi_window_open = 1;
3213 } 3204 }
3214 } 3205 }
3215 3206
@@ -3324,13 +3315,13 @@ static void vmx_intr_inject(struct kvm_vcpu *vcpu)
3324 3315
3325 /* try to inject new event if pending */ 3316 /* try to inject new event if pending */
3326 if (vcpu->arch.nmi_pending) { 3317 if (vcpu->arch.nmi_pending) {
3327 if (vcpu->arch.nmi_window_open) { 3318 if (vmx_nmi_allowed(vcpu)) {
3328 vcpu->arch.nmi_pending = false; 3319 vcpu->arch.nmi_pending = false;
3329 vcpu->arch.nmi_injected = true; 3320 vcpu->arch.nmi_injected = true;
3330 vmx_inject_nmi(vcpu); 3321 vmx_inject_nmi(vcpu);
3331 } 3322 }
3332 } else if (kvm_cpu_has_interrupt(vcpu)) { 3323 } else if (kvm_cpu_has_interrupt(vcpu)) {
3333 if (vcpu->arch.interrupt_window_open) { 3324 if (vmx_interrupt_allowed(vcpu)) {
3334 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); 3325 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
3335 vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); 3326 vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
3336 } 3327 }
@@ -3344,8 +3335,6 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3344 3335
3345 update_tpr_threshold(vcpu); 3336 update_tpr_threshold(vcpu);
3346 3337
3347 vmx_update_window_states(vcpu);
3348
3349 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 3338 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3350 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 3339 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3351 GUEST_INTR_STATE_STI | 3340 GUEST_INTR_STATE_STI |
@@ -3518,8 +3507,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3518 if (vmx->rmode.irq.pending) 3507 if (vmx->rmode.irq.pending)
3519 fixup_rmode_irq(vmx); 3508 fixup_rmode_irq(vmx);
3520 3509
3521 vmx_update_window_states(vcpu);
3522
3523 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 3510 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
3524 vmx->launched = 1; 3511 vmx->launched = 1;
3525 3512