author     Gleb Natapov <gleb@redhat.com>    2009-04-21 10:45:07 -0400
committer  Avi Kivity <avi@redhat.com>       2009-06-10 04:48:48 -0400
commit     c4282df98ae0993983924c00ed76428a6609d68b (patch)
tree       8f09653f40a996fd0f5070d8e060aa6129eec096
parent     0a5fff192388d2a74aa9ab5e0d394b745df9f225 (diff)
KVM: Get rid of arch.interrupt_window_open & arch.nmi_window_open
They are recalculated before each use anyway.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
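In effect, callers now recompute the window state where it is needed instead of reading a cached flag. A minimal sketch of the new pattern, condensed from the vmx.c hunks below (the svm.c path is analogous, using svm_interrupt_allowed()):

    /* Derive the interrupt window directly from guest state
     * (RFLAGS.IF plus the STI/MOV SS interruptibility shadow). */
    static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
    {
    	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
    		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
    			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
    }

    /* The injection path queries the helper at the point of use. */
    if (kvm_cpu_has_interrupt(vcpu)) {
    	if (vmx_interrupt_allowed(vcpu)) {
    		kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
    		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
    	}
    }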
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  2
-rw-r--r--  arch/x86/kvm/svm.c              |  6
-rw-r--r--  arch/x86/kvm/vmx.c              | 35
3 files changed, 12 insertions, 31 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa5a54eb4da4..53533ea17555 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -266,7 +266,6 @@ struct kvm_mmu {
 
 struct kvm_vcpu_arch {
 	u64 host_tsc;
-	int interrupt_window_open;
 	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
 	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
 	/*
@@ -360,7 +359,6 @@ struct kvm_vcpu_arch {
 
 	bool nmi_pending;
 	bool nmi_injected;
-	bool nmi_window_open;
 
 	struct mtrr_state_type mtrr_state;
 	u32 pat;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e283a63b2bca..0f53439296b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -216,8 +216,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 
 	kvm_rip_write(vcpu, svm->next_rip);
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
-
-	vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
 static int has_svm(void)
@@ -2305,7 +2303,7 @@ static void svm_intr_inject(struct kvm_vcpu *vcpu)
 
 	/* try to inject new event if pending */
 	if (kvm_cpu_has_interrupt(vcpu)) {
-		if (vcpu->arch.interrupt_window_open) {
+		if (svm_interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
 			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
 		}
@@ -2321,8 +2319,6 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (nested_svm_intr(svm))
 		goto out;
 
-	svm->vcpu.arch.interrupt_window_open = svm_interrupt_allowed(vcpu);
-
 	svm_intr_inject(vcpu);
 
 	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 51f804c8fe79..116eac01a9f0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -753,7 +753,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	if (interruptibility & 3)
 		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
 			     interruptibility & ~3);
-	vcpu->arch.interrupt_window_open = 1;
 }
 
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -2482,27 +2481,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
 }
 
-static void vmx_update_window_states(struct kvm_vcpu *vcpu)
+static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
-	u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
-
-	vcpu->arch.nmi_window_open =
-		!(guest_intr & (GUEST_INTR_STATE_STI |
-				GUEST_INTR_STATE_MOV_SS |
-				GUEST_INTR_STATE_NMI));
 	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
-		vcpu->arch.nmi_window_open = 0;
+		return 0;
 
-	vcpu->arch.interrupt_window_open =
-		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
-		 !(guest_intr & (GUEST_INTR_STATE_STI |
-				 GUEST_INTR_STATE_MOV_SS)));
+	return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
+				GUEST_INTR_STATE_NMI));
 }
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	vmx_update_window_states(vcpu);
-	return vcpu->arch.interrupt_window_open;
+	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
 
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -3194,9 +3187,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		       __func__, vectoring_info, exit_reason);
 
 	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
-		if (vcpu->arch.interrupt_window_open) {
+		if (vmx_interrupt_allowed(vcpu)) {
 			vmx->soft_vnmi_blocked = 0;
-			vcpu->arch.nmi_window_open = 1;
 		} else if (vmx->vnmi_blocked_time > 1000000000LL &&
 			   vcpu->arch.nmi_pending) {
 			/*
@@ -3209,7 +3201,6 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			       "state on VCPU %d after 1 s timeout\n",
 			       __func__, vcpu->vcpu_id);
 			vmx->soft_vnmi_blocked = 0;
-			vmx->vcpu.arch.nmi_window_open = 1;
 		}
 	}
 
@@ -3324,13 +3315,13 @@ static void vmx_intr_inject(struct kvm_vcpu *vcpu)
 
 	/* try to inject new event if pending */
 	if (vcpu->arch.nmi_pending) {
-		if (vcpu->arch.nmi_window_open) {
+		if (vmx_nmi_allowed(vcpu)) {
 			vcpu->arch.nmi_pending = false;
 			vcpu->arch.nmi_injected = true;
 			vmx_inject_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_interrupt(vcpu)) {
-		if (vcpu->arch.interrupt_window_open) {
+		if (vmx_interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
 			vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
 		}
@@ -3344,8 +3335,6 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	update_tpr_threshold(vcpu);
 
-	vmx_update_window_states(vcpu);
-
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
 				GUEST_INTR_STATE_STI |
@@ -3518,8 +3507,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
 
-	vmx_update_window_states(vcpu);
-
 	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 	vmx->launched = 1;
 