aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2009-03-23 09:11:44 -0400
committerAvi Kivity <avi@redhat.com>2009-06-10 04:48:33 -0400
commit09cec754885f900f6aab23801878c0cd217ee1d6 (patch)
tree3f634b6993af33914b031421e23db67744b84a9f /arch/x86/kvm/x86.c
parent089d034e0c4538d2436512fa64782b91008d4a7c (diff)
KVM: Timer event should not unconditionally unhalt vcpu.
Currently, timer events are processed before entering guest mode. Move this to the main vcpu event loop, since timer events should be processed even while the vcpu is halted. A timer may cause an interrupt/NMI to be injected, and only then will the vcpu be unhalted. Signed-off-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c57
1 file changed, 34 insertions, 23 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c0ae5e6cba9b..8fca7a4e95a3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3133,9 +3133,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3133 } 3133 }
3134 } 3134 }
3135 3135
3136 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3137 kvm_inject_pending_timer_irqs(vcpu);
3138
3139 preempt_disable(); 3136 preempt_disable();
3140 3137
3141 kvm_x86_ops->prepare_guest_switch(vcpu); 3138 kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -3235,6 +3232,7 @@ out:
3235 return r; 3232 return r;
3236} 3233}
3237 3234
3235
3238static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 3236static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3239{ 3237{
3240 int r; 3238 int r;
@@ -3261,29 +3259,42 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3261 kvm_vcpu_block(vcpu); 3259 kvm_vcpu_block(vcpu);
3262 down_read(&vcpu->kvm->slots_lock); 3260 down_read(&vcpu->kvm->slots_lock);
3263 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) 3261 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
3264 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 3262 {
3263 switch(vcpu->arch.mp_state) {
3264 case KVM_MP_STATE_HALTED:
3265 vcpu->arch.mp_state = 3265 vcpu->arch.mp_state =
3266 KVM_MP_STATE_RUNNABLE; 3266 KVM_MP_STATE_RUNNABLE;
3267 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) 3267 case KVM_MP_STATE_RUNNABLE:
3268 r = -EINTR; 3268 break;
3269 case KVM_MP_STATE_SIPI_RECEIVED:
3270 default:
3271 r = -EINTR;
3272 break;
3273 }
3274 }
3269 } 3275 }
3270 3276
3271 if (r > 0) { 3277 if (r <= 0)
3272 if (dm_request_for_irq_injection(vcpu, kvm_run)) { 3278 break;
3273 r = -EINTR; 3279
3274 kvm_run->exit_reason = KVM_EXIT_INTR; 3280 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
3275 ++vcpu->stat.request_irq_exits; 3281 if (kvm_cpu_has_pending_timer(vcpu))
3276 } 3282 kvm_inject_pending_timer_irqs(vcpu);
3277 if (signal_pending(current)) { 3283
3278 r = -EINTR; 3284 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3279 kvm_run->exit_reason = KVM_EXIT_INTR; 3285 r = -EINTR;
3280 ++vcpu->stat.signal_exits; 3286 kvm_run->exit_reason = KVM_EXIT_INTR;
3281 } 3287 ++vcpu->stat.request_irq_exits;
3282 if (need_resched()) { 3288 }
3283 up_read(&vcpu->kvm->slots_lock); 3289 if (signal_pending(current)) {
3284 kvm_resched(vcpu); 3290 r = -EINTR;
3285 down_read(&vcpu->kvm->slots_lock); 3291 kvm_run->exit_reason = KVM_EXIT_INTR;
3286 } 3292 ++vcpu->stat.signal_exits;
3293 }
3294 if (need_resched()) {
3295 up_read(&vcpu->kvm->slots_lock);
3296 kvm_resched(vcpu);
3297 down_read(&vcpu->kvm->slots_lock);
3287 } 3298 }
3288 } 3299 }
3289 3300