author     Marcelo Tosatti <mtosatti@redhat.com>   2008-09-08 14:23:48 -0400
committer  Avi Kivity <avi@qumranet.com>           2008-10-15 04:15:26 -0400
commit     d76901750ab9f71091d33ef3d2b5909d8a9a4ad4
tree       e4a7ac912c70a05e4c8ee4e7294d9add48383fc2
parent     a6a3034cb979b1fa3948d8e1e91b2387fc66b89b
KVM: x86: do not execute halted vcpus
Offline or uninitialized vcpus can be executed if requested to perform
userspace work.

Follow Avi's suggestion to handle halted vcpus in the main loop,
simplifying kvm_emulate_halt(). Introduce a new vcpu->requests bit to
indicate events that promote state from halted to running.

Also standardize vcpu wake sites.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
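
The idea of the patch -- stop sleeping inside kvm_emulate_halt() and let the
outer run loop block a halted vcpu until a wake event arrives, signalled
through a dedicated request bit -- can be illustrated with a small user-space
model. This is only a sketch: the struct, the helpers (vcpu_block(),
enter_guest(), run_loop()) and the interrupt counter are invented stand-ins
for kvm_vcpu_block(), vcpu_enter_guest() and __vcpu_run(); only the
HALTED-to-RUNNABLE promotion gated on an "unhalt" request bit mirrors what
the diff below does.

/*
 * User-space model of the control flow this patch introduces.  A vcpu that
 * executes HLT is no longer put to sleep inside the emulation path; the run
 * loop blocks it and only promotes it back to RUNNABLE when the blocking
 * path reports a genuine wake event through an "unhalt" request bit.
 * Everything below is a simplified illustration, not kernel code.
 */
#include <stdio.h>

enum mp_state { MP_STATE_RUNNABLE, MP_STATE_HALTED };

#define REQ_UNHALT (1u << 0)	/* models KVM_REQ_UNHALT */

struct vcpu {
	enum mp_state mp_state;
	unsigned int requests;
	int pending_interrupts;	/* stand-in for kvm_cpu_has_interrupt() */
};

/* Stand-in for kvm_vcpu_block(): wait until a wake condition is seen. */
static void vcpu_block(struct vcpu *v)
{
	if (v->pending_interrupts > 0)
		v->requests |= REQ_UNHALT;	/* woke up with work to do */
	/* a signal would also end the wait, but without setting REQ_UNHALT */
}

/* Stand-in for one vcpu_enter_guest() pass. */
static int enter_guest(struct vcpu *v)
{
	if (v->pending_interrupts > 0) {
		printf("guest runs, handles one interrupt\n");
		v->pending_interrupts--;
	} else {
		printf("guest executes HLT, vcpu goes to HALTED\n");
		v->mp_state = MP_STATE_HALTED;
	}
	return 1;
}

/* Stand-in for __vcpu_run(): the loop structure added by the patch. */
static int run_loop(struct vcpu *v, int iterations)
{
	int r = 1;

	while (r > 0 && iterations-- > 0) {
		if (v->mp_state == MP_STATE_RUNNABLE) {
			r = enter_guest(v);
		} else {
			vcpu_block(v);
			/* promote halted -> runnable only on a real wake event */
			if (v->requests & REQ_UNHALT) {
				v->requests &= ~REQ_UNHALT;
				v->mp_state = MP_STATE_RUNNABLE;
			}
			if (v->mp_state != MP_STATE_RUNNABLE)
				r = -1;	/* -EINTR in the real code */
		}
	}
	return r;
}

int main(void)
{
	/* start halted with one interrupt pending: the block path sets
	 * REQ_UNHALT, the loop unhalts the vcpu and runs it until it
	 * halts again with nothing pending */
	struct vcpu v = { MP_STATE_HALTED, 0, 1 };

	return run_loop(&v, 4) == -1 ? 0 : 1;
}

The request bit matters because kvm_vcpu_block() can also return on a signal;
in that case the vcpu must stay halted and the run loop bails out with -EINTR,
which is exactly what the new __vcpu_run() below does.
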
-rw-r--r--  arch/x86/kvm/i8254.c     |   5
-rw-r--r--  arch/x86/kvm/lapic.c     |  16
-rw-r--r--  arch/x86/kvm/x86.c       | 100
-rw-r--r--  include/linux/kvm_host.h |   1
-rw-r--r--  virt/kvm/kvm_main.c      |  10
5 files changed, 67 insertions, 65 deletions
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4cb443026ec4..634132a9a512 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,10 +200,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
 
 	if (!atomic_inc_and_test(&pt->pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+	if (vcpu0 && waitqueue_active(&vcpu0->wq))
 		wake_up_interruptible(&vcpu0->wq);
-	}
 
 	pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
 	pt->scheduled = ktime_to_ns(pt->timer.expires);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index be94f93a73f6..fd00f698692f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -339,13 +339,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		} else
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			if (waitqueue_active(&vcpu->wq))
-				wake_up_interruptible(&vcpu->wq);
-		}
+		kvm_vcpu_kick(vcpu);
 
 		result = (orig_irr == 0);
 		break;
@@ -384,8 +378,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
 			vcpu->arch.sipi_vector = vector;
 			vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
-			if (waitqueue_active(&vcpu->wq))
-				wake_up_interruptible(&vcpu->wq);
+			kvm_vcpu_kick(vcpu);
 		}
 		break;
 
@@ -950,10 +943,9 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
 	if(!atomic_inc_and_test(&apic->timer.pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
-	if (waitqueue_active(q)) {
-		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 	if (apic_lvtt_period(apic)) {
 		result = 1;
 		apic->timer.dev.expires = ktime_add_ns(
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f3cb7107c03..bf98d40b21ec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2798,11 +2798,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	KVMTRACE_0D(HLT, vcpu, handler);
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
-		up_read(&vcpu->kvm->slots_lock);
-		kvm_vcpu_block(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
-		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
-			return -EINTR;
 		return 1;
 	} else {
 		vcpu->run->exit_reason = KVM_EXIT_HLT;
@@ -3097,24 +3092,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
 	up_read(&vcpu->kvm->slots_lock);
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
 
-	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
-		pr_debug("vcpu %d received sipi with vector # %x\n",
-			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
-		kvm_lapic_reset(vcpu);
-		r = kvm_x86_ops->vcpu_reset(vcpu);
-		if (r)
-			return r;
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-	}
-
-	down_read(&vcpu->kvm->slots_lock);
-	vapic_enter(vcpu);
-
-again:
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 			kvm_mmu_unload(vcpu);
@@ -3151,22 +3132,13 @@ again:
 
 	local_irq_disable();
 
-	if (vcpu->requests || need_resched()) {
+	if (vcpu->requests || need_resched() || signal_pending(current)) {
 		local_irq_enable();
 		preempt_enable();
 		r = 1;
 		goto out;
 	}
 
-	if (signal_pending(current)) {
-		local_irq_enable();
-		preempt_enable();
-		r = -EINTR;
-		kvm_run->exit_reason = KVM_EXIT_INTR;
-		++vcpu->stat.signal_exits;
-		goto out;
-	}
-
 	if (vcpu->guest_debug.enabled)
 		kvm_x86_ops->guest_debug_pre(vcpu);
 
@@ -3227,26 +3199,63 @@ again:
 	kvm_lapic_sync_from_vapic(vcpu);
 
 	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+out:
+	return r;
+}
 
-	if (r > 0) {
-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.request_irq_exits;
-			goto out;
-		}
-		if (!need_resched())
-			goto again;
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	int r;
+
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
+		printk("vcpu %d received sipi with vector # %x\n",
+		       vcpu->vcpu_id, vcpu->arch.sipi_vector);
+		kvm_lapic_reset(vcpu);
+		r = kvm_x86_ops->vcpu_reset(vcpu);
+		if (r)
+			return r;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
-out:
-	up_read(&vcpu->kvm->slots_lock);
-	if (r > 0) {
-		kvm_resched(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
-		goto again;
+	down_read(&vcpu->kvm->slots_lock);
+	vapic_enter(vcpu);
+
+	r = 1;
+	while (r > 0) {
+		if (kvm_arch_vcpu_runnable(vcpu))
+			r = vcpu_enter_guest(vcpu, kvm_run);
+		else {
+			up_read(&vcpu->kvm->slots_lock);
+			kvm_vcpu_block(vcpu);
+			down_read(&vcpu->kvm->slots_lock);
+			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+				if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+					vcpu->arch.mp_state =
+							KVM_MP_STATE_RUNNABLE;
+			if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
+				r = -EINTR;
+		}
+
+		if (r > 0) {
+			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+				r = -EINTR;
+				kvm_run->exit_reason = KVM_EXIT_INTR;
+				++vcpu->stat.request_irq_exits;
+			}
+			if (signal_pending(current)) {
+				r = -EINTR;
+				kvm_run->exit_reason = KVM_EXIT_INTR;
+				++vcpu->stat.signal_exits;
+			}
+			if (need_resched()) {
+				up_read(&vcpu->kvm->slots_lock);
+				kvm_resched(vcpu);
+				down_read(&vcpu->kvm->slots_lock);
+			}
+		}
 	}
 
+	up_read(&vcpu->kvm->slots_lock);
 	post_kvm_run_save(vcpu, kvm_run);
 
 	vapic_exit(vcpu);
@@ -3266,6 +3275,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		r = -EAGAIN;
 		goto out;
 	}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a18aaad2ab79..4b036430ea23 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -34,6 +34,7 @@
 #define KVM_REQ_MMU_RELOAD         3
 #define KVM_REQ_TRIPLE_FAULT       4
 #define KVM_REQ_PENDING_TIMER      5
+#define KVM_REQ_UNHALT             6
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de3b029f6adf..63e661be040a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -980,12 +980,12 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	for (;;) {
 		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-		if (kvm_cpu_has_interrupt(vcpu))
-			break;
-		if (kvm_cpu_has_pending_timer(vcpu))
-			break;
-		if (kvm_arch_vcpu_runnable(vcpu))
+		if (kvm_cpu_has_interrupt(vcpu) ||
+		    kvm_cpu_has_pending_timer(vcpu) ||
+		    kvm_arch_vcpu_runnable(vcpu)) {
+			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
 			break;
+		}
 		if (signal_pending(current))
 			break;
 