diff options
author | Marcelo Tosatti <mtosatti@redhat.com> | 2008-09-08 14:23:48 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-10-15 04:15:26 -0400 |
commit | d76901750ab9f71091d33ef3d2b5909d8a9a4ad4 (patch) | |
tree | e4a7ac912c70a05e4c8ee4e7294d9add48383fc2 /arch/x86/kvm/x86.c | |
parent | a6a3034cb979b1fa3948d8e1e91b2387fc66b89b (diff) |
KVM: x86: do not execute halted vcpus
Offline or uninitialized vcpus can be executed if requested to perform
userspace work.
Follow Avi's suggestion to handle halted vcpus in the main loop,
simplifying kvm_emulate_halt(). Introduce a new vcpu->requests bit to
indicate events that promote state from halted to running.
Also standardize vcpu wake sites.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 100 |
1 files changed, 55 insertions, 45 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3f3cb7107c03..bf98d40b21ec 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2798,11 +2798,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) | |||
2798 | KVMTRACE_0D(HLT, vcpu, handler); | 2798 | KVMTRACE_0D(HLT, vcpu, handler); |
2799 | if (irqchip_in_kernel(vcpu->kvm)) { | 2799 | if (irqchip_in_kernel(vcpu->kvm)) { |
2800 | vcpu->arch.mp_state = KVM_MP_STATE_HALTED; | 2800 | vcpu->arch.mp_state = KVM_MP_STATE_HALTED; |
2801 | up_read(&vcpu->kvm->slots_lock); | ||
2802 | kvm_vcpu_block(vcpu); | ||
2803 | down_read(&vcpu->kvm->slots_lock); | ||
2804 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) | ||
2805 | return -EINTR; | ||
2806 | return 1; | 2801 | return 1; |
2807 | } else { | 2802 | } else { |
2808 | vcpu->run->exit_reason = KVM_EXIT_HLT; | 2803 | vcpu->run->exit_reason = KVM_EXIT_HLT; |
@@ -3097,24 +3092,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu) | |||
3097 | up_read(&vcpu->kvm->slots_lock); | 3092 | up_read(&vcpu->kvm->slots_lock); |
3098 | } | 3093 | } |
3099 | 3094 | ||
3100 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3095 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
3101 | { | 3096 | { |
3102 | int r; | 3097 | int r; |
3103 | 3098 | ||
3104 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { | ||
3105 | pr_debug("vcpu %d received sipi with vector # %x\n", | ||
3106 | vcpu->vcpu_id, vcpu->arch.sipi_vector); | ||
3107 | kvm_lapic_reset(vcpu); | ||
3108 | r = kvm_x86_ops->vcpu_reset(vcpu); | ||
3109 | if (r) | ||
3110 | return r; | ||
3111 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
3112 | } | ||
3113 | |||
3114 | down_read(&vcpu->kvm->slots_lock); | ||
3115 | vapic_enter(vcpu); | ||
3116 | |||
3117 | again: | ||
3118 | if (vcpu->requests) | 3099 | if (vcpu->requests) |
3119 | if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) | 3100 | if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) |
3120 | kvm_mmu_unload(vcpu); | 3101 | kvm_mmu_unload(vcpu); |
@@ -3151,22 +3132,13 @@ again: | |||
3151 | 3132 | ||
3152 | local_irq_disable(); | 3133 | local_irq_disable(); |
3153 | 3134 | ||
3154 | if (vcpu->requests || need_resched()) { | 3135 | if (vcpu->requests || need_resched() || signal_pending(current)) { |
3155 | local_irq_enable(); | 3136 | local_irq_enable(); |
3156 | preempt_enable(); | 3137 | preempt_enable(); |
3157 | r = 1; | 3138 | r = 1; |
3158 | goto out; | 3139 | goto out; |
3159 | } | 3140 | } |
3160 | 3141 | ||
3161 | if (signal_pending(current)) { | ||
3162 | local_irq_enable(); | ||
3163 | preempt_enable(); | ||
3164 | r = -EINTR; | ||
3165 | kvm_run->exit_reason = KVM_EXIT_INTR; | ||
3166 | ++vcpu->stat.signal_exits; | ||
3167 | goto out; | ||
3168 | } | ||
3169 | |||
3170 | if (vcpu->guest_debug.enabled) | 3142 | if (vcpu->guest_debug.enabled) |
3171 | kvm_x86_ops->guest_debug_pre(vcpu); | 3143 | kvm_x86_ops->guest_debug_pre(vcpu); |
3172 | 3144 | ||
@@ -3227,26 +3199,63 @@ again: | |||
3227 | kvm_lapic_sync_from_vapic(vcpu); | 3199 | kvm_lapic_sync_from_vapic(vcpu); |
3228 | 3200 | ||
3229 | r = kvm_x86_ops->handle_exit(kvm_run, vcpu); | 3201 | r = kvm_x86_ops->handle_exit(kvm_run, vcpu); |
3202 | out: | ||
3203 | return r; | ||
3204 | } | ||
3230 | 3205 | ||
3231 | if (r > 0) { | 3206 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
3232 | if (dm_request_for_irq_injection(vcpu, kvm_run)) { | 3207 | { |
3233 | r = -EINTR; | 3208 | int r; |
3234 | kvm_run->exit_reason = KVM_EXIT_INTR; | 3209 | |
3235 | ++vcpu->stat.request_irq_exits; | 3210 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { |
3236 | goto out; | 3211 | printk("vcpu %d received sipi with vector # %x\n", |
3237 | } | 3212 | vcpu->vcpu_id, vcpu->arch.sipi_vector); |
3238 | if (!need_resched()) | 3213 | kvm_lapic_reset(vcpu); |
3239 | goto again; | 3214 | r = kvm_x86_ops->vcpu_reset(vcpu); |
3215 | if (r) | ||
3216 | return r; | ||
3217 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | ||
3240 | } | 3218 | } |
3241 | 3219 | ||
3242 | out: | 3220 | down_read(&vcpu->kvm->slots_lock); |
3243 | up_read(&vcpu->kvm->slots_lock); | 3221 | vapic_enter(vcpu); |
3244 | if (r > 0) { | 3222 | |
3245 | kvm_resched(vcpu); | 3223 | r = 1; |
3246 | down_read(&vcpu->kvm->slots_lock); | 3224 | while (r > 0) { |
3247 | goto again; | 3225 | if (kvm_arch_vcpu_runnable(vcpu)) |
3226 | r = vcpu_enter_guest(vcpu, kvm_run); | ||
3227 | else { | ||
3228 | up_read(&vcpu->kvm->slots_lock); | ||
3229 | kvm_vcpu_block(vcpu); | ||
3230 | down_read(&vcpu->kvm->slots_lock); | ||
3231 | if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) | ||
3232 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) | ||
3233 | vcpu->arch.mp_state = | ||
3234 | KVM_MP_STATE_RUNNABLE; | ||
3235 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) | ||
3236 | r = -EINTR; | ||
3237 | } | ||
3238 | |||
3239 | if (r > 0) { | ||
3240 | if (dm_request_for_irq_injection(vcpu, kvm_run)) { | ||
3241 | r = -EINTR; | ||
3242 | kvm_run->exit_reason = KVM_EXIT_INTR; | ||
3243 | ++vcpu->stat.request_irq_exits; | ||
3244 | } | ||
3245 | if (signal_pending(current)) { | ||
3246 | r = -EINTR; | ||
3247 | kvm_run->exit_reason = KVM_EXIT_INTR; | ||
3248 | ++vcpu->stat.signal_exits; | ||
3249 | } | ||
3250 | if (need_resched()) { | ||
3251 | up_read(&vcpu->kvm->slots_lock); | ||
3252 | kvm_resched(vcpu); | ||
3253 | down_read(&vcpu->kvm->slots_lock); | ||
3254 | } | ||
3255 | } | ||
3248 | } | 3256 | } |
3249 | 3257 | ||
3258 | up_read(&vcpu->kvm->slots_lock); | ||
3250 | post_kvm_run_save(vcpu, kvm_run); | 3259 | post_kvm_run_save(vcpu, kvm_run); |
3251 | 3260 | ||
3252 | vapic_exit(vcpu); | 3261 | vapic_exit(vcpu); |
@@ -3266,6 +3275,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3266 | 3275 | ||
3267 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { | 3276 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { |
3268 | kvm_vcpu_block(vcpu); | 3277 | kvm_vcpu_block(vcpu); |
3278 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
3269 | r = -EAGAIN; | 3279 | r = -EAGAIN; |
3270 | goto out; | 3280 | goto out; |
3271 | } | 3281 | } |