Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/i8254.c	  5
-rw-r--r--	arch/x86/kvm/lapic.c	 16
-rw-r--r--	arch/x86/kvm/x86.c	100
3 files changed, 61 insertions, 60 deletions
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4cb443026ec4..634132a9a512 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,10 +200,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
 
 	if (!atomic_inc_and_test(&pt->pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+	if (vcpu0 && waitqueue_active(&vcpu0->wq))
 		wake_up_interruptible(&vcpu0->wq);
-	}
 
 	pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
 	pt->scheduled = ktime_to_ns(pt->timer.expires);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index be94f93a73f6..fd00f698692f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -339,13 +339,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	} else
 		apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-	if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-		kvm_vcpu_kick(vcpu);
-	else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-		if (waitqueue_active(&vcpu->wq))
-			wake_up_interruptible(&vcpu->wq);
-	}
+	kvm_vcpu_kick(vcpu);
 
 	result = (orig_irr == 0);
 	break;
@@ -384,8 +378,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
 		vcpu->arch.sipi_vector = vector;
 		vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
-		if (waitqueue_active(&vcpu->wq))
-			wake_up_interruptible(&vcpu->wq);
+		kvm_vcpu_kick(vcpu);
 	}
 	break;
 
@@ -950,10 +943,9 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
 	if(!atomic_inc_and_test(&apic->timer.pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
-	if (waitqueue_active(q)) {
-		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 	if (apic_lvtt_period(apic)) {
 		result = 1;
 		apic->timer.dev.expires = ktime_add_ns(
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f3cb7107c03..bf98d40b21ec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2798,11 +2798,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	KVMTRACE_0D(HLT, vcpu, handler);
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
-		up_read(&vcpu->kvm->slots_lock);
-		kvm_vcpu_block(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
-		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
-			return -EINTR;
 		return 1;
 	} else {
 		vcpu->run->exit_reason = KVM_EXIT_HLT;
@@ -3097,24 +3092,10 @@ static void vapic_exit(struct kvm_vcpu *vcpu)
 	up_read(&vcpu->kvm->slots_lock);
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
 
-	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
-		pr_debug("vcpu %d received sipi with vector # %x\n",
-			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
-		kvm_lapic_reset(vcpu);
-		r = kvm_x86_ops->vcpu_reset(vcpu);
-		if (r)
-			return r;
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-	}
-
-	down_read(&vcpu->kvm->slots_lock);
-	vapic_enter(vcpu);
-
-again:
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 			kvm_mmu_unload(vcpu);
@@ -3151,22 +3132,13 @@ again:
 
 	local_irq_disable();
 
-	if (vcpu->requests || need_resched()) {
+	if (vcpu->requests || need_resched() || signal_pending(current)) {
 		local_irq_enable();
 		preempt_enable();
 		r = 1;
 		goto out;
 	}
 
-	if (signal_pending(current)) {
-		local_irq_enable();
-		preempt_enable();
-		r = -EINTR;
-		kvm_run->exit_reason = KVM_EXIT_INTR;
-		++vcpu->stat.signal_exits;
-		goto out;
-	}
-
 	if (vcpu->guest_debug.enabled)
 		kvm_x86_ops->guest_debug_pre(vcpu);
 
@@ -3227,26 +3199,63 @@ again:
 	kvm_lapic_sync_from_vapic(vcpu);
 
 	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+out:
+	return r;
+}
 
-	if (r > 0) {
-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.request_irq_exits;
-			goto out;
-		}
-		if (!need_resched())
-			goto again;
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	int r;
+
+	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
+		printk("vcpu %d received sipi with vector # %x\n",
+		       vcpu->vcpu_id, vcpu->arch.sipi_vector);
+		kvm_lapic_reset(vcpu);
+		r = kvm_x86_ops->vcpu_reset(vcpu);
+		if (r)
+			return r;
+		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
-out:
-	up_read(&vcpu->kvm->slots_lock);
-	if (r > 0) {
-		kvm_resched(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
-		goto again;
+	down_read(&vcpu->kvm->slots_lock);
+	vapic_enter(vcpu);
+
+	r = 1;
+	while (r > 0) {
+		if (kvm_arch_vcpu_runnable(vcpu))
+			r = vcpu_enter_guest(vcpu, kvm_run);
+		else {
+			up_read(&vcpu->kvm->slots_lock);
+			kvm_vcpu_block(vcpu);
+			down_read(&vcpu->kvm->slots_lock);
+			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+				if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+					vcpu->arch.mp_state =
+						KVM_MP_STATE_RUNNABLE;
+			if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
+				r = -EINTR;
+		}
+
+		if (r > 0) {
+			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+				r = -EINTR;
+				kvm_run->exit_reason = KVM_EXIT_INTR;
+				++vcpu->stat.request_irq_exits;
+			}
+			if (signal_pending(current)) {
+				r = -EINTR;
+				kvm_run->exit_reason = KVM_EXIT_INTR;
+				++vcpu->stat.signal_exits;
+			}
+			if (need_resched()) {
+				up_read(&vcpu->kvm->slots_lock);
+				kvm_resched(vcpu);
+				down_read(&vcpu->kvm->slots_lock);
+			}
+		}
 	}
 
+	up_read(&vcpu->kvm->slots_lock);
 	post_kvm_run_save(vcpu, kvm_run);
 
 	vapic_exit(vcpu);
@@ -3266,6 +3275,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		r = -EAGAIN;
 		goto out;
 	}