diff options
author | Christoffer Dall <christoffer.dall@arm.com> | 2018-11-26 12:21:22 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2019-02-19 16:05:36 -0500 |
commit | accb99bcd0ca6d3ee412557b0c3f583a3abc0eb6 (patch) | |
tree | e997f4ed8b196b597a92bc89b22f00b70e78eee0 /virt/kvm | |
parent | e329fb75d519e3dc3eb11b22d5bb846516be3521 (diff) |
KVM: arm/arm64: Simplify bg_timer programming
Instead of calling into kvm_timer_[un]schedule from the main kvm
blocking path, test if the VCPU is on the wait queue from the load/put
path and perform the background timer setup/cancel in this path.
This has the distinct advantage that we no longer race between load/put
and schedule/unschedule, and that programming and canceling of the
bg_timer always happen when the timer state is not loaded.
Note that we must now remove the checks in kvm_timer_blocking that
skipped scheduling a background timer when one of the timers could
already fire, because we no longer have a guarantee that
kvm_vcpu_check_block() will be called before kvm_timer_blocking.
Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r-- | virt/kvm/arm/arch_timer.c | 35 | ||||
-rw-r--r-- | virt/kvm/arm/arm.c | 2 |
2 files changed, 14 insertions, 23 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index b07ac4614e1c..4986028d9829 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -349,22 +349,12 @@ out: | |||
349 | * thread is removed from its waitqueue and made runnable when there's a timer | 349 | * thread is removed from its waitqueue and made runnable when there's a timer |
350 | * interrupt to handle. | 350 | * interrupt to handle. |
351 | */ | 351 | */ |
352 | void kvm_timer_schedule(struct kvm_vcpu *vcpu) | 352 | static void kvm_timer_blocking(struct kvm_vcpu *vcpu) |
353 | { | 353 | { |
354 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 354 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
355 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 355 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
356 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | 356 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); |
357 | 357 | ||
358 | vtimer_save_state(vcpu); | ||
359 | |||
360 | /* | ||
361 | * No need to schedule a background timer if any guest timer has | ||
362 | * already expired, because kvm_vcpu_block will return before putting | ||
363 | * the thread to sleep. | ||
364 | */ | ||
365 | if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer)) | ||
366 | return; | ||
367 | |||
368 | /* | 358 | /* |
369 | * If both timers are not capable of raising interrupts (disabled or | 359 | * If both timers are not capable of raising interrupts (disabled or |
370 | * masked), then there's no more work for us to do. | 360 | * masked), then there's no more work for us to do. |
@@ -373,12 +363,19 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) | |||
373 | return; | 363 | return; |
374 | 364 | ||
375 | /* | 365 | /* |
376 | * The guest timers have not yet expired, schedule a background timer. | 366 | * At least one guest time will expire. Schedule a background timer. |
377 | * Set the earliest expiration time among the guest timers. | 367 | * Set the earliest expiration time among the guest timers. |
378 | */ | 368 | */ |
379 | soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); | 369 | soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); |
380 | } | 370 | } |
381 | 371 | ||
372 | static void kvm_timer_unblocking(struct kvm_vcpu *vcpu) | ||
373 | { | ||
374 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
375 | |||
376 | soft_timer_cancel(&timer->bg_timer); | ||
377 | } | ||
378 | |||
382 | static void vtimer_restore_state(struct kvm_vcpu *vcpu) | 379 | static void vtimer_restore_state(struct kvm_vcpu *vcpu) |
383 | { | 380 | { |
384 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 381 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
@@ -401,15 +398,6 @@ out: | |||
401 | local_irq_restore(flags); | 398 | local_irq_restore(flags); |
402 | } | 399 | } |
403 | 400 | ||
404 | void kvm_timer_unschedule(struct kvm_vcpu *vcpu) | ||
405 | { | ||
406 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
407 | |||
408 | vtimer_restore_state(vcpu); | ||
409 | |||
410 | soft_timer_cancel(&timer->bg_timer); | ||
411 | } | ||
412 | |||
413 | static void set_cntvoff(u64 cntvoff) | 401 | static void set_cntvoff(u64 cntvoff) |
414 | { | 402 | { |
415 | u32 low = lower_32_bits(cntvoff); | 403 | u32 low = lower_32_bits(cntvoff); |
@@ -485,6 +473,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) | |||
485 | /* Set the background timer for the physical timer emulation. */ | 473 | /* Set the background timer for the physical timer emulation. */ |
486 | phys_timer_emulate(vcpu); | 474 | phys_timer_emulate(vcpu); |
487 | 475 | ||
476 | kvm_timer_unblocking(vcpu); | ||
477 | |||
488 | /* If the timer fired while we weren't running, inject it now */ | 478 | /* If the timer fired while we weren't running, inject it now */ |
489 | if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) | 479 | if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) |
490 | kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); | 480 | kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); |
@@ -527,6 +517,9 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) | |||
527 | */ | 517 | */ |
528 | soft_timer_cancel(&timer->phys_timer); | 518 | soft_timer_cancel(&timer->phys_timer); |
529 | 519 | ||
520 | if (swait_active(kvm_arch_vcpu_wq(vcpu))) | ||
521 | kvm_timer_blocking(vcpu); | ||
522 | |||
530 | /* | 523 | /* |
531 | * The kernel may decide to run userspace after calling vcpu_put, so | 524 | * The kernel may decide to run userspace after calling vcpu_put, so |
532 | * we reset cntvoff to 0 to ensure a consistent read between user | 525 | * we reset cntvoff to 0 to ensure a consistent read between user |
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index b77db673bb03..9fbdb9e1c51f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -335,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
335 | 335 | ||
336 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) | 336 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) |
337 | { | 337 | { |
338 | kvm_timer_schedule(vcpu); | ||
339 | kvm_vgic_v4_enable_doorbell(vcpu); | 338 | kvm_vgic_v4_enable_doorbell(vcpu); |
340 | } | 339 | } |
341 | 340 | ||
342 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) | 341 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) |
343 | { | 342 | { |
344 | kvm_timer_unschedule(vcpu); | ||
345 | kvm_vgic_v4_disable_doorbell(vcpu); | 343 | kvm_vgic_v4_disable_doorbell(vcpu); |
346 | } | 344 | } |
347 | 345 | ||