author	David Hildenbrand <dahi@linux.vnet.ibm.com>	2014-12-11 04:18:01 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2015-01-23 07:25:32 -0500
commit	2d00f759427bb3ed963b60f570830e9eca7e1c69 (patch)
tree	a3c2d5b990f08f08c6003d24a16ce7e5925e0368 /arch/s390/kvm/interrupt.c
parent	0ac96caf0f9381088c673a16d910b1d329670edf (diff)
KVM: s390: forward hrtimer if guest ckc not pending yet
Patch 0759d0681cae ("KVM: s390: cleanup handle_wait by reusing kvm_vcpu_block") changed the way pending guest clock comparator interrupts are detected. It was assumed that as soon as the hrtimer wakes up, the condition for the guest ckc is satisfied.

This is, however, only true as long as adjclock() doesn't speed up the monotonic clock. The reason is that the hrtimer is based on CLOCK_MONOTONIC, while the guest clock comparator detection is based on the raw TOD clock. If CLOCK_MONOTONIC runs faster than the TOD clock, the hrtimer wakes the target VCPU up too early, the target VCPU does not detect any pending interrupt and therefore goes back to sleep. It will never be woken up again, because the hrtimer has finished. The VCPU is stuck.

As a quick fix, we have to forward the hrtimer until the guest clock comparator is really due, to guarantee properly timed wake ups.

As the hrtimer callback might be triggered on another cpu, we have to make sure that the timer is really stopped and not currently executing the callback on another cpu. This can happen if the vcpu thread is scheduled onto another physical cpu, but the timer base is not migrated. So let's use hrtimer_cancel() instead of hrtimer_try_to_cancel().

A proper fix might be to introduce a RAW based hrtimer.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: stable@vger.kernel.org
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
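A minimal sketch (not the actual KVM code) of the re-arm pattern the patch adopts: when an hrtimer callback fires before a deadline that is tracked on a different clock, push the expiry forward by the remaining time and ask the hrtimer core to restart the timer instead of waking the waiter. deadline_reached(), remaining_ns() and wake_waiter() are hypothetical placeholders standing in for the guest ckc/TOD comparison and the VCPU wakeup.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical helpers standing in for the guest ckc/TOD logic. */
extern bool deadline_reached(void);
extern u64 remaining_ns(void);
extern void wake_waiter(void);

static enum hrtimer_restart wakeup_cb(struct hrtimer *timer)
{
	/*
	 * Woken too early (e.g. CLOCK_MONOTONIC ran ahead of the clock the
	 * deadline is based on): forward the expiry past "now" and keep
	 * the timer armed instead of completing the wakeup.
	 */
	if (!deadline_reached() &&
	    hrtimer_forward_now(timer, ns_to_ktime(remaining_ns())))
		return HRTIMER_RESTART;

	/* Deadline really due: perform the wakeup and let the timer finish. */
	wake_waiter();
	return HRTIMER_NORESTART;
}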
Diffstat (limited to 'arch/s390/kvm/interrupt.c')
-rw-r--r--	arch/s390/kvm/interrupt.c	14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5744303c1cde..7fbbcbcea6ac 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -826,7 +826,7 @@ no_timer:
 	__unset_cpu_idle(vcpu);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
+	hrtimer_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
 
@@ -846,10 +846,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
+	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	kvm_s390_vcpu_wakeup(vcpu);
+	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
+	/*
+	 * If the monotonic clock runs faster than the tod clock we might be
+	 * woken up too early and have to go back to sleep to avoid deadlocks.
+	 */
+	if (vcpu->arch.sie_block->ckc > now &&
+	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
+		return HRTIMER_RESTART;
+	kvm_s390_vcpu_wakeup(vcpu);
 	return HRTIMER_NORESTART;
 }
 
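The new callback converts the guest ckc delta from TOD-clock units to nanoseconds with tod_to_ns() before handing it to hrtimer_forward_now(). As a rough, self-contained illustration of that unit conversion (assuming only that the s390 TOD clock advances 4096 units per microsecond, i.e. bit 51 == 1us), the userspace helper below mirrors the kind of arithmetic involved; the in-kernel tod_to_ns() may differ in detail.

#include <stdint.h>
#include <stdio.h>

/* TOD-clock delta -> nanoseconds: ns = tod * 1000 / 4096 = tod * 125 / 512. */
static uint64_t tod_delta_to_ns(uint64_t tod)
{
	/* Split the multiplication so large deltas cannot overflow 64 bits. */
	return ((tod >> 9) * 125) + (((tod & 0x1ff) * 125) >> 9);
}

int main(void)
{
	/* 4096 TOD units == 1 microsecond == 1000 ns. */
	printf("%llu\n", (unsigned long long)tod_delta_to_ns(4096));
	return 0;
}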