aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2016-02-22 08:14:50 -0500
committerChristian Borntraeger <borntraeger@de.ibm.com>2016-03-08 07:57:53 -0500
commitb3c17f10fa2cfc29cf35e4821275e046e725213e (patch)
treef9b80110e242ab7f1c044970d7e31f71a7234d37
parent5ebda31686af6bb70affdcc5777ebc7ed81c0eac (diff)
KVM: s390: wake up when the VCPU cpu timer expires
When the VCPU cpu timer expires, we have to wake up just like when the ckc triggers. For now, setting up a cpu timer in the guest and going into enabled wait will never lead to a wakeup. This patch fixes this problem. Just as for the ckc, we have to take care of waking up too early. We have to recalculate the sleep time and go back to sleep. Please note that the timer callback calls kvm_s390_get_cpu_timer() from interrupt context. As the timer is canceled when leaving handle_wait(), and we don't do any VCPU cpu timer writes/updates in that function, we can be sure that we will never try to read the VCPU cpu timer from the same cpu that is currently updating the timer (deadlock). Reported-by: Sascha Silbe <silbe@linux.vnet.ibm.com> Tested-by: Sascha Silbe <silbe@linux.vnet.ibm.com> Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--arch/s390/kvm/interrupt.c48
1 file changed, 35 insertions, 13 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 4604e9accc65..ef84a803433e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -909,9 +909,35 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
909 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); 909 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
910} 910}
911 911
912static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
913{
914 u64 now, cputm, sltime = 0;
915
916 if (ckc_interrupts_enabled(vcpu)) {
917 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
918 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
919 /* already expired or overflow? */
920 if (!sltime || vcpu->arch.sie_block->ckc <= now)
921 return 0;
922 if (cpu_timer_interrupts_enabled(vcpu)) {
923 cputm = kvm_s390_get_cpu_timer(vcpu);
924 /* already expired? */
925 if (cputm >> 63)
926 return 0;
927 return min(sltime, tod_to_ns(cputm));
928 }
929 } else if (cpu_timer_interrupts_enabled(vcpu)) {
930 sltime = kvm_s390_get_cpu_timer(vcpu);
931 /* already expired? */
932 if (sltime >> 63)
933 return 0;
934 }
935 return sltime;
936}
937
912int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 938int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
913{ 939{
914 u64 now, sltime; 940 u64 sltime;
915 941
916 vcpu->stat.exit_wait_state++; 942 vcpu->stat.exit_wait_state++;
917 943
@@ -924,22 +950,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
924 return -EOPNOTSUPP; /* disabled wait */ 950 return -EOPNOTSUPP; /* disabled wait */
925 } 951 }
926 952
927 if (!ckc_interrupts_enabled(vcpu)) { 953 if (!ckc_interrupts_enabled(vcpu) &&
954 !cpu_timer_interrupts_enabled(vcpu)) {
928 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 955 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
929 __set_cpu_idle(vcpu); 956 __set_cpu_idle(vcpu);
930 goto no_timer; 957 goto no_timer;
931 } 958 }
932 959
933 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); 960 sltime = __calculate_sltime(vcpu);
934 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); 961 if (!sltime)
935
936 /* underflow */
937 if (vcpu->arch.sie_block->ckc < now)
938 return 0; 962 return 0;
939 963
940 __set_cpu_idle(vcpu); 964 __set_cpu_idle(vcpu);
941 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 965 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
942 VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime); 966 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
943no_timer: 967no_timer:
944 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 968 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
945 kvm_vcpu_block(vcpu); 969 kvm_vcpu_block(vcpu);
@@ -966,18 +990,16 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
966enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 990enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
967{ 991{
968 struct kvm_vcpu *vcpu; 992 struct kvm_vcpu *vcpu;
969 u64 now, sltime; 993 u64 sltime;
970 994
971 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 995 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
972 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); 996 sltime = __calculate_sltime(vcpu);
973 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
974 997
975 /* 998 /*
976 * If the monotonic clock runs faster than the tod clock we might be 999 * If the monotonic clock runs faster than the tod clock we might be
977 * woken up too early and have to go back to sleep to avoid deadlocks. 1000 * woken up too early and have to go back to sleep to avoid deadlocks.
978 */ 1001 */
979 if (vcpu->arch.sie_block->ckc > now && 1002 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
980 hrtimer_forward_now(timer, ns_to_ktime(sltime)))
981 return HRTIMER_RESTART; 1003 return HRTIMER_RESTART;
982 kvm_s390_vcpu_wakeup(vcpu); 1004 kvm_s390_vcpu_wakeup(vcpu);
983 return HRTIMER_NORESTART; 1005 return HRTIMER_NORESTART;