aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2015-09-29 10:20:36 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2015-10-13 09:50:35 -0400
commit60417fcc2b0235dfe3dcd589c56dbe3ea1a64c54 (patch)
tree4e5442dbd7584f8710dd13a3b09eada92dcf0292
parent25ed16759660cdfccd4a3cb7d30cce8a797b542a (diff)
KVM: s390: factor out reading of the guest TOD clock
Let's factor this out and always use get_tod_clock_fast() when reading
the guest TOD.

STORE CLOCK FAST does not do serialization and, therefore, might result
in some fuzziness between different processors in a way that subsequent
calls on different CPUs might have time stamps that are earlier. This
semantics is fine though for all KVM use cases. To make it obvious that
the new function has STORE CLOCK FAST semantics we name it
kvm_s390_get_tod_clock_fast.

With this patch, we only have a handful of places where we have to care
about STP sync (using preempt_disable() logic).

Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--arch/s390/kvm/interrupt.c15
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/kvm/kvm-s390.h10
3 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a8be542b9cb0..373e32346d68 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -69,13 +69,8 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-	preempt_disable();
-	if (!(vcpu->arch.sie_block->ckc <
-	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
-		preempt_enable();
+	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
 		return 0;
-	}
-	preempt_enable();
 	return ckc_interrupts_enabled(vcpu);
 }
 
@@ -851,9 +846,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	preempt_disable();
-	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	preempt_enable();
+	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/* underflow */
@@ -892,9 +885,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	preempt_disable();
-	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	preempt_enable();
+	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 87bd602f326c..618c85411a51 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -568,9 +568,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	u64 gtod;
 
-	preempt_disable();
-	gtod = get_tod_clock() + kvm->arch.epoch;
-	preempt_enable();
+	gtod = kvm_s390_get_tod_clock_fast(kvm);
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index cc15ea3a150e..1e70e00d3c5e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -271,6 +271,16 @@ static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
 		kvm_s390_vcpu_unblock(vcpu);
 }
 
+static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
+{
+	u64 rc;
+
+	preempt_disable();
+	rc = get_tod_clock_fast() + kvm->arch.epoch;
+	preempt_enable();
+	return rc;
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu