aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2015-05-12 03:49:14 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2015-10-13 09:50:35 -0400
commit25ed16759660cdfccd4a3cb7d30cce8a797b542a (patch)
tree4aafb77404f7fede2ac763509169a0f586b21c9e
parent5a3d883a59b3fe8dc8775c7a79200a5b11a6761e (diff)
KVM: s390: factor out and fix setting of guest TOD clock
Let's move that whole logic into one function. We now always use unsigned values when calculating the epoch (signed over-/underflow is undefined behavior in C, while unsigned wrap-around is well defined). Also, we always have to get all VCPUs out of SIE before doing the update to avoid running differing VCPUs with different TODs. Acked-by: Christian Borntraeger <borntraeger@de.ibm.com> Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--arch/s390/kvm/kvm-s390.c28
-rw-r--r--arch/s390/kvm/kvm-s390.h1
-rw-r--r--arch/s390/kvm/priv.c15
3 files changed, 21 insertions, 23 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a0907795f31d..87bd602f326c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -521,22 +521,12 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	struct kvm_vcpu *cur_vcpu;
-	unsigned int vcpu_idx;
 	u64 gtod;
 
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = gtod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
-		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
+	kvm_s390_set_tod_clock(kvm, gtod);
 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
 	return 0;
 }
@@ -1906,6 +1896,22 @@ retry:
 	return 0;
 }
 
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	mutex_lock(&kvm->lock);
+	preempt_disable();
+	kvm->arch.epoch = tod - get_tod_clock();
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+	kvm_s390_vcpu_unblock_all(kvm);
+	preempt_enable();
+	mutex_unlock(&kvm->lock);
+}
+
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 3a368d2a6114..cc15ea3a150e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -231,6 +231,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index b253de5b8945..77191b85ea7a 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -33,11 +33,9 @@
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *cpup;
-	s64 val;
-	int i, rc;
+	int rc;
 	ar_t ar;
-	u64 op2;
+	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -50,14 +48,7 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
 	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-
-	mutex_lock(&vcpu->kvm->lock);
-	preempt_disable();
-	val = (val - get_tod_clock()) & ~0x3fUL;
-	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
-		cpup->arch.sie_block->epoch = val;
-	preempt_enable();
-	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_set_tod_clock(vcpu->kvm, val);
 
 	kvm_s390_set_psw_cc(vcpu, 0);
 	return 0;