diff options
author | David Hildenbrand <dahi@linux.vnet.ibm.com> | 2015-09-29 10:27:24 -0400 |
---|---|---|
committer | Christian Borntraeger <borntraeger@de.ibm.com> | 2015-10-13 09:50:34 -0400 |
commit | 5a3d883a59b3fe8dc8775c7a79200a5b11a6761e (patch) | |
tree | a1b60ce1821f86e3a6fc30e32ba3228d5525f6f6 | |
parent | 238293b14d9b1f5689e2aa68710000b0f25aa612 (diff) |
KVM: s390: switch to get_tod_clock() and fix STP sync races
Nobody except early.c makes use of store_tod_clock() to handle the
cc. So if we would get a cc != 0, we would be in more trouble.
Let's replace all users with get_tod_clock(). Returning a cc
on an ioctl sounded strange either way.
We can now also easily move the get_tod_clock() call into the
preempt_disable() section. This is in fact necessary to make the
STP sync work as expected. Otherwise the host TOD could change
and we would end up with a wrong epoch calculation.
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 18 | ||||
-rw-r--r-- | arch/s390/kvm/priv.c | 8 |
2 files changed, 6 insertions, 20 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0a67c40eece9..a0907795f31d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -523,19 +523,14 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) | |||
523 | { | 523 | { |
524 | struct kvm_vcpu *cur_vcpu; | 524 | struct kvm_vcpu *cur_vcpu; |
525 | unsigned int vcpu_idx; | 525 | unsigned int vcpu_idx; |
526 | u64 host_tod, gtod; | 526 | u64 gtod; |
527 | int r; | ||
528 | 527 | ||
529 | if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) | 528 | if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) |
530 | return -EFAULT; | 529 | return -EFAULT; |
531 | 530 | ||
532 | r = store_tod_clock(&host_tod); | ||
533 | if (r) | ||
534 | return r; | ||
535 | |||
536 | mutex_lock(&kvm->lock); | 531 | mutex_lock(&kvm->lock); |
537 | preempt_disable(); | 532 | preempt_disable(); |
538 | kvm->arch.epoch = gtod - host_tod; | 533 | kvm->arch.epoch = gtod - get_tod_clock(); |
539 | kvm_s390_vcpu_block_all(kvm); | 534 | kvm_s390_vcpu_block_all(kvm); |
540 | kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) | 535 | kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) |
541 | cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; | 536 | cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; |
@@ -581,15 +576,10 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) | |||
581 | 576 | ||
582 | static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) | 577 | static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) |
583 | { | 578 | { |
584 | u64 host_tod, gtod; | 579 | u64 gtod; |
585 | int r; | ||
586 | |||
587 | r = store_tod_clock(&host_tod); | ||
588 | if (r) | ||
589 | return r; | ||
590 | 580 | ||
591 | preempt_disable(); | 581 | preempt_disable(); |
592 | gtod = host_tod + kvm->arch.epoch; | 582 | gtod = get_tod_clock() + kvm->arch.epoch; |
593 | preempt_enable(); | 583 | preempt_enable(); |
594 | if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) | 584 | if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) |
595 | return -EFAULT; | 585 | return -EFAULT; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 4d21dc4d1a84..b253de5b8945 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -34,7 +34,7 @@ | |||
34 | static int handle_set_clock(struct kvm_vcpu *vcpu) | 34 | static int handle_set_clock(struct kvm_vcpu *vcpu) |
35 | { | 35 | { |
36 | struct kvm_vcpu *cpup; | 36 | struct kvm_vcpu *cpup; |
37 | s64 hostclk, val; | 37 | s64 val; |
38 | int i, rc; | 38 | int i, rc; |
39 | ar_t ar; | 39 | ar_t ar; |
40 | u64 op2; | 40 | u64 op2; |
@@ -49,15 +49,11 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
49 | if (rc) | 49 | if (rc) |
50 | return kvm_s390_inject_prog_cond(vcpu, rc); | 50 | return kvm_s390_inject_prog_cond(vcpu, rc); |
51 | 51 | ||
52 | if (store_tod_clock(&hostclk)) { | ||
53 | kvm_s390_set_psw_cc(vcpu, 3); | ||
54 | return 0; | ||
55 | } | ||
56 | VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); | 52 | VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); |
57 | val = (val - hostclk) & ~0x3fUL; | ||
58 | 53 | ||
59 | mutex_lock(&vcpu->kvm->lock); | 54 | mutex_lock(&vcpu->kvm->lock); |
60 | preempt_disable(); | 55 | preempt_disable(); |
56 | val = (val - get_tod_clock()) & ~0x3fUL; | ||
61 | kvm_for_each_vcpu(i, cpup, vcpu->kvm) | 57 | kvm_for_each_vcpu(i, cpup, vcpu->kvm) |
62 | cpup->arch.sie_block->epoch = val; | 58 | cpup->arch.sie_block->epoch = val; |
63 | preempt_enable(); | 59 | preempt_enable(); |