author     Paolo Bonzini <pbonzini@redhat.com>  2016-06-13 08:50:04 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2016-06-16 04:07:37 -0400
commit     a03825bbd0c39feeba605912cdbc28e79e4e01e1 (patch)
tree       072c00b26e9ba5c2ebb07ab7d80800ed1a3239f1 /arch/s390/kvm
parent     557abc40d121358883d2da8bc8bf976d6e8ec332 (diff)
KVM: s390: use kvm->created_vcpus
The new created_vcpus field avoids possible races between enabling
capabilities and creating VCPUs.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
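For context on the race being closed: below is a simplified, hedged sketch of the locking pattern, assuming (as the created_vcpus series describes) that created_vcpus is incremented under kvm->lock at the very start of VCPU creation, while online_vcpus only becomes non-zero once the VCPU is fully set up. The struct and function names are a user-space mock for illustration, not the kernel's own code.

/*
 * Simplified user-space mock (not kernel code) of the pattern this patch
 * relies on: created_vcpus is bumped under kvm->lock before any VCPU setup
 * starts, so a capability check under the same lock cannot race with an
 * in-flight VCPU the way a late-incremented online_vcpus counter could.
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct kvm_mock {
	pthread_mutex_t lock;
	unsigned int created_vcpus;	/* incremented early, under lock */
	atomic_uint online_vcpus;	/* incremented only once the VCPU is live */
};

static int create_vcpu(struct kvm_mock *kvm)
{
	pthread_mutex_lock(&kvm->lock);
	kvm->created_vcpus++;		/* visible to enable_cap() right away */
	pthread_mutex_unlock(&kvm->lock);

	/* ...long VCPU setup outside the lock; the old check could run here... */

	atomic_fetch_add(&kvm->online_vcpus, 1);
	return 0;
}

static int enable_cap(struct kvm_mock *kvm)
{
	int r = 0;

	pthread_mutex_lock(&kvm->lock);
	if (kvm->created_vcpus)		/* also counts VCPUs still being created */
		r = -EBUSY;
	/* else: safe to change VM-wide facilities, no VCPU can exist yet */
	pthread_mutex_unlock(&kvm->lock);
	return r;
}

The diff below applies exactly this check in each enable/set path.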
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/kvm-s390.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 49c60393a15c..0dcf9b8fc12c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -422,7 +422,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		break;
 	case KVM_CAP_S390_VECTOR_REGISTERS:
 		mutex_lock(&kvm->lock);
-		if (atomic_read(&kvm->online_vcpus)) {
+		if (kvm->created_vcpus) {
 			r = -EBUSY;
 		} else if (MACHINE_HAS_VX) {
 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
@@ -437,7 +437,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	case KVM_CAP_S390_RI:
 		r = -EINVAL;
 		mutex_lock(&kvm->lock);
-		if (atomic_read(&kvm->online_vcpus)) {
+		if (kvm->created_vcpus) {
 			r = -EBUSY;
 		} else if (test_facility(64)) {
 			set_kvm_facility(kvm->arch.model.fac_mask, 64);
@@ -492,7 +492,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 		ret = -EBUSY;
 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
 		mutex_lock(&kvm->lock);
-		if (atomic_read(&kvm->online_vcpus) == 0) {
+		if (!kvm->created_vcpus) {
 			kvm->arch.use_cmma = 1;
 			ret = 0;
 		}
@@ -536,7 +536,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 
 		ret = -EBUSY;
 		mutex_lock(&kvm->lock);
-		if (atomic_read(&kvm->online_vcpus) == 0) {
+		if (!kvm->created_vcpus) {
 			/* gmap_alloc will round the limit up */
 			struct gmap *new = gmap_alloc(current->mm, new_limit);
 
@@ -713,7 +713,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	int ret = 0;
 
 	mutex_lock(&kvm->lock);
-	if (atomic_read(&kvm->online_vcpus)) {
+	if (kvm->created_vcpus) {
 		ret = -EBUSY;
 		goto out;
 	}