aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2015-10-12 10:27:23 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2015-11-30 06:47:09 -0500
commit2550882449299fd55c8214529cc0777b789db0f7 (patch)
treecda96a382c91c236794ffa0ad98e283553c0634f
parent5f3fe620a56f2f5c79e89522107f2476a45ed6ce (diff)
KVM: s390: fix SCA related races and double use
If something goes wrong in kvm_arch_vcpu_create, the VCPU has already been added to the sca but will never be removed. Trying to create VCPUs with duplicate ids (e.g. after a failed attempt) is problematic. Also, when creating multiple VCPUs in parallel, we could theoretically forget to set the correct SCA when the switch to ESCA happens just before the VCPU is registered. Let's add the VCPU to the SCA in kvm_arch_vcpu_postcreate, where we can be sure that no duplicate VCPU with the same id is around and the VCPU has already been registered at the VM. We also have to make sure to update ECB at that point. Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 5c58127b7527..2ba5978829f6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1289,6 +1289,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
 		sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
+		vcpu->arch.sie_block->ecb2 |= 0x04U;
 		set_bit_inv(id, (unsigned long *) sca->mcn);
 	} else {
 		struct bsca_block *sca = kvm->arch.sca;
@@ -1493,8 +1494,11 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
-	if (!kvm_is_ucontrol(vcpu->kvm))
+	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+		sca_add_vcpu(vcpu, vcpu->kvm, vcpu->vcpu_id);
+	}
+
 }
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@@ -1558,8 +1562,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->ecb |= 0x10;
 
 	vcpu->arch.sie_block->ecb2 = 8;
-	if (vcpu->kvm->arch.use_esca)
-		vcpu->arch.sie_block->ecb2 |= 4;
 	vcpu->arch.sie_block->eca = 0xC1002000U;
 	if (sclp.has_siif)
 		vcpu->arch.sie_block->eca |= 1;
@@ -1608,9 +1610,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
 
 	vcpu->arch.sie_block->icpua = id;
-	if (!kvm_is_ucontrol(kvm))
-		sca_add_vcpu(vcpu, kvm, id);
-
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	vcpu->arch.local_int.wq = &vcpu->wq;