path: root/arch/s390
author	Carsten Otte <cotte@de.ibm.com>	2009-05-12 11:21:48 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 04:48:55 -0400
commit	2668dab794272f0898491acaf1e77e9a005abc0f (patch)
tree	aeb223675bed96f0b5fc15bc3f9474381fe28364 /arch/s390
parent	58f8ac279a8c46eb0a3193edd521ed2e41c4f914 (diff)
KVM: s390: Fix memory slot versus run - v3
This patch fixes an incorrectness in the kvm backend for s390: if virtual cpus are created before the corresponding memory slot is registered, we need to update the sie control blocks for those virtual cpus.

*updates in v3*
In consideration of the s390 memslot constraints, locking was changed to trylock. These locks should never be held, as vcpus can't run without the single memslot we assign when running this code. To ensure this never deadlocks in case other code changes, the code uses trylocks and bails out if it can't get all locks. Additionally, most of the discussed special conditions for s390, like only one memslot and no user_alloc, are now checked for validity in kvm_arch_set_memory_region.

Reported-by: Mijo Safradin <mijo@linux.vnet.ibm.com>
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Christian Ehrhardt <ehrhardt@de.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
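The locking scheme the message describes, take every vcpu mutex with a non-blocking trylock, bail out and release everything if a single attempt fails, and only then rewrite the guest memory layout under all locks, can be illustrated with a small user-space sketch. This is only an illustrative pattern with made-up names (vcpu_slot, NUM_SLOTS, update_all_slots), not the kernel implementation; the real patch below operates on kvm->vcpus[i]->mutex and the SIE control block fields gmsor/gmslm.

/* Hedged user-space sketch of the "trylock all, bail out" pattern.
 * Not the kernel code: vcpu_slot, NUM_SLOTS and update_all_slots()
 * are invented for illustration only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 4

struct vcpu_slot {
	bool present;			/* like kvm->vcpus[i] == NULL when false */
	pthread_mutex_t lock;		/* like the per-vcpu mutex */
	unsigned long origin;		/* stand-in for sie_block->gmsor */
	unsigned long limit;		/* stand-in for sie_block->gmslm */
};

static struct vcpu_slot slots[NUM_SLOTS];

/* Returns 0 on success, -1 if any lock could not be taken. */
static int update_all_slots(unsigned long origin, unsigned long size)
{
	int i;

	/* Phase 1: trylock every present slot; never block. */
	for (i = 0; i < NUM_SLOTS; ++i) {
		if (!slots[i].present)
			continue;
		if (pthread_mutex_trylock(&slots[i].lock) != 0)
			goto fail_out;
	}

	/* Phase 2: all locks held, apply the new layout, then unlock. */
	for (i = 0; i < NUM_SLOTS; ++i) {
		if (slots[i].present) {
			slots[i].origin = origin;
			slots[i].limit  = origin + size - 1;
			pthread_mutex_unlock(&slots[i].lock);
		}
	}
	return 0;

fail_out:
	/* Drop only the locks taken before the failing index. */
	for (i = i - 1; i >= 0; --i)
		if (slots[i].present)
			pthread_mutex_unlock(&slots[i].lock);
	return -1;
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_SLOTS; ++i) {
		slots[i].present = (i != 2);	/* leave one slot empty */
		pthread_mutex_init(&slots[i].lock, NULL);
	}
	printf("update %s\n",
	       update_all_slots(0x100000ul, 0x4000ul) ? "failed" : "ok");
	return 0;
}

The trylock variant is chosen, as the commit message notes, because the vcpu mutexes should never be contended here (vcpus cannot run before the single memslot exists), so blocking would only hide a bug, whereas failing fast keeps the path deadlock-free.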
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kvm/kvm-s390.c	36
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f4d56e9939c9..86567e174fd7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -657,6 +657,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
+	int i;
+
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
 	   page boundary in userland and which has to end at a page boundary.
@@ -664,7 +666,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	   vmas. It is okay to mmap() and munmap() stuff in this slot after
 	   doing this call at any time */
 
-	if (mem->slot)
+	if (mem->slot || kvm->arch.guest_memsize)
 		return -EINVAL;
 
 	if (mem->guest_phys_addr)
@@ -676,15 +678,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (mem->memory_size & (PAGE_SIZE - 1))
 		return -EINVAL;
 
+	if (!user_alloc)
+		return -EINVAL;
+
+	/* lock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!kvm->vcpus[i])
+			continue;
+		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+			goto fail_out;
+	}
+
 	kvm->arch.guest_origin = mem->userspace_addr;
 	kvm->arch.guest_memsize = mem->memory_size;
 
-	/* FIXME: we do want to interrupt running CPUs and update their memory
-	   configuration now to avoid race conditions. But hey, changing the
-	   memory layout while virtual CPUs are running is usually bad
-	   programming practice. */
+	/* update sie control blocks, and unlock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm->vcpus[i]->arch.sie_block->gmsor =
+				kvm->arch.guest_origin;
+			kvm->vcpus[i]->arch.sie_block->gmslm =
+				kvm->arch.guest_memsize +
+				kvm->arch.guest_origin +
+				VIRTIODESCSPACE - 1ul;
+			mutex_unlock(&kvm->vcpus[i]->mutex);
+		}
+	}
 
 	return 0;
+
+fail_out:
+	for (; i >= 0; i--)
+		mutex_unlock(&kvm->vcpus[i]->mutex);
+	return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)