Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c  36
1 file changed, 31 insertions, 5 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f4d56e9939c9..86567e174fd7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -657,6 +657,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
+	int i;
+
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
 	   page boundary in userland and which has to end at a page boundary.
@@ -664,7 +666,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	   vmas. It is okay to mmap() and munmap() stuff in this slot after
 	   doing this call at any time */
 
-	if (mem->slot)
+	if (mem->slot || kvm->arch.guest_memsize)
 		return -EINVAL;
 
 	if (mem->guest_phys_addr)
@@ -676,15 +678,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (mem->memory_size & (PAGE_SIZE - 1))
 		return -EINVAL;
 
+	if (!user_alloc)
+		return -EINVAL;
+
+	/* lock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!kvm->vcpus[i])
+			continue;
+		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+			goto fail_out;
+	}
+
 	kvm->arch.guest_origin = mem->userspace_addr;
 	kvm->arch.guest_memsize = mem->memory_size;
 
-	/* FIXME: we do want to interrupt running CPUs and update their memory
-	   configuration now to avoid race conditions. But hey, changing the
-	   memory layout while virtual CPUs are running is usually bad
-	   programming practice. */
+	/* update sie control blocks, and unlock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm->vcpus[i]->arch.sie_block->gmsor =
+				kvm->arch.guest_origin;
+			kvm->vcpus[i]->arch.sie_block->gmslm =
+				kvm->arch.guest_memsize +
+				kvm->arch.guest_origin +
+				VIRTIODESCSPACE - 1ul;
+			mutex_unlock(&kvm->vcpus[i]->mutex);
+		}
+	}
 
 	return 0;
+
+fail_out:
+	for (; i >= 0; i--)
+		mutex_unlock(&kvm->vcpus[i]->mutex);
+	return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
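
Note on the change: the commit replaces the old FIXME with an actual update protocol. It try-locks every existing vCPU, then programs each SIE control block's guest memory origin (gmsor) and limit (gmslm, set to origin + size + VIRTIODESCSPACE - 1) before unlocking. Below is a minimal standalone sketch of the try-lock/rollback pattern, using hypothetical names and plain pthreads in place of the kernel's mutex_trylock()/mutex_unlock(); it is an illustration of the idea, not kernel code.

/* Sketch: lock every existing vCPU or none at all.  vcpus[i] may be
 * NULL, mirroring kvm->vcpus[] slots that were never created. */
#include <pthread.h>
#include <stdlib.h>

#define MAX_VCPUS 64

struct vcpu {
	pthread_mutex_t mutex;
	/* per-vCPU state would live here */
};

static struct vcpu *vcpus[MAX_VCPUS];

static int add_vcpu(int slot)
{
	struct vcpu *v = calloc(1, sizeof(*v));

	if (!v)
		return -1;
	pthread_mutex_init(&v->mutex, NULL);
	vcpus[slot] = v;
	return 0;
}

/* Returns 0 with every existing vCPU locked, or -1 with none locked. */
static int lock_all_vcpus(void)
{
	int i;

	for (i = 0; i < MAX_VCPUS; i++) {
		if (!vcpus[i])
			continue;
		if (pthread_mutex_trylock(&vcpus[i]->mutex) != 0)
			goto fail;
	}
	return 0;

fail:
	/* vcpus[i] itself was never acquired, so roll back from i - 1,
	 * skipping empty slots on the way down. */
	while (--i >= 0)
		if (vcpus[i])
			pthread_mutex_unlock(&vcpus[i]->mutex);
	return -1;
}

static void unlock_all_vcpus(void)
{
	int i;

	for (i = 0; i < MAX_VCPUS; i++)
		if (vcpus[i])
			pthread_mutex_unlock(&vcpus[i]->mutex);
}

int main(void)
{
	add_vcpu(0);
	add_vcpu(2);	/* slot 1 intentionally left empty */

	if (lock_all_vcpus() == 0) {
		/* ... update per-vCPU state here ... */
		unlock_all_vcpus();
	}
	return 0;
}

One deliberate difference from the patch: the sketch starts its rollback at i - 1 and tests each slot for NULL, because the slot whose trylock just failed was never acquired. The commit's fail_out loop starts at i itself and does not check for empty slots, so as written it would unlock a mutex it never took and could dereference a NULL vcpus[] entry on the way down; that error path looks like a latent bug rather than part of the intended pattern.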