Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c  63
1 file changed, 52 insertions(+), 11 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f4d56e9939c9..10bccd1f8aee 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/fs.h>
+#include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
@@ -195,6 +196,10 @@ out_nokvm:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+		(__u64) vcpu->arch.sie_block)
+		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+	smp_mb();
 	free_page((unsigned long)(vcpu->arch.sie_block));
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu);
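
Note: each virtual cpu owns one entry in the VM-wide system control area (SCA), whose sda field carries the address of that vcpu's SIE control block. The destroy path above clears the entry only if it still points at this vcpu, so a slot that a newly created vcpu of the same id has already reclaimed is left untouched; the smp_mb() orders the clear before the free_page() that follows. A rough sketch of the layout being indexed, with field details approximate (the authoritative definition lives in arch/s390/include/asm/kvm_host.h and is not part of this diff):

/* approximate shape of the shared SCA used above; sketch only */
struct sca_entry {
	atomic_t scn;
	__u32	reserved;
	__u64	sda;		/* address of this vcpu's SIE control block */
	__u64	reserved2[2];
} __attribute__((packed));

struct sca_block {
	__u64	ipte_control;
	__u64	reserved[5];
	__u64	mcn;
	__u64	reserved2;
	struct sca_entry cpu[64];	/* indexed as kvm->arch.sca->cpu[vcpu_id] */
} __attribute__((packed));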
@@ -283,8 +288,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
 	vcpu->arch.sie_block->ecb = 2;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
-	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
-		    (unsigned long) vcpu);
+	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
+		     (unsigned long) vcpu);
+	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 	get_cpu_id(&vcpu->arch.cpu_id);
 	vcpu->arch.cpu_id.version = 0xff;
 	return 0;
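
Note: the wakeup for the guest's clock comparator moves from a jiffies-based kernel timer (setup_timer) to an hrtimer programmed with an absolute CLOCK_REALTIME expiry, giving better-than-jiffy precision. Since an hrtimer callback fires in hard interrupt context, the heavier wakeup work is deferred to the newly initialized tasklet. The callback bodies are not part of this diff (they live in arch/s390/kvm/interrupt.c); a plausible sketch of how the two halves fit together, under that assumption:

/* sketch only: the real kvm_s390_idle_wakeup()/kvm_s390_tasklet() are
 * in arch/s390/kvm/interrupt.c and may differ in detail */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);	/* defer out of hardirq */
	return HRTIMER_NORESTART;		/* one-shot wakeup */
}

void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	/* softirq context: safe to take the local interrupt lock and
	 * wake the vcpu idling in its wait queue */
	spin_lock(&vcpu->arch.local_int.lock);
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}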
@@ -307,19 +314,21 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 	vcpu->arch.sie_block->icpua = id;
 	BUG_ON(!kvm->arch.sca);
-	BUG_ON(kvm->arch.sca->cpu[id].sda);
-	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+	if (!kvm->arch.sca->cpu[id].sda)
+		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+	else
+		BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-	spin_lock_bh(&kvm->arch.float_int.lock);
+	spin_lock(&kvm->arch.float_int.lock);
 	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
 	init_waitqueue_head(&vcpu->arch.local_int.wq);
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-	spin_unlock_bh(&kvm->arch.float_int.lock);
+	spin_unlock(&kvm->arch.float_int.lock);
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
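
Note: together with the destroy-side change further up, vcpu creation no longer insists on a virgin SCA slot. A populated sda entry is tolerated exactly when a vcpu with this id genuinely exists; an empty one is claimed. The _bh spinlock variants on the floating-interrupt lock are also dropped here, in line with the reworked wakeup path (the corresponding interrupt.c changes are not shown in this diff). A hypothetical helper, for illustration only, stating the invariant the two hunks maintain between them:

/* hypothetical, not in the patch: sda is either clear or owned by the
 * live vcpu of that id */
static bool sca_entry_consistent(struct kvm *kvm, int id)
{
	__u64 sda = kvm->arch.sca->cpu[id].sda;

	if (!sda)
		return true;	/* slot free: vcpu 'id' was destroyed */
	return kvm->vcpus[id] &&
	       sda == (__u64) kvm->vcpus[id]->arch.sie_block;
}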
@@ -478,6 +487,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+	/* verify, that memory has been registered */
+	if (!vcpu->kvm->arch.guest_memsize) {
+		vcpu_put(vcpu);
+		return -EINVAL;
+	}
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
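
Note: with this check, KVM_RUN fails with -EINVAL until userspace has registered guest memory, instead of entering SIE with an unconfigured guest mapping. A hedged userspace sketch of the required ordering (error handling omitted; vm_fd and vcpu_fd are assumed to come from the usual KVM_CREATE_VM and KVM_CREATE_VCPU calls):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void setup_and_run(int vm_fd, int vcpu_fd, void *mem, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,			/* s390 accepts exactly one slot */
		.guest_phys_addr = 0,		/* must start at guest address 0 */
		.memory_size = size,		/* must be page aligned */
		.userspace_addr = (__u64) mem,	/* must be page aligned */
	};

	/* register memory first; after this patch, reversing the order
	 * makes KVM_RUN return -EINVAL */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	ioctl(vcpu_fd, KVM_RUN, 0);
}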
@@ -657,6 +672,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
+	int i;
+
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
 	   page boundary in userland and which has to end at a page boundary.
@@ -664,7 +681,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	   vmas. It is okay to mmap() and munmap() stuff in this slot after
 	   doing this call at any time */
 
-	if (mem->slot)
+	if (mem->slot || kvm->arch.guest_memsize)
 		return -EINVAL;
 
 	if (mem->guest_phys_addr)
@@ -676,15 +693,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (mem->memory_size & (PAGE_SIZE - 1))
 		return -EINVAL;
 
+	if (!user_alloc)
+		return -EINVAL;
+
+	/* lock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!kvm->vcpus[i])
+			continue;
+		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+			goto fail_out;
+	}
+
 	kvm->arch.guest_origin = mem->userspace_addr;
 	kvm->arch.guest_memsize = mem->memory_size;
 
-	/* FIXME: we do want to interrupt running CPUs and update their memory
-	   configuration now to avoid race conditions. But hey, changing the
-	   memory layout while virtual CPUs are running is usually bad
-	   programming practice. */
+	/* update sie control blocks, and unlock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm->vcpus[i]->arch.sie_block->gmsor =
+				kvm->arch.guest_origin;
+			kvm->vcpus[i]->arch.sie_block->gmslm =
+				kvm->arch.guest_memsize +
+				kvm->arch.guest_origin +
+				VIRTIODESCSPACE - 1ul;
+			mutex_unlock(&kvm->vcpus[i]->mutex);
+		}
+	}
 
 	return 0;
+
+fail_out:
+	for (; i >= 0; i--)
+		mutex_unlock(&kvm->vcpus[i]->mutex);
+	return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
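
Note: this resolves the old FIXME. Rather than changing the layout under running cpus, the update now takes every existing vcpu's mutex with mutex_trylock() before touching the origin and size, then rewrites each SIE control block's guest memory origin (gmsor) and limit (gmslm) and unlocks. Trylock is the right tool here because a vcpu sitting in KVM_RUN holds its mutex indefinitely; failing with -EINVAL lets userspace back off and retry instead of deadlocking. A generic, defensively written sketch of the lock-all-with-rollback pattern (illustrative only; the patch's own unwind at fail_out is structured slightly differently):

/* illustrative pattern, not the patch's exact code */
static int lock_all_vcpus(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (!kvm->vcpus[i])
			continue;	/* unused slot */
		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
			goto unwind;	/* vcpu busy, e.g. in KVM_RUN */
	}
	return 0;

unwind:
	/* slot i failed its trylock, so its mutex was never taken;
	 * release only the ones below it, skipping empty slots */
	while (--i >= 0)
		if (kvm->vcpus[i])
			mutex_unlock(&kvm->vcpus[i]->mutex);
	return -EINVAL;
}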