Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
 arch/s390/kvm/kvm-s390.c | 78 +++++++++++++++++++++++++++++++++---------------------------------------------
 1 file changed, 33 insertions(+), 45 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 90d9d1ba258b..07ced89740d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,7 +1,7 @@
 /*
  * s390host.c -- hosting zSeries kernel virtual machines
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -10,6 +10,7 @@
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  *            Heiko Carstens <heiko.carstens@de.ibm.com>
+ *            Christian Ehrhardt <ehrhardt@de.ibm.com>
  */
 
 #include <linux/compiler.h>
@@ -210,13 +211,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 static void kvm_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_destroy(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -278,16 +283,10 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gbea = 1;
 }
 
-/* The current code can have up to 256 pages for virtio */
-#define VIRTIODESCSPACE (256ul * 4096ul)
-
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
-				      vcpu->kvm->arch.guest_origin +
-				      VIRTIODESCSPACE - 1ul;
-	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
+	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
 	vcpu->arch.sie_block->ecb = 2;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -319,8 +318,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	BUG_ON(!kvm->arch.sca);
 	if (!kvm->arch.sca->cpu[id].sda)
 		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-	else
-		BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
@@ -490,9 +487,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+rerun_vcpu:
+	if (vcpu->requests)
+		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			kvm_s390_vcpu_set_mem(vcpu);
+
 	/* verify, that memory has been registered */
-	if (!vcpu->kvm->arch.guest_memsize) {
+	if (!vcpu->arch.sie_block->gmslm) {
 		vcpu_put(vcpu);
+		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
 		return -EINVAL;
 	}
 
@@ -509,6 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
 		break;
 	case KVM_EXIT_UNKNOWN:
+	case KVM_EXIT_INTR:
 	case KVM_EXIT_S390_RESET:
 		break;
 	default:
@@ -522,8 +526,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
 
-	if (signal_pending(current) && !rc)
+	if (rc == SIE_INTERCEPT_RERUNVCPU)
+		goto rerun_vcpu;
+
+	if (signal_pending(current) && !rc) {
+		kvm_run->exit_reason = KVM_EXIT_INTR;
 		rc = -EINTR;
+	}
 
 	if (rc == -ENOTSUPP) {
 		/* intercept cannot be handled in-kernel, prepare kvm-run */
@@ -676,6 +685,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 			       int user_alloc)
 {
 	int i;
+	struct kvm_vcpu *vcpu;
 
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
@@ -684,7 +694,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	   vmas. It is okay to mmap() and munmap() stuff in this slot after
 	   doing this call at any time */
 
-	if (mem->slot || kvm->arch.guest_memsize)
+	if (mem->slot)
 		return -EINVAL;
 
 	if (mem->guest_phys_addr)
@@ -699,36 +709,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (!user_alloc)
 		return -EINVAL;
 
-	/* lock all vcpus */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (!kvm->vcpus[i])
+	/* request update of sie control block for all available vcpus */
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 			continue;
-		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
-			goto fail_out;
-	}
-
-	kvm->arch.guest_origin = mem->userspace_addr;
-	kvm->arch.guest_memsize = mem->memory_size;
-
-	/* update sie control blocks, and unlock all vcpus */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm->vcpus[i]->arch.sie_block->gmsor =
-				kvm->arch.guest_origin;
-			kvm->vcpus[i]->arch.sie_block->gmslm =
-				kvm->arch.guest_memsize +
-				kvm->arch.guest_origin +
-				VIRTIODESCSPACE - 1ul;
-			mutex_unlock(&kvm->vcpus[i]->mutex);
-		}
-	}
+		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
 	}
 
 	return 0;
-
-fail_out:
-	for (; i >= 0; i--)
-		mutex_unlock(&kvm->vcpus[i]->mutex);
-	return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
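
The patch above drops the old scheme of trylocking every vcpu mutex and rewriting each SIE control block in place, and instead uses KVM's vcpu-request pattern: the requester sets a bit in vcpu->requests and kicks the vcpu out of guest context, and the vcpu re-applies its memory setup at the top of its run loop (the rerun_vcpu: label). What follows is a minimal user-space sketch of that pattern, not kernel code: struct vcpu, REQ_MMU_RELOAD, request_mmu_reload() and handle_requests() are simplified stand-ins for the kernel's vcpu->requests bitmask, KVM_REQ_MMU_RELOAD, atomic test_and_set_bit()/test_and_clear_bit() bitops, and the SIGP-stop kick.

/*
 * Illustrative sketch only; the kernel performs these bit
 * operations atomically and the "kick" is a SIGP stop with
 * ACTION_RELOADVCPU_ON_STOP.
 */
#include <stdio.h>

#define REQ_MMU_RELOAD (1ul << 0)

struct vcpu {
	unsigned long requests;	/* pending request bits */
	unsigned long gmsor;	/* guest memory start (origin) */
	unsigned long gmslm;	/* guest memory limit */
};

/* Requester side: flag the vcpu and kick it only on the 0->1
 * transition, mirroring test_and_set_bit() + inject_sigp_stop(). */
static void request_mmu_reload(struct vcpu *vcpu)
{
	if (vcpu->requests & REQ_MMU_RELOAD)
		return;	/* request already pending: no second kick */
	vcpu->requests |= REQ_MMU_RELOAD;
	printf("kick vcpu out of guest context\n");
}

/* Vcpu side: drain requests before (re)entering the guest, as the
 * rerun_vcpu: path in kvm_arch_vcpu_ioctl_run() does. */
static void handle_requests(struct vcpu *vcpu, unsigned long origin,
			    unsigned long size)
{
	if (vcpu->requests & REQ_MMU_RELOAD) {
		vcpu->requests &= ~REQ_MMU_RELOAD;
		vcpu->gmsor = origin;			/* reload memory setup, */
		vcpu->gmslm = origin + size - 1;	/* like kvm_s390_vcpu_set_mem() */
	}
}

int main(void)
{
	struct vcpu vcpu = { 0 };

	request_mmu_reload(&vcpu);	/* e.g. from set_memory_region */
	request_mmu_reload(&vcpu);	/* coalesces: no extra kick */
	handle_requests(&vcpu, 0x100000ul, 0x4000000ul);
	printf("gmsor=%#lx gmslm=%#lx\n", vcpu.gmsor, vcpu.gmslm);
	return 0;
}

The test_and_set_bit() in the patch makes the kick idempotent: concurrent memory-region updates coalesce into a single reload per vcpu, which is why the loop in kvm_arch_set_memory_region() can skip vcpus that already have the request bit set.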