about summary refs log tree commit diff stats
path: root/arch/s390/kvm/kvm-s390.c
diff options
context:
space:
mode:
authorCarsten Otte <cotte@de.ibm.com>2011-07-24 04:48:21 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2011-07-24 04:48:21 -0400
commit598841ca9919d008b520114d8a4378c4ce4e40a1 (patch)
tree3f823474e70af4305c395fb220a138b5bc4e9f90 /arch/s390/kvm/kvm-s390.c
parente5992f2e6c3829cd43dbc4438ee13dcd6506f7f3 (diff)
[S390] use gmap address spaces for kvm guest images
This patch switches kvm from using (Qemu's) user address space to Martin's gmap address space. This way QEMU does not have to use a linker script in order to fit large guests at low addresses in its address space. Signed-off-by: Carsten Otte <cotte@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--arch/s390/kvm/kvm-s390.c32
1 files changed, 21 insertions, 11 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 123ebea72282..3ebb4ba83d9d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -190,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
191 VM_EVENT(kvm, 3, "%s", "vm created"); 191 VM_EVENT(kvm, 3, "%s", "vm created");
192 192
193 kvm->arch.gmap = gmap_alloc(current->mm);
194 if (!kvm->arch.gmap)
195 goto out_nogmap;
196
193 return 0; 197 return 0;
198out_nogmap:
199 debug_unregister(kvm->arch.dbf);
194out_nodbf: 200out_nodbf:
195 free_page((unsigned long)(kvm->arch.sca)); 201 free_page((unsigned long)(kvm->arch.sca));
196out_err: 202out_err:
@@ -235,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
235 kvm_free_vcpus(kvm); 241 kvm_free_vcpus(kvm);
236 free_page((unsigned long)(kvm->arch.sca)); 242 free_page((unsigned long)(kvm->arch.sca));
237 debug_unregister(kvm->arch.dbf); 243 debug_unregister(kvm->arch.dbf);
244 gmap_free(kvm->arch.gmap);
238} 245}
239 246
240/* Section: vcpu related */ 247/* Section: vcpu related */
241int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 248int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
242{ 249{
250 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
243 return 0; 251 return 0;
244} 252}
245 253
@@ -285,7 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
285 293
286int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 294int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
287{ 295{
288 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH); 296 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
289 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests); 297 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
290 vcpu->arch.sie_block->ecb = 6; 298 vcpu->arch.sie_block->ecb = 6;
291 vcpu->arch.sie_block->eca = 0xC1002001U; 299 vcpu->arch.sie_block->eca = 0xC1002001U;
@@ -454,6 +462,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
454 local_irq_disable(); 462 local_irq_disable();
455 kvm_guest_enter(); 463 kvm_guest_enter();
456 local_irq_enable(); 464 local_irq_enable();
465 gmap_enable(vcpu->arch.gmap);
457 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 466 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
458 atomic_read(&vcpu->arch.sie_block->cpuflags)); 467 atomic_read(&vcpu->arch.sie_block->cpuflags));
459 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { 468 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -462,6 +471,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
462 } 471 }
463 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 472 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
464 vcpu->arch.sie_block->icptcode); 473 vcpu->arch.sie_block->icptcode);
474 gmap_disable(vcpu->arch.gmap);
465 local_irq_disable(); 475 local_irq_disable();
466 kvm_guest_exit(); 476 kvm_guest_exit();
467 local_irq_enable(); 477 local_irq_enable();
@@ -479,13 +489,6 @@ rerun_vcpu:
479 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 489 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
480 kvm_s390_vcpu_set_mem(vcpu); 490 kvm_s390_vcpu_set_mem(vcpu);
481 491
482 /* verify, that memory has been registered */
483 if (!vcpu->arch.sie_block->gmslm) {
484 vcpu_put(vcpu);
485 VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
486 return -EINVAL;
487 }
488
489 if (vcpu->sigset_active) 492 if (vcpu->sigset_active)
490 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 493 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
491 494
@@ -681,10 +684,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
681 if (mem->guest_phys_addr) 684 if (mem->guest_phys_addr)
682 return -EINVAL; 685 return -EINVAL;
683 686
684 if (mem->userspace_addr & (PAGE_SIZE - 1)) 687 if (mem->userspace_addr & 0xffffful)
685 return -EINVAL; 688 return -EINVAL;
686 689
687 if (mem->memory_size & (PAGE_SIZE - 1)) 690 if (mem->memory_size & 0xffffful)
688 return -EINVAL; 691 return -EINVAL;
689 692
690 if (!user_alloc) 693 if (!user_alloc)
@@ -698,15 +701,22 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
698 struct kvm_memory_slot old, 701 struct kvm_memory_slot old,
699 int user_alloc) 702 int user_alloc)
700{ 703{
701 int i; 704 int i, rc;
702 struct kvm_vcpu *vcpu; 705 struct kvm_vcpu *vcpu;
703 706
707
708 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
709 mem->guest_phys_addr, mem->memory_size);
710 if (rc)
711 return;
712
704 /* request update of sie control block for all available vcpus */ 713 /* request update of sie control block for all available vcpus */
705 kvm_for_each_vcpu(i, vcpu, kvm) { 714 kvm_for_each_vcpu(i, vcpu, kvm) {
706 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 715 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
707 continue; 716 continue;
708 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); 717 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
709 } 718 }
719 return;
710} 720}
711 721
712void kvm_arch_flush_shadow(struct kvm *kvm) 722void kvm_arch_flush_shadow(struct kvm *kvm)