Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--   arch/s390/kvm/kvm-s390.c   71
1 files changed, 40 insertions, 31 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 67345ae7ce8..8cdb1bd5856 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
+	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -122,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_S390_PSW:
+	case KVM_CAP_S390_GMAP:
 		r = 1;
 		break;
 	default:
@@ -189,7 +191,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
+	kvm->arch.gmap = gmap_alloc(current->mm);
+	if (!kvm->arch.gmap)
+		goto out_nogmap;
+
 	return 0;
+out_nogmap:
+	debug_unregister(kvm->arch.dbf);
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
 out_err:
@@ -234,11 +242,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
+	gmap_free(kvm->arch.gmap);
 }
 
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 	return 0;
 }
 
@@ -254,10 +264,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
+	gmap_enable(vcpu->arch.gmap);
+	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -284,8 +298,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
+	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+						    CPUSTAT_SM |
+						    CPUSTAT_STOPPED);
 	vcpu->arch.sie_block->ecb = 6;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -301,11 +316,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 				      unsigned int id)
 {
-	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
-	int rc = -ENOMEM;
+	struct kvm_vcpu *vcpu;
+	int rc = -EINVAL;
 
+	if (id >= KVM_MAX_VCPUS)
+		goto out;
+
+	rc = -ENOMEM;
+
+	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
 	if (!vcpu)
-		goto out_nomem;
+		goto out;
 
 	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
 			       get_zeroed_page(GFP_KERNEL);
@@ -341,7 +362,7 @@ out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
 	kfree(vcpu);
-out_nomem:
+out:
 	return ERR_PTR(rc);
 }
 
@@ -404,7 +425,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
 	int rc = 0;
 
-	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
@@ -474,21 +495,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sigset_t sigsaved;
 
 rerun_vcpu:
-	if (vcpu->requests)
-		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
-			kvm_s390_vcpu_set_mem(vcpu);
-
-	/* verify, that memory has been registered */
-	if (!vcpu->arch.sie_block->gmslm) {
-		vcpu_put(vcpu);
-		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
-		return -EINVAL;
-	}
-
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
 
@@ -545,7 +555,7 @@ rerun_vcpu:
 	return rc;
 }
 
-static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
 		       unsigned long n, int prefix)
 {
 	if (prefix)
@@ -562,7 +572,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
  */
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 {
-	const unsigned char archmode = 1;
+	unsigned char archmode = 1;
 	int prefix;
 
 	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
@@ -680,10 +690,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr)
 		return -EINVAL;
 
-	if (mem->userspace_addr & (PAGE_SIZE - 1))
+	if (mem->userspace_addr & 0xffffful)
 		return -EINVAL;
 
-	if (mem->memory_size & (PAGE_SIZE - 1))
+	if (mem->memory_size & 0xffffful)
 		return -EINVAL;
 
 	if (!user_alloc)
@@ -697,15 +707,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
-	int i;
-	struct kvm_vcpu *vcpu;
+	int rc;
 
-	/* request update of sie control block for all available vcpus */
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
-			continue;
-		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
-	}
+
+	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+		mem->guest_phys_addr, mem->memory_size);
+	if (rc)
+		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+	return;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)