Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--	arch/s390/kvm/kvm-s390.c	221
1 file changed, 183 insertions(+), 38 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d1c445732451..17ad69d596fd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -129,6 +129,10 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_S390_PSW:
 	case KVM_CAP_S390_GMAP:
 	case KVM_CAP_SYNC_MMU:
+#ifdef CONFIG_KVM_S390_UCONTROL
+	case KVM_CAP_S390_UCONTROL:
+#endif
+	case KVM_CAP_SYNC_REGS:
 		r = 1;
 		break;
 	default:
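
For context, a minimal userspace sketch of probing the capabilities this hunk advertises; KVM_CHECK_EXTENSION is the standard KVM mechanism, and everything beyond the KVM_* constants (file path, error handling) is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);	/* system-wide KVM handle */

	if (kvm < 0)
		return 1;
	/* KVM_CHECK_EXTENSION returns a positive value when present */
	printf("ucontrol:  %d\n", ioctl(kvm, KVM_CHECK_EXTENSION,
					KVM_CAP_S390_UCONTROL));
	printf("sync regs: %d\n", ioctl(kvm, KVM_CHECK_EXTENSION,
					KVM_CAP_SYNC_REGS));
	return 0;
}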
@@ -171,11 +175,22 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	return r;
 }
 
-int kvm_arch_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int rc;
 	char debug_name[16];
 
+	rc = -EINVAL;
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if (type & ~KVM_VM_S390_UCONTROL)
+		goto out_err;
+	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
+		goto out_err;
+#else
+	if (type)
+		goto out_err;
+#endif
+
 	rc = s390_enable_sie();
 	if (rc)
 		goto out_err;
@@ -198,10 +213,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
-	kvm->arch.gmap = gmap_alloc(current->mm);
-	if (!kvm->arch.gmap)
-		goto out_nogmap;
-
+	if (type & KVM_VM_S390_UCONTROL) {
+		kvm->arch.gmap = NULL;
+	} else {
+		kvm->arch.gmap = gmap_alloc(current->mm);
+		if (!kvm->arch.gmap)
+			goto out_nogmap;
+	}
 	return 0;
 out_nogmap:
 	debug_unregister(kvm->arch.dbf);
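
From the userspace side, the new type argument reaches this function through KVM_CREATE_VM; a sketch under the standard KVM fd flow (the helper name is illustrative, and CAP_SYS_ADMIN is required per the check above):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Request a user-controlled VM; passing 0 keeps the classic
 * gmap-backed VM type. */
static int create_ucontrol_vm(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return -1;
	return ioctl(kvm, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
}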
@@ -214,11 +232,18 @@ out_err:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
-	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
-	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
-		(__u64) vcpu->arch.sie_block)
-		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+	if (!kvm_is_ucontrol(vcpu->kvm)) {
+		clear_bit(63 - vcpu->vcpu_id,
+			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
+		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+		    (__u64) vcpu->arch.sie_block)
+			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+	}
 	smp_mb();
+
+	if (kvm_is_ucontrol(vcpu->kvm))
+		gmap_free(vcpu->arch.gmap);
+
 	free_page((unsigned long)(vcpu->arch.sie_block));
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu);
@@ -249,13 +274,25 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
-	gmap_free(kvm->arch.gmap);
+	if (!kvm_is_ucontrol(kvm))
+		gmap_free(kvm->arch.gmap);
 }
 
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	if (kvm_is_ucontrol(vcpu->kvm)) {
+		vcpu->arch.gmap = gmap_alloc(current->mm);
+		if (!vcpu->arch.gmap)
+			return -ENOMEM;
+		return 0;
+	}
+
 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
+				    KVM_SYNC_GPRS |
+				    KVM_SYNC_ACRS |
+				    KVM_SYNC_CRS;
 	return 0;
 }
 
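The kvm_valid_regs mask set here lives in the vcpu's shared kvm_run area, which userspace reaches through the usual mmap of the vcpu fd; a minimal sketch using the standard KVM_GET_VCPU_MMAP_SIZE query (the helper name is illustrative):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Map the shared kvm_run area of an existing vcpu fd. */
static struct kvm_run *map_run(int kvm_fd, int vcpu_fd)
{
	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	void *p;

	if (size < 0)
		return NULL;
	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED, vcpu_fd, 0);
	return p == MAP_FAILED ? NULL : p;
}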
@@ -270,7 +307,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	save_access_regs(vcpu->arch.host_acrs);
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
-	restore_access_regs(vcpu->arch.guest_acrs);
+	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
@@ -280,7 +317,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
-	save_access_regs(vcpu->arch.guest_acrs);
+	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
 	restore_access_regs(vcpu->arch.host_acrs);
 }
@@ -290,8 +327,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
 	vcpu->arch.sie_block->gpsw.mask = 0UL;
 	vcpu->arch.sie_block->gpsw.addr = 0UL;
-	vcpu->arch.sie_block->prefix = 0UL;
-	vcpu->arch.sie_block->ihcpu = 0xffff;
+	kvm_s390_set_prefix(vcpu, 0);
 	vcpu->arch.sie_block->cputm = 0UL;
 	vcpu->arch.sie_block->ckc = 0UL;
 	vcpu->arch.sie_block->todpr = 0;
@@ -342,12 +378,19 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto out_free_cpu;
 
 	vcpu->arch.sie_block->icpua = id;
-	BUG_ON(!kvm->arch.sca);
-	if (!kvm->arch.sca->cpu[id].sda)
-		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
-	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
-	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
+	if (!kvm_is_ucontrol(kvm)) {
+		if (!kvm->arch.sca) {
+			WARN_ON_ONCE(1);
+			goto out_free_cpu;
+		}
+		if (!kvm->arch.sca->cpu[id].sda)
+			kvm->arch.sca->cpu[id].sda =
+				(__u64) vcpu->arch.sie_block;
+		vcpu->arch.sie_block->scaoh =
+			(__u32)(((__u64)kvm->arch.sca) >> 32);
+		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
+	}
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
@@ -388,29 +431,29 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
+	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
+	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
+	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-	restore_access_regs(vcpu->arch.guest_acrs);
+	restore_access_regs(vcpu->run->s.regs.acrs);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
+	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
 	return 0;
 }
@@ -418,7 +461,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	return 0;
 }
@@ -467,9 +510,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
-static void __vcpu_run(struct kvm_vcpu *vcpu)
+static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
-	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+	int rc;
+
+	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
 	if (need_resched())
 		schedule();
@@ -477,7 +522,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_thread_flag(TIF_MCCK_PENDING))
 		s390_handle_mcck();
 
-	kvm_s390_deliver_pending_interrupts(vcpu);
+	if (!kvm_is_ucontrol(vcpu->kvm))
+		kvm_s390_deliver_pending_interrupts(vcpu);
 
 	vcpu->arch.sie_block->icptcode = 0;
 	local_irq_disable();
@@ -485,9 +531,15 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
-		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+	if (rc) {
+		if (kvm_is_ucontrol(vcpu->kvm)) {
+			rc = SIE_INTERCEPT_UCONTROL;
+		} else {
+			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+			rc = 0;
+		}
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
@@ -495,7 +547,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	kvm_guest_exit();
 	local_irq_enable();
 
-	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+	return rc;
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -516,6 +569,7 @@ rerun_vcpu:
 	case KVM_EXIT_UNKNOWN:
 	case KVM_EXIT_INTR:
 	case KVM_EXIT_S390_RESET:
+	case KVM_EXIT_S390_UCONTROL:
 		break;
 	default:
 		BUG();
@@ -523,12 +577,26 @@ rerun_vcpu:
 
 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
+		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
+		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+	}
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
+		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+	}
 
 	might_fault();
 
 	do {
-		__vcpu_run(vcpu);
-		rc = kvm_handle_sie_intercept(vcpu);
+		rc = __vcpu_run(vcpu);
+		if (rc)
+			break;
+		if (kvm_is_ucontrol(vcpu->kvm))
+			rc = -EOPNOTSUPP;
+		else
+			rc = kvm_handle_sie_intercept(vcpu);
 	} while (!signal_pending(current) && !rc);
 
 	if (rc == SIE_INTERCEPT_RERUNVCPU)
@@ -539,6 +607,16 @@ rerun_vcpu:
 		rc = -EINTR;
 	}
 
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if (rc == SIE_INTERCEPT_UCONTROL) {
+		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
+		kvm_run->s390_ucontrol.trans_exc_code =
+						current->thread.gmap_addr;
+		kvm_run->s390_ucontrol.pgm_code = 0x10;
+		rc = 0;
+	}
+#endif
+
 	if (rc == -EOPNOTSUPP) {
 		/* intercept cannot be handled in-kernel, prepare kvm-run */
 		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
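
On the userspace side, a ucontrol monitor would see this new exit, resolve the fault itself, and re-enter; a sketch of that loop, where run points at the mmap'd kvm_run area and map_fault is a hypothetical helper (one way to implement it via KVM_S390_UCAS_MAP is sketched at the end of this diff):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: back the faulting guest address. */
extern int map_fault(int vcpu_fd, __u64 guest_addr);

static int run_ucontrol_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;
		if (run->exit_reason != KVM_EXIT_S390_UCONTROL)
			return 0;	/* let the caller handle other exits */
		/* pgm_code 0x10 is a segment-translation exception */
		if (map_fault(vcpu_fd, run->s390_ucontrol.trans_exc_code))
			return -1;
	}
}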
@@ -556,6 +634,8 @@ rerun_vcpu:
 
 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
+	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
+	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
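
Taken together with the kvm_valid_regs setup earlier, userspace can now exchange these registers through the run area instead of separate GET/SET ioctls; a minimal sketch, assuming run was mapped from the vcpu fd as shown above (the helper name is illustrative):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Update the prefix through the sync-regs area before entering. */
static int set_prefix_and_run(int vcpu_fd, struct kvm_run *run,
			      __u32 new_prefix)
{
	run->s.regs.prefix = new_prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX; /* kernel applies on entry */
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;
	/* on exit, run->s.regs holds the current prefix/gprs/acrs/crs */
	return 0;
}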
@@ -602,7 +682,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 		return -EFAULT;
 
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
-			vcpu->arch.guest_gprs, 128, prefix))
+			vcpu->run->s.regs.gprs, 128, prefix))
 		return -EFAULT;
 
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
@@ -631,7 +711,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 		return -EFAULT;
 
 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
-			&vcpu->arch.guest_acrs, 64, prefix))
+			&vcpu->run->s.regs.acrs, 64, prefix))
 		return -EFAULT;
 
 	if (__guestcopy(vcpu,
@@ -673,12 +753,77 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	case KVM_S390_INITIAL_RESET:
 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
 		break;
+#ifdef CONFIG_KVM_S390_UCONTROL
+	case KVM_S390_UCAS_MAP: {
+		struct kvm_s390_ucas_mapping ucasmap;
+
+		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
+			r = -EFAULT;
+			break;
+		}
+
+		if (!kvm_is_ucontrol(vcpu->kvm)) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
+				     ucasmap.vcpu_addr, ucasmap.length);
+		break;
+	}
+	case KVM_S390_UCAS_UNMAP: {
+		struct kvm_s390_ucas_mapping ucasmap;
+
+		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
+			r = -EFAULT;
+			break;
+		}
+
+		if (!kvm_is_ucontrol(vcpu->kvm)) {
+			r = -EINVAL;
+			break;
+		}
+
+		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
+				       ucasmap.length);
+		break;
+	}
+#endif
+	case KVM_S390_VCPU_FAULT: {
+		r = gmap_fault(arg, vcpu->arch.gmap);
+		if (!IS_ERR_VALUE(r))
+			r = 0;
+		break;
+	}
 	default:
-		r = -EINVAL;
+		r = -ENOTTY;
 	}
 	return r;
 }
 
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+#ifdef CONFIG_KVM_S390_UCONTROL
+	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
+		 && (kvm_is_ucontrol(vcpu->kvm))) {
+		vmf->page = virt_to_page(vcpu->arch.sie_block);
+		get_page(vmf->page);
+		return 0;
+	}
+#endif
+	return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	return 0;
+}
+
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
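
Finally, a userspace sketch of the two new vcpu-level facilities above: backing guest storage with KVM_S390_UCAS_MAP, and mapping the hardware SIE control block via the vcpu fault handler. The struct layout and constants follow this patch's uapi additions; the 1 MB alignment is an assumption based on gmap's segment granularity, and the helper names are illustrative:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Back one guest segment with process memory (ucontrol VMs only).
 * Assumption: gmap works on 1 MB segments, so both addresses and the
 * length should be segment aligned. */
static int map_segment(int vcpu_fd, __u64 user_addr, __u64 vcpu_addr)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = user_addr,	/* host virtual address */
		.vcpu_addr = vcpu_addr,	/* guest address to back */
		.length    = 1UL << 20,	/* one segment */
	};

	return ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
}

/* kvm_arch_vcpu_fault() above also lets userspace map the SIE control
 * block read/write at a fixed page offset of the vcpu fd. */
static void *map_sie_block(int vcpu_fd)
{
	long pg = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, pg, PROT_READ | PROT_WRITE, MAP_SHARED,
		       vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * pg);

	return p == MAP_FAILED ? NULL : p;
}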