Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
 arch/s390/kvm/kvm-s390.c | 79 ++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 49 insertions(+), 30 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 07ced89740d7..49292869a5cd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
+#include <asm/asm-offsets.h>
 #include <asm/lowcore.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
@@ -74,9 +75,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 static unsigned long long *facilities;

 /* Section: not file related */
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
        /* every s390 is virtualization enabled ;-) */
+       return 0;
 }

 void kvm_arch_hardware_disable(void *garbage)
@@ -116,10 +118,16 @@ long kvm_arch_dev_ioctl(struct file *filp,

 int kvm_dev_ioctl_check_extension(long ext)
 {
+       int r;
+
        switch (ext) {
+       case KVM_CAP_S390_PSW:
+               r = 1;
+               break;
        default:
-               return 0;
+               r = 0;
        }
+       return r;
 }

 /* Section: vm related */
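
Note: the new KVM_CAP_S390_PSW capability is visible to userspace through the
standard KVM_CHECK_EXTENSION ioctl. A minimal probe sketch (error handling
elided; assumes a kernel that carries this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            /* returns 1 on kernels with this patch, 0 before it */
            int has_psw = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
            printf("KVM_CAP_S390_PSW: %d\n", has_psw);
            return 0;
    }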
@@ -150,7 +158,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                break;
        }
        default:
-               r = -EINVAL;
+               r = -ENOTTY;
        }

        return r;
@@ -234,6 +242,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
+       cleanup_srcu_struct(&kvm->srcu);
        kfree(kvm);
 }

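
Note: this pairs with the SRCU-based memslot handling in the generic code.
The assumption here is that kvm_create_vm() now does
init_srcu_struct(&kvm->srcu), so the arch destroy path has to release it
before the final kfree(). A sketch of the assumed lifecycle:

    init_srcu_struct(&kvm->srcu);       /* VM creation, generic code */
    /* ... VM lifetime ... */
    cleanup_srcu_struct(&kvm->srcu);    /* VM destruction, this hunk */
    kfree(kvm);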
@@ -419,8 +428,10 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
-       else
-               vcpu->arch.sie_block->gpsw = psw;
+       else {
+               vcpu->run->psw_mask = psw.mask;
+               vcpu->run->psw_addr = psw.addr;
+       }
        vcpu_put(vcpu);
        return rc;
 }
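
Note: the initial PSW is now staged in the shared kvm_run area rather than
written into the SIE block directly; the run loop below copies it into gpsw
on the next KVM_RUN. A hedged sketch of how userspace reaches that area
(vcpu_fd from KVM_CREATE_VCPU; error checks elided):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    static struct kvm_run *map_run(int kvm_fd, int vcpu_fd)
    {
            /* the run structure is mmap'ed from the vcpu fd; its size
               comes from the main /dev/kvm fd */
            long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
            return mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, vcpu_fd, 0);
    }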
@@ -508,9 +519,6 @@ rerun_vcpu:

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
-               vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
-               vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
-               break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
@@ -519,6 +527,9 @@ rerun_vcpu:
                BUG();
        }

+       vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
+       vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
+
        might_fault();

        do {
@@ -534,12 +545,10 @@ rerun_vcpu:
                rc = -EINTR;
        }

-       if (rc == -ENOTSUPP) {
+       if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
-               kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
-               kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
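
Note: with mask/addr dropped from the s390_sieic payload, userspace takes the
intercept details from s390_sieic and the PSW from the top-level run fields.
Continuing the sketch above (run and vcpu_fd as before; handle_intercept is a
hypothetical helper):

    ioctl(vcpu_fd, KVM_RUN, 0);
    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
            /* PSW now comes from run->psw_mask/psw_addr, not the payload */
            handle_intercept(run->s390_sieic.icptcode,
                             run->s390_sieic.ipa, run->s390_sieic.ipb,
                             run->psw_mask, run->psw_addr);
            break;
    }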
@@ -551,6 +560,9 @@ rerun_vcpu:
                rc = 0;
        }

+       kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
+       kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

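
Note: together with the copy-in before the run loop, this makes the PSW valid
in the run area on every exit, not just KVM_EXIT_S390_SIEIC, and lets
userspace steer the guest without extra ioctls. Sketch (new_psw_addr is a
hypothetical address chosen by userspace):

    run->psw_addr = new_psw_addr;  /* takes effect on the next entry */
    ioctl(vcpu_fd, KVM_RUN, 0);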
@@ -593,45 +605,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
        } else
                prefix = 0;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
-                       addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
+                       addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

-       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
-                       addr + offsetof(struct save_area_s390x, ctrl_regs),
+                       addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
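
Note: the common struct save_area replaces the bitness-specific
save_area_s390x here. Field sizes can be read off the copy lengths above; the
struct below is an illustrative sketch only, not the real layout, which lives
in the arch headers:

    struct save_area_sketch {           /* illustrative, padding omitted */
            u64 fp_regs[16];            /* 128 bytes, floating point regs */
            u64 gp_regs[16];            /* 128 bytes, general purpose regs */
            u8  psw[16];                /* 16-byte program status word */
            u32 pref_reg;               /* prefix register */
            u32 fp_ctrl_reg;            /* floating point control */
            u32 tod_reg;                /* TOD programmable register */
            u64 timer;                  /* CPU timer */
            u64 clk_cmp;                /* clock comparator */
            u32 acc_regs[16];           /* 64 bytes, access regs */
            u64 ctrl_regs[16];          /* 128 bytes, control regs */
    };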
@@ -679,14 +691,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 }

 /* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                               struct kvm_userspace_memory_region *mem,
-                               struct kvm_memory_slot old,
-                               int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_memory_slot old,
+                                  struct kvm_userspace_memory_region *mem,
+                                  int user_alloc)
 {
-       int i;
-       struct kvm_vcpu *vcpu;
-
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
@@ -709,14 +719,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        if (!user_alloc)
                return -EINVAL;

+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                               struct kvm_userspace_memory_region *mem,
+                               struct kvm_memory_slot old,
+                               int user_alloc)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
-
-       return 0;
 }

 void kvm_arch_flush_shadow(struct kvm *kvm)
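
Note: the old kvm_arch_set_memory_region is split into a prepare step, which
may still fail, and a commit step, which may not. A hedged sketch of the
generic calling sequence this split is assumed to enable (simplified;
argument names as in this patch):

    r = kvm_arch_prepare_memory_region(kvm, memslot, old, mem, user_alloc);
    if (r)
            return r;               /* nothing installed yet, easy unwind */
    /* generic code publishes the new slot array (SRCU switch) */
    kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);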