Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--   arch/s390/kvm/kvm-s390.c   55
1 files changed, 27 insertions, 28 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 569494e01ec6..7635c00a1479 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -732,14 +732,16 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 
         if (exit_reason >= 0) {
                 rc = 0;
+        } else if (kvm_is_ucontrol(vcpu->kvm)) {
+                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
+                vcpu->run->s390_ucontrol.trans_exc_code =
+                                                current->thread.gmap_addr;
+                vcpu->run->s390_ucontrol.pgm_code = 0x10;
+                rc = -EREMOTE;
         } else {
-                if (kvm_is_ucontrol(vcpu->kvm)) {
-                        rc = SIE_INTERCEPT_UCONTROL;
-                } else {
-                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
-                        trace_kvm_s390_sie_fault(vcpu);
-                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-                }
+                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+                trace_kvm_s390_sie_fault(vcpu);
+                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
         }
 
         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
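With this hunk, a fault in a user-controlled (ucontrol) guest is turned into a KVM_EXIT_S390_UCONTROL exit directly in vcpu_post_run(), with -EREMOTE propagating it to userspace, instead of going through the SIE_INTERCEPT_UCONTROL special case that the next hunk removes. A minimal sketch of how userspace might consume that exit follows; the vcpu_fd descriptor, the mmap'ed run structure and the stripped-down error handling are assumptions for illustration, not part of this patch.

/* Sketch only: consume a KVM_EXIT_S390_UCONTROL exit in a ucontrol VMM.
 * Assumes vcpu_fd is an open vcpu fd and run is its mmap'ed kvm_run area. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int run_once(int vcpu_fd, struct kvm_run *run)
{
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

        if (run->exit_reason == KVM_EXIT_S390_UCONTROL) {
                /* Guest translation fault: for ucontrol guests userspace owns
                 * the guest mapping and is expected to resolve the address in
                 * trans_exc_code before re-entering the vcpu. */
                printf("ucontrol fault: addr=0x%llx pgm_code=0x%x\n",
                       (unsigned long long)run->s390_ucontrol.trans_exc_code,
                       run->s390_ucontrol.pgm_code);
        }
        return 0;
}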
@@ -833,16 +835,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 rc = -EINTR;
         }
 
-#ifdef CONFIG_KVM_S390_UCONTROL
-        if (rc == SIE_INTERCEPT_UCONTROL) {
-                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
-                kvm_run->s390_ucontrol.trans_exc_code =
-                                                current->thread.gmap_addr;
-                kvm_run->s390_ucontrol.pgm_code = 0x10;
-                rc = 0;
-        }
-#endif
-
         if (rc == -EOPNOTSUPP) {
                 /* intercept cannot be handled in-kernel, prepare kvm-run */
                 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
@@ -885,10 +877,11 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
  */
-int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
 {
         unsigned char archmode = 1;
         int prefix;
+        u64 clkcomp;
 
         if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
@@ -903,15 +896,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         } else
                 prefix = 0;
 
-        /*
-         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
-         * copying in vcpu load/put. Lets update our copies before we save
-         * it into the save area
-         */
-        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
-        save_access_regs(vcpu->run->s.regs.acrs);
-
         if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                         vcpu->arch.guest_fpregs.fprs, 128, prefix))
                 return -EFAULT;
@@ -941,8 +925,9 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
                         &vcpu->arch.sie_block->cputm, 8, prefix))
                 return -EFAULT;
 
+        clkcomp = vcpu->arch.sie_block->ckc >> 8;
         if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
-                        &vcpu->arch.sie_block->ckc, 8, prefix))
+                        &clkcomp, 8, prefix))
                 return -EFAULT;
 
         if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
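The change above stores the clock comparator through a local copy shifted right by 8 bits rather than the raw sie_block->ckc value, presumably to match the format expected in the save area's clk_cmp field. A tiny stand-alone illustration of the arithmetic (plain user-space C, not kernel code) is:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t ckc = 0x0123456789abcdefULL;   /* stand-in for sie_block->ckc */
        uint64_t clkcomp = ckc >> 8;            /* value copied into clk_cmp */

        printf("ckc     = %016llx\n", (unsigned long long)ckc);
        printf("clk_cmp = %016llx\n", (unsigned long long)clkcomp);
        return 0;       /* prints 000123456789abcd for clk_cmp */
}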
@@ -956,6 +941,20 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         return 0;
 }
 
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+        /*
+         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
+         * copying in vcpu load/put. Lets update our copies before we save
+         * it into the save area
+         */
+        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+        save_access_regs(vcpu->run->s.regs.acrs);
+
+        return kvm_s390_store_status_unloaded(vcpu, addr);
+}
+
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                      struct kvm_enable_cap *cap)
 {
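Taken together, the last hunks split the status store in two: kvm_s390_store_status_unloaded() copies only the register state that already sits in the vcpu's in-memory structures, while kvm_s390_vcpu_store_status() keeps the old behaviour by first flushing the lazily held floating-point and access registers. A hypothetical caller whose registers are already saved could then use the unloaded variant directly; the function below is illustrative only and not part of this diff.

/* Illustrative only -- not in this patch.  When the guest registers are
 * already saved (e.g. the vcpu is not currently loaded on a host CPU),
 * the lazy-register flush can be skipped. */
static int store_status_of_unloaded_vcpu(struct kvm_vcpu *vcpu,
                                         unsigned long addr)
{
        return kvm_s390_store_status_unloaded(vcpu, addr);
}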