path: root/arch/s390
author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:51:36 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:51:36 -0500
commit	f080480488028bcc25357f85e8ae54ccc3bb7173 (patch)
tree	8fcc943f16d26c795b3b6324b478af2d5a30285d /arch/s390
parent	eda670c626a4f53eb8ac5f20d8c10d3f0b54c583 (diff)
parent	e504c9098ed6acd9e1079c5e10e4910724ad429f (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM changes from Paolo Bonzini:
 "Here are the 3.13 KVM changes. There was a lot of work on the PPC
  side: the fact that the HV and emulation flavors can now coexist in a
  single kernel is probably the most interesting change from a user
  point of view. On the x86 side there are nested virtualization
  improvements and a few bugfixes. ARM got transparent huge page
  support, improved overcommit, and support for big-endian guests.

  Finally, there is a new interface to connect KVM with VFIO. This
  helps with devices that use NoSnoop PCI transactions, letting the
  driver in the guest execute WBINVD instructions. This includes some
  nVidia cards on Windows, that fail to start without these patches
  and the corresponding userspace changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  kvm, vmx: Fix lazy FPU on nested guest
  arm/arm64: KVM: PSCI: propagate caller endianness to the incoming vcpu
  arm/arm64: KVM: MMIO support for BE guest
  kvm, cpuid: Fix sparse warning
  kvm: Delete prototype for non-existent function kvm_check_iopl
  kvm: Delete prototype for non-existent function complete_pio
  hung_task: add method to reset detector
  pvclock: detect watchdog reset at pvclock read
  kvm: optimize out smp_mb after srcu_read_unlock
  srcu: API for barrier after srcu read unlock
  KVM: remove vm mmap method
  KVM: IOMMU: hva align mapping page size
  KVM: x86: trace cpuid emulation when called from emulator
  KVM: emulator: cleanup decode_register_operand() a bit
  KVM: emulator: check rex prefix inside decode_register()
  KVM: x86: fix emulation of "movzbl %bpl, %eax"
  kvm_host: typo fix
  KVM: x86: emulate SAHF instruction
  MAINTAINERS: add tree for kvm.git
  Documentation/kvm: add a 00-INDEX file
  ...
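Several of the s390 changes below revolve around the kernel's SRCU API: kvm->srcu protects the memslot array, and this series moves where the s390 code holds it. For reference, the read-side pattern the code relies on looks roughly like this (a minimal sketch against the standard SRCU API; the surrounding function is hypothetical, not part of this merge):

	#include <linux/srcu.h>

	static void access_memslots(struct kvm_vcpu *vcpu)
	{
		int idx;

		/* Enter an SRCU read-side critical section; readers never
		 * block writers, and synchronize_srcu() waits for all
		 * readers to drain before a memslot update is freed. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);

		/* ... memslots may be dereferenced safely here ... */

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
	}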
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/kvm_host.h	8
-rw-r--r--	arch/s390/kvm/diag.c	4
-rw-r--r--	arch/s390/kvm/gaccess.h	21
-rw-r--r--	arch/s390/kvm/intercept.c	6
-rw-r--r--	arch/s390/kvm/interrupt.c	3
-rw-r--r--	arch/s390/kvm/kvm-s390.c	96
-rw-r--r--	arch/s390/kvm/kvm-s390.h	9
-rw-r--r--	arch/s390/kvm/priv.c	61
8 files changed, 147 insertions, 61 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e87ecaa2c569..d5bc3750616e 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -38,13 +38,6 @@ struct sca_block {
 	struct sca_entry cpu[64];
 } __attribute__((packed));
 
-#define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
-#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
-#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
-#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
-
 #define CPUSTAT_STOPPED 0x80000000
 #define CPUSTAT_WAIT 0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -220,7 +213,6 @@ struct kvm_s390_interrupt_info {
 /* for local_interrupt.action_flags */
 #define ACTION_STORE_ON_STOP (1<<0)
 #define ACTION_STOP_ON_STOP (1<<1)
-#define ACTION_RELOADVCPU_ON_STOP (1<<2)
 
 struct kvm_s390_local_interrupt {
 	spinlock_t lock;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3a74d8af0d69..78d967f180f4 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -107,14 +107,13 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 
 static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
-	int ret, idx;
+	int ret;
 
 	/* No virtio-ccw notification? Get out quickly. */
 	if (!vcpu->kvm->arch.css_support ||
 	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
 		return -EOPNOTSUPP;
 
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	/*
 	 * The layout is as follows:
 	 * - gpr 2 contains the subchannel id (passed as addr)
@@ -125,7 +124,6 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 				      vcpu->run->s.regs.gprs[2],
 				      8, &vcpu->run->s.regs.gprs[3],
 				      vcpu->run->s.regs.gprs[4]);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	/*
 	 * Return cookie in gpr 2, but don't overwrite the register if the
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 99d789e8a018..374a439ccc60 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,20 +18,27 @@
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
 
+/* Convert real to absolute address by applying the prefix of the CPU */
+static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+						 unsigned long gaddr)
+{
+	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	if (gaddr < 2 * PAGE_SIZE)
+		gaddr += prefix;
+	else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
+		gaddr -= prefix;
+	return gaddr;
+}
+
 static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
 					  void __user *gptr,
 					  int prefixing)
 {
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
 	unsigned long gaddr = (unsigned long) gptr;
 	unsigned long uaddr;
 
-	if (prefixing) {
-		if (gaddr < 2 * PAGE_SIZE)
-			gaddr += prefix;
-		else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
-			gaddr -= prefix;
-	}
+	if (prefixing)
+		gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
 	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
 	if (IS_ERR_VALUE(uaddr))
 		uaddr = -EFAULT;
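The new kvm_s390_real_to_abs() helper factors s390 prefixing out of __gptr_to_uptr(): the first two pages of real address space and the two pages at the CPU's prefix address swap places, everything else is an identity mapping. A self-contained userspace restatement of that logic, with a prefix value chosen purely for illustration:

	#include <assert.h>

	#define PAGE_SIZE 4096UL	/* assuming 4K pages, as on s390 */

	/* Restatement of kvm_s390_real_to_abs() for illustration only. */
	static unsigned long real_to_abs(unsigned long prefix,
					 unsigned long gaddr)
	{
		if (gaddr < 2 * PAGE_SIZE)
			gaddr += prefix;	/* low core -> prefix area */
		else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
			gaddr -= prefix;	/* prefix area -> low core */
		return gaddr;			/* all other addresses unchanged */
	}

	int main(void)
	{
		unsigned long prefix = 0x20000;	/* example prefix value */

		assert(real_to_abs(prefix, 0x00000) == 0x20000);
		assert(real_to_abs(prefix, 0x20000) == 0x00000);
		assert(real_to_abs(prefix, 0x50000) == 0x50000);
		return 0;
	}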
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 5ee56e5acc23..5ddbbde6f65c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -62,12 +62,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
-		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
-		rc = SIE_INTERCEPT_RERUNVCPU;
-		vcpu->run->exit_reason = KVM_EXIT_INTR;
-	}
-
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
 		atomic_set_mask(CPUSTAT_STOPPED,
 				&vcpu->arch.sie_block->cpuflags);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 7f1f7ac5cf7f..5f79d2d79ca7 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -436,6 +436,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	add_wait_queue(&vcpu->wq, &wait);
@@ -455,6 +456,8 @@ no_timer:
 	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
 	return 0;
 }
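This pairs with the __vcpu_run() change below: since kvm->srcu is now held for most of the vcpu loop, kvm_s390_handle_wait() must drop it before blocking and re-take it on wakeup, so a sleeping vcpu cannot stall synchronize_srcu() and thereby memslot updates. The shape of the pattern, condensed as a sketch:

	/* Never sleep inside an SRCU read-side critical section. */
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* ... block on the wait queue until an interrupt is pending ... */

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);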
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ed8064cb5c49..569494e01ec6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -695,9 +695,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
-	int rc;
+	int rc, cpuflags;
 
 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
 
@@ -715,28 +715,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		return rc;
 
 	vcpu->arch.sie_block->icptcode = 0;
-	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
-		   atomic_read(&vcpu->arch.sie_block->cpuflags));
-	trace_kvm_s390_sie_enter(vcpu,
-				 atomic_read(&vcpu->arch.sie_block->cpuflags));
+	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
+	trace_kvm_s390_sie_enter(vcpu, cpuflags);
 
-	/*
-	 * As PF_VCPU will be used in fault handler, between guest_enter
-	 * and guest_exit should be no uaccess.
-	 */
-	preempt_disable();
-	kvm_guest_enter();
-	preempt_enable();
-	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
-	kvm_guest_exit();
+	return 0;
+}
+
+static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
+{
+	int rc;
 
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 
-	if (rc > 0)
-		rc = 0;
-	if (rc < 0) {
+	if (exit_reason >= 0) {
+		rc = 0;
+	} else {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
 		} else {
@@ -747,6 +743,49 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	}
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+	if (rc == 0) {
+		if (kvm_is_ucontrol(vcpu->kvm))
+			rc = -EOPNOTSUPP;
+		else
+			rc = kvm_handle_sie_intercept(vcpu);
+	}
+
+	return rc;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu)
+{
+	int rc, exit_reason;
+
+	/*
+	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
+	 * ning the guest), so that memslots (and other stuff) are protected
+	 */
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	do {
+		rc = vcpu_pre_run(vcpu);
+		if (rc)
+			break;
+
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+		/*
+		 * As PF_VCPU will be used in fault handler, between
+		 * guest_enter and guest_exit should be no uaccess.
+		 */
+		preempt_disable();
+		kvm_guest_enter();
+		preempt_enable();
+		exit_reason = sie64a(vcpu->arch.sie_block,
+				     vcpu->run->s.regs.gprs);
+		kvm_guest_exit();
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+		rc = vcpu_post_run(vcpu, exit_reason);
+	} while (!signal_pending(current) && !rc);
+
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	return rc;
 }
 
@@ -755,7 +794,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int rc;
 	sigset_t sigsaved;
 
-rerun_vcpu:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -788,19 +826,7 @@ rerun_vcpu:
 	}
 
 	might_fault();
-
-	do {
-		rc = __vcpu_run(vcpu);
-		if (rc)
-			break;
-		if (kvm_is_ucontrol(vcpu->kvm))
-			rc = -EOPNOTSUPP;
-		else
-			rc = kvm_handle_sie_intercept(vcpu);
-	} while (!signal_pending(current) && !rc);
-
-	if (rc == SIE_INTERCEPT_RERUNVCPU)
-		goto rerun_vcpu;
+	rc = __vcpu_run(vcpu);
 
 	if (signal_pending(current) && !rc) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
@@ -958,6 +984,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	int idx;
 	long r;
 
 	switch (ioctl) {
@@ -971,7 +998,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_S390_STORE_STATUS:
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvm_s390_vcpu_store_status(vcpu, arg);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	case KVM_S390_SET_INITIAL_PSW: {
 		psw_t psw;
@@ -1067,12 +1096,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }
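Taken together, the old monolithic __vcpu_run() is now split into vcpu_pre_run() / sie64a() / vcpu_post_run(), with kvm->srcu held everywhere except while the guest actually runs. Condensed to its control flow (a restatement of the new code above for readability, not additional code):

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	do {
		rc = vcpu_pre_run(vcpu);	/* requests, icptcode reset, tracing */
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);	/* guest runs, srcu dropped */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);	/* intercept handling */
	} while (!signal_pending(current) && !rc);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);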
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index dc99f1ca4267..b44912a32949 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -28,8 +28,7 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 extern unsigned long *vfacilities;
 
 /* negativ values are error codes, positive values for internal conditions */
-#define SIE_INTERCEPT_RERUNVCPU (1<<0)
-#define SIE_INTERCEPT_UCONTROL (1<<1)
+#define SIE_INTERCEPT_UCONTROL (1<<0)
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
 
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
@@ -91,8 +90,10 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 
 static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
 {
-	*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
-	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+	if (r1)
+		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+	if (r2)
+		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
 static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
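kvm_s390_get_regs_rre() now tolerates NULL output pointers, so callers that need only one register field of an RRE-format instruction can skip the other. The new handle_test_block() in priv.c below uses exactly that:

	int reg2;

	/* Only the second register field of the RRE format is needed;
	 * passing NULL for r1 is now safe. */
	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);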
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 59200ee275e5..2440602e6df1 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -30,6 +30,38 @@
 #include "kvm-s390.h"
 #include "trace.h"
 
+/* Handle SCK (SET CLOCK) interception */
+static int handle_set_clock(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu *cpup;
+	s64 hostclk, val;
+	u64 op2;
+	int i;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	op2 = kvm_s390_get_base_disp_s(vcpu);
+	if (op2 & 7)	/* Operand must be on a doubleword boundary */
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (get_guest(vcpu, val, (u64 __user *) op2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+	if (store_tod_clock(&hostclk)) {
+		kvm_s390_set_psw_cc(vcpu, 3);
+		return 0;
+	}
+	val = (val - hostclk) & ~0x3fUL;
+
+	mutex_lock(&vcpu->kvm->lock);
+	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
+		cpup->arch.sie_block->epoch = val;
+	mutex_unlock(&vcpu->kvm->lock);
+
+	kvm_s390_set_psw_cc(vcpu, 0);
+	return 0;
+}
+
 static int handle_set_prefix(struct kvm_vcpu *vcpu)
 {
 	u64 operand2;
@@ -128,6 +160,33 @@ static int handle_skey(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int handle_test_block(struct kvm_vcpu *vcpu)
+{
+	unsigned long hva;
+	gpa_t addr;
+	int reg2;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
+	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+	addr = kvm_s390_real_to_abs(vcpu, addr);
+
+	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
+	if (kvm_is_error_hva(hva))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	/*
+	 * We don't expect errors on modern systems, and do not care
+	 * about storage keys (yet), so let's just clear the page.
+	 */
+	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
+		return -EFAULT;
+	kvm_s390_set_psw_cc(vcpu, 0);
+	vcpu->run->s.regs.gprs[0] = 0;
+	return 0;
+}
+
 static int handle_tpi(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_interrupt_info *inti;
@@ -438,12 +497,14 @@ out_exception:
 
 static const intercept_handler_t b2_handlers[256] = {
 	[0x02] = handle_stidp,
+	[0x04] = handle_set_clock,
 	[0x10] = handle_set_prefix,
 	[0x11] = handle_store_prefix,
 	[0x12] = handle_store_cpu_address,
 	[0x29] = handle_skey,
 	[0x2a] = handle_skey,
 	[0x2b] = handle_skey,
+	[0x2c] = handle_test_block,
 	[0x30] = handle_io_inst,
 	[0x31] = handle_io_inst,
 	[0x32] = handle_io_inst,
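For reference, handle_set_clock() stores a per-vcpu delta rather than an absolute time: SIE presents the host TOD plus the epoch to the guest, so setting every vcpu's epoch to the requested value minus the current host TOD makes the guest observe its requested clock without touching the host clock. A worked illustration of the arithmetic, with made-up values:

	/* Hypothetical values, for illustration only. */
	s64 hostclk = 0x1000000000000040;	/* host TOD at interception */
	s64 val     = 0x1000000000001000;	/* guest-requested TOD value */

	/* Per-vcpu epoch: requested minus host, low six bits masked off. */
	val = (val - hostclk) & ~0x3fUL;	/* here: 0xfc0 */

	/* The guest then reads host TOD + epoch, i.e. its requested clock. */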