author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-05 17:47:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-05 17:47:31 -0400
commit		01227a889ed56ae53aeebb9f93be9d54dd8b2de8 (patch)
tree		d5eba9359a9827e84d4112b84d48c54df5c5acde /arch/s390
parent		9e6879460c8edb0cd3c24c09b83d06541b5af0dc (diff)
parent		db6ae6158186a17165ef990bda2895ae7594b039 (diff)
Merge tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Gleb Natapov:
 "Highlights of the updates are:

  general:
  - new emulated device API
  - legacy device assignment is now optional
  - irqfd interface is more generic and can be shared between arches

  x86:
  - VMCS shadow support and other nested VMX improvements
  - APIC virtualization and Posted Interrupt hardware support
  - Optimize mmio spte zapping

  ppc:
  - BookE: in-kernel MPIC emulation with irqfd support
  - Book3S: in-kernel XICS emulation (incomplete)
  - Book3S: HV: migration fixes
  - BookE: more debug support preparation
  - BookE: e6500 support

  ARM:
  - reworking of Hyp idmaps

  s390:
  - ioeventfd for virtio-ccw

  And many other bug fixes, cleanups and improvements"

* tag 'kvm-3.10-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
  kvm: Add compat_ioctl for device control API
  KVM: x86: Account for failing enable_irq_window for NMI window request
  KVM: PPC: Book3S: Add API for in-kernel XICS emulation
  kvm/ppc/mpic: fix missing unlock in set_base_addr()
  kvm/ppc: Hold srcu lock when calling kvm_io_bus_read/write
  kvm/ppc/mpic: remove users
  kvm/ppc/mpic: fix mmio region lists when multiple guests used
  kvm/ppc/mpic: remove default routes from documentation
  kvm: KVM_CAP_IOMMU only available with device assignment
  ARM: KVM: iterate over all CPUs for CPU compatibility check
  KVM: ARM: Fix spelling in error message
  ARM: KVM: define KVM_ARM_MAX_VCPUS unconditionally
  KVM: ARM: Fix API documentation for ONE_REG encoding
  ARM: KVM: promote vfp_host pointer to generic host cpu context
  ARM: KVM: add architecture specific hook for capabilities
  ARM: KVM: perform HYP initilization for hotplugged CPUs
  ARM: KVM: switch to a dual-step HYP init code
  ARM: KVM: rework HYP page table freeing
  ARM: KVM: enforce maximum size for identity mapped code
  ARM: KVM: move to a KVM provided HYP idmap
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/uapi/asm/Kbuild	1
-rw-r--r--	arch/s390/include/uapi/asm/virtio-ccw.h	21
-rw-r--r--	arch/s390/kvm/Kconfig	1
-rw-r--r--	arch/s390/kvm/Makefile	2
-rw-r--r--	arch/s390/kvm/diag.c	26
-rw-r--r--	arch/s390/kvm/gaccess.h	429
-rw-r--r--	arch/s390/kvm/intercept.c	18
-rw-r--r--	arch/s390/kvm/interrupt.c	245
-rw-r--r--	arch/s390/kvm/kvm-s390.c	43
-rw-r--r--	arch/s390/kvm/kvm-s390.h	12
-rw-r--r--	arch/s390/kvm/priv.c	270
11 files changed, 340 insertions, 728 deletions
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 7bf68fff7c5d..9ccd1905bdad 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -44,5 +44,6 @@ header-y += termios.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
+header-y += virtio-ccw.h
 header-y += vtoc.h
 header-y += zcrypt.h
diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h
new file mode 100644
index 000000000000..a9a4ebf79fa7
--- /dev/null
+++ b/arch/s390/include/uapi/asm/virtio-ccw.h
@@ -0,0 +1,21 @@
+/*
+ * Definitions for virtio-ccw devices.
+ *
+ * Copyright IBM Corp. 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#ifndef __KVM_VIRTIO_CCW_H
+#define __KVM_VIRTIO_CCW_H
+
+/* Alignment of vring buffers. */
+#define KVM_VIRTIO_CCW_RING_ALIGN 4096
+
+/* Subcode for diagnose 500 (virtio hypercall). */
+#define KVM_S390_VIRTIO_CCW_NOTIFY 3
+
+#endif
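
The two constants above are the entire guest-visible ABI of the new header: vring buffers must be aligned to 4096 bytes, and subcode 3 of diagnose 0x500 asks the host for a virtqueue notification. As a hedged illustration of the first constant (not part of this patch; vring_size() is the stock helper from <linux/virtio_ring.h>, and ccw_ring_bytes is a hypothetical wrapper):

/*
 * Sketch: how a transport might size a virtqueue ring using the
 * alignment the header mandates. Assumes vring_size(num, align),
 * which returns the total footprint of descriptor table, available
 * ring and used ring, with the used ring aligned to 'align'.
 */
#include <linux/virtio_ring.h>
#include <asm/virtio-ccw.h>

static unsigned long ccw_ring_bytes(unsigned int num_descs)
{
	return vring_size(num_descs, KVM_VIRTIO_CCW_RING_ALIGN);
}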
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 60f9f8ae0fc8..70b46eacf8e1 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
+	select HAVE_KVM_EVENTFD
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 3975722bb19d..8fe9d65a4585 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -6,7 +6,7 @@
 # it under the terms of the GNU General Public License (version 2 only)
 # as published by the Free Software Foundation.
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o eventfd.o)
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a390687feb13..1c01a9912989 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -13,6 +13,7 @@
 
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <asm/virtio-ccw.h>
 #include "kvm-s390.h"
 #include "trace.h"
 #include "trace-s390.h"
@@ -104,6 +105,29 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 	return -EREMOTE;
 }
 
+static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
+{
+	int ret, idx;
+
+	/* No virtio-ccw notification? Get out quickly. */
+	if (!vcpu->kvm->arch.css_support ||
+	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
+		return -EOPNOTSUPP;
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	/*
+	 * The layout is as follows:
+	 * - gpr 2 contains the subchannel id (passed as addr)
+	 * - gpr 3 contains the virtqueue index (passed as datamatch)
+	 */
+	ret = kvm_io_bus_write(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+			       vcpu->run->s.regs.gprs[2],
+			       8, &vcpu->run->s.regs.gprs[3]);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	/* kvm_io_bus_write returns -EOPNOTSUPP if it found no match. */
+	return ret < 0 ? ret : 0;
+}
+
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 {
 	int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
@@ -118,6 +142,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 		return __diag_time_slice_end_directed(vcpu);
 	case 0x308:
 		return __diag_ipl_functions(vcpu);
+	case 0x500:
+		return __diag_virtio_hypercall(vcpu);
 	default:
 		return -EOPNOTSUPP;
 	}
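
The comment in __diag_virtio_hypercall() pins down the calling convention: gpr 1 carries the subcode, gpr 2 the subchannel id, gpr 3 the virtqueue index. For orientation, a sketch of the matching guest-side notify, modeled on the s390 virtio-ccw driver; the function name and constraint details here are illustrative, not taken from this patch:

/*
 * Guest side of the hypercall handled above: load the registers as
 * documented in __diag_virtio_hypercall() and issue diagnose 0x500.
 */
static inline long virtio_ccw_kvm_notify(unsigned long schid,
					 unsigned long queue_index)
{
	register unsigned long __subcode asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register unsigned long __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc)
		      : "d" (__subcode), "d" (__schid), "d" (__index)
		      : "memory", "cc");
	return __rc;
}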
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 4703f129e95e..302e0e52b009 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,369 +18,86 @@
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
 
-static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
-					       unsigned long guestaddr)
-{
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
-
-	if (guestaddr < 2 * PAGE_SIZE)
-		guestaddr += prefix;
-	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
-		guestaddr -= prefix;
-
-	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
-}
-
-static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-				u64 *result)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	BUG_ON(guestaddr & 7);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return get_user(*result, (unsigned long __user *) uptr);
-}
-
-static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-				u32 *result)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	BUG_ON(guestaddr & 3);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return get_user(*result, (u32 __user *) uptr);
-}
-
-static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-				u16 *result)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	BUG_ON(guestaddr & 1);
-
-	if (IS_ERR(uptr))
-		return PTR_ERR(uptr);
-
-	return get_user(*result, (u16 __user *) uptr);
-}
-
-static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-			       u8 *result)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return get_user(*result, (u8 __user *) uptr);
-}
-
-static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-				u64 value)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	BUG_ON(guestaddr & 7);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return put_user(value, (u64 __user *) uptr);
-}
-
-static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-				u32 value)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	BUG_ON(guestaddr & 3);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return put_user(value, (u32 __user *) uptr);
-}
-
-static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-				u16 value)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	BUG_ON(guestaddr & 1);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return put_user(value, (u16 __user *) uptr);
-}
-
-static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
-			       u8 value)
-{
-	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	return put_user(value, (u8 __user *) uptr);
-}
-
-
-static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
-				       unsigned long guestdest,
-				       void *from, unsigned long n)
-{
-	int rc;
-	unsigned long i;
-	u8 *data = from;
-
-	for (i = 0; i < n; i++) {
-		rc = put_guest_u8(vcpu, guestdest++, *(data++));
-		if (rc < 0)
-			return rc;
-	}
-	return 0;
-}
-
-static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
-				       unsigned long guestdest,
-				       void *from, unsigned long n)
-{
-	int r;
-	void __user *uptr;
-	unsigned long size;
-
-	if (guestdest + n < guestdest)
-		return -EFAULT;
-
-	/* simple case: all within one segment table entry? */
-	if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
-		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
-
-		if (IS_ERR((void __force *) uptr))
-			return PTR_ERR((void __force *) uptr);
-
-		r = copy_to_user(uptr, from, n);
-
-		if (r)
-			r = -EFAULT;
-
-		goto out;
-	}
-
-	/* copy first segment */
-	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	size = PMD_SIZE - (guestdest & ~PMD_MASK);
-
-	r = copy_to_user(uptr, from, size);
-
-	if (r) {
-		r = -EFAULT;
-		goto out;
-	}
-	from += size;
-	n -= size;
-	guestdest += size;
-
-	/* copy full segments */
-	while (n >= PMD_SIZE) {
-		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
-		if (IS_ERR((void __force *) uptr))
-			return PTR_ERR((void __force *) uptr);
-
-		r = copy_to_user(uptr, from, PMD_SIZE);
-
-		if (r) {
-			r = -EFAULT;
-			goto out;
-		}
-		from += PMD_SIZE;
-		n -= PMD_SIZE;
-		guestdest += PMD_SIZE;
-	}
-
-	/* copy the tail segment */
-	if (n) {
-		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
-
-		if (IS_ERR((void __force *) uptr))
-			return PTR_ERR((void __force *) uptr);
-
-		r = copy_to_user(uptr, from, n);
-
-		if (r)
-			r = -EFAULT;
-	}
-out:
-	return r;
-}
-
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
-					 unsigned long guestdest,
-					 void *from, unsigned long n)
-{
-	return __copy_to_guest_fast(vcpu, guestdest, from, n);
-}
-
-static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
-				void *from, unsigned long n)
-{
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
-
-	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
-		goto slowpath;
-
-	if ((guestdest < prefix) && (guestdest + n > prefix))
-		goto slowpath;
-
-	if ((guestdest < prefix + 2 * PAGE_SIZE)
-	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
-		goto slowpath;
-
-	if (guestdest < 2 * PAGE_SIZE)
-		guestdest += prefix;
-	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
-		guestdest -= prefix;
-
-	return __copy_to_guest_fast(vcpu, guestdest, from, n);
-slowpath:
-	return __copy_to_guest_slow(vcpu, guestdest, from, n);
-}
-
-static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
-					 unsigned long guestsrc,
-					 unsigned long n)
-{
-	int rc;
-	unsigned long i;
-	u8 *data = to;
-
-	for (i = 0; i < n; i++) {
-		rc = get_guest_u8(vcpu, guestsrc++, data++);
-		if (rc < 0)
-			return rc;
-	}
-	return 0;
-}
-
-static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
-					 unsigned long guestsrc,
-					 unsigned long n)
-{
-	int r;
-	void __user *uptr;
-	unsigned long size;
-
-	if (guestsrc + n < guestsrc)
-		return -EFAULT;
-
-	/* simple case: all within one segment table entry? */
-	if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
-		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
-
-		if (IS_ERR((void __force *) uptr))
-			return PTR_ERR((void __force *) uptr);
-
-		r = copy_from_user(to, uptr, n);
-
-		if (r)
-			r = -EFAULT;
-
-		goto out;
-	}
-
-	/* copy first segment */
-	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
-	if (IS_ERR((void __force *) uptr))
-		return PTR_ERR((void __force *) uptr);
-
-	size = PMD_SIZE - (guestsrc & ~PMD_MASK);
-
-	r = copy_from_user(to, uptr, size);
-
-	if (r) {
-		r = -EFAULT;
-		goto out;
-	}
-	to += size;
-	n -= size;
-	guestsrc += size;
-
-	/* copy full segments */
-	while (n >= PMD_SIZE) {
-		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
-		if (IS_ERR((void __force *) uptr))
-			return PTR_ERR((void __force *) uptr);
-
-		r = copy_from_user(to, uptr, PMD_SIZE);
-
-		if (r) {
-			r = -EFAULT;
-			goto out;
-		}
-		to += PMD_SIZE;
-		n -= PMD_SIZE;
-		guestsrc += PMD_SIZE;
-	}
-
-	/* copy the tail segment */
-	if (n) {
-		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
-
-		if (IS_ERR((void __force *) uptr))
-			return PTR_ERR((void __force *) uptr);
-
-		r = copy_from_user(to, uptr, n);
-
-		if (r)
-			r = -EFAULT;
-	}
-out:
-	return r;
-}
-
-static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
-					   unsigned long guestsrc,
-					   unsigned long n)
-{
-	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
-}
-
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-				  unsigned long guestsrc, unsigned long n)
-{
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
-
-	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
-		goto slowpath;
-
-	if ((guestsrc < prefix) && (guestsrc + n > prefix))
-		goto slowpath;
-
-	if ((guestsrc < prefix + 2 * PAGE_SIZE)
-	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
-		goto slowpath;
-
-	if (guestsrc < 2 * PAGE_SIZE)
-		guestsrc += prefix;
-	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
-		guestsrc -= prefix;
-
-	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
-slowpath:
-	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
-#endif
+static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
+					  void __user *gptr,
+					  int prefixing)
+{
+	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	unsigned long gaddr = (unsigned long) gptr;
+	unsigned long uaddr;
+
+	if (prefixing) {
+		if (gaddr < 2 * PAGE_SIZE)
+			gaddr += prefix;
+		else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
+			gaddr -= prefix;
+	}
+	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(uaddr))
+		uaddr = -EFAULT;
+	return (void __user *)uaddr;
+}
+
+#define get_guest(vcpu, x, gptr)				\
+({								\
+	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
+	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
+	int __ret = PTR_RET((void __force *)__uptr);		\
+								\
+	if (!__ret) {						\
+		BUG_ON((unsigned long)__uptr & __mask);		\
+		__ret = get_user(x, __uptr);			\
+	}							\
+	__ret;							\
+})
+
+#define put_guest(vcpu, x, gptr)				\
+({								\
+	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
+	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
+	int __ret = PTR_RET((void __force *)__uptr);		\
+								\
+	if (!__ret) {						\
+		BUG_ON((unsigned long)__uptr & __mask);		\
+		__ret = put_user(x, __uptr);			\
+	}							\
+	__ret;							\
+})
+
+static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
+			       unsigned long from, unsigned long len,
+			       int to_guest, int prefixing)
+{
+	unsigned long _len, rc;
+	void __user *uptr;
+
+	while (len) {
+		uptr = to_guest ? (void __user *)to : (void __user *)from;
+		uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
+		if (IS_ERR((void __force *)uptr))
+			return -EFAULT;
+		_len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
+		_len = min(_len, len);
+		if (to_guest)
+			rc = copy_to_user((void __user *) uptr, (void *)from, _len);
+		else
+			rc = copy_from_user((void *)to, (void __user *)uptr, _len);
+		if (rc)
+			return -EFAULT;
+		len -= _len;
+		from += _len;
+		to += _len;
+	}
+	return 0;
+}
+
+#define copy_to_guest(vcpu, to, from, size) \
+	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
+#define copy_from_guest(vcpu, to, from, size) \
+	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
+#define copy_to_guest_absolute(vcpu, to, from, size) \
+	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
+#define copy_from_guest_absolute(vcpu, to, from, size) \
+	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
+
+#endif /* __KVM_S390_GACCESS_H */
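
The rewrite above replaces eight fixed-width helpers with two type-driven macros plus one page-wise copy loop: get_guest()/put_guest() derive both the access width and the alignment check from the type of the __user pointer, and __copy_guest() applies lowcore prefixing only when asked. A minimal usage sketch (hypothetical handler, not from this patch):

/*
 * Sketch: the pointer type selects the access size, so no _u16/_u32/
 * _u64 variants are needed; both macros return 0 or -EFAULT.
 */
static int example_read_modify(struct kvm_vcpu *vcpu, u64 gaddr)
{
	u32 parm;

	/* 4-byte read from guest memory (with prefixing applied) */
	if (get_guest(vcpu, parm, (u32 __user *) gaddr))
		return -EFAULT;
	/* 2-byte write of the low halfword right after it */
	return put_guest(vcpu, (u16) parm, (u16 __user *) (gaddr + 4));
}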
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f26ff1e31bdb..b7d1b2edeeb3 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -43,12 +43,10 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
 
 	do {
-		rc = get_guest_u64(vcpu, useraddr,
-				   &vcpu->arch.sie_block->gcr[reg]);
-		if (rc == -EFAULT) {
-			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-			break;
-		}
+		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
+			       (u64 __user *) useraddr);
+		if (rc)
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		useraddr += 8;
 		if (reg == reg3)
 			break;
@@ -78,11 +76,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 
 	reg = reg1;
 	do {
-		rc = get_guest_u32(vcpu, useraddr, &val);
-		if (rc == -EFAULT) {
-			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-			break;
-		}
+		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
+		if (rc)
+			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
 		vcpu->arch.sie_block->gcr[reg] |= val;
 		useraddr += 4;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 37116a77cb4b..5c948177529e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -180,7 +180,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 				   struct kvm_s390_interrupt_info *inti)
 {
 	const unsigned short table[] = { 2, 4, 4, 6 };
-	int rc, exception = 0;
+	int rc = 0;
 
 	switch (inti->type) {
 	case KVM_S390_INT_EMERGENCY:
@@ -188,74 +188,41 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_emergency_signal++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->emerg.code, 0);
-		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, inti->emerg.code,
+				(u16 __user *)__LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
 		break;
-
 	case KVM_S390_INT_EXTERNAL_CALL:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
 		vcpu->stat.deliver_external_call++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->extcall.code, 0);
-		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, inti->extcall.code,
+				(u16 __user *)__LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
 		break;
-
 	case KVM_S390_INT_SERVICE:
 		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
 			   inti->ext.ext_params);
 		vcpu->stat.deliver_service_signal++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->ext.ext_params, 0);
-		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params,
+				(u32 __user *)__LC_EXT_PARAMS);
 		break;
-
 	case KVM_S390_INT_VIRTIO:
 		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
@@ -263,34 +230,17 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->ext.ext_params,
 						 inti->ext.ext_params2);
-		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_EXT_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
-				   inti->ext.ext_params2);
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
+		rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
+		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_EXT_NEW_PSW, sizeof(psw_t));
+		rc |= put_guest(vcpu, inti->ext.ext_params,
+				(u32 __user *)__LC_EXT_PARAMS);
+		rc |= put_guest(vcpu, inti->ext.ext_params2,
+				(u64 __user *)__LC_EXT_PARAMS2);
 		break;
-
 	case KVM_S390_SIGP_STOP:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
 		vcpu->stat.deliver_stop_signal++;
@@ -313,18 +263,14 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_restart_signal++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 0, 0);
-		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
-			restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = copy_to_guest(vcpu,
+				   offsetof(struct _lowcore, restart_old_psw),
+				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      offsetof(struct _lowcore, restart_psw),
+				      sizeof(psw_t));
 		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 		break;
-
 	case KVM_S390_PROGRAM_INT:
 		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
 			   inti->pgm.code,
@@ -332,24 +278,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_program_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->pgm.code, 0);
-		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
-				   table[vcpu->arch.sie_block->ipa >> 14]);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_PGM_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
+		rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
+				(u16 __user *)__LC_PGM_ILC);
+		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_PGM_NEW_PSW, sizeof(psw_t));
 		break;
 
 	case KVM_S390_MCHK:
@@ -358,24 +293,13 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 inti->mchk.cr14,
 						 inti->mchk.mcic);
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_PREFIXED);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_MCK_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
+		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_MCK_NEW_PSW, sizeof(psw_t));
 		break;
 
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
@@ -388,67 +312,44 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		vcpu->stat.deliver_io_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
 						 param0, param1);
-		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
-				   inti->io.subchannel_id);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
-				   inti->io.subchannel_nr);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
-				   inti->io.io_int_parm);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
-				   inti->io.io_int_word);
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
-				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
-
-		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-				     __LC_IO_NEW_PSW, sizeof(psw_t));
-		if (rc == -EFAULT)
-			exception = 1;
+		rc = put_guest(vcpu, inti->io.subchannel_id,
+			       (u16 __user *) __LC_SUBCHANNEL_ID);
+		rc |= put_guest(vcpu, inti->io.subchannel_nr,
+				(u16 __user *) __LC_SUBCHANNEL_NR);
+		rc |= put_guest(vcpu, inti->io.io_int_parm,
+				(u32 __user *) __LC_IO_INT_PARM);
+		rc |= put_guest(vcpu, inti->io.io_int_word,
+				(u32 __user *) __LC_IO_INT_WORD);
+		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+				      __LC_IO_NEW_PSW, sizeof(psw_t));
 		break;
 	}
 	default:
 		BUG();
 	}
-	if (exception) {
+	if (rc) {
 		printk("kvm: The guest lowcore is not mapped during interrupt "
 		       "delivery, killing userspace\n");
 		do_exit(SIGKILL);
 	}
 }
 
 static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 {
-	int rc, exception = 0;
+	int rc;
 
 	if (psw_extint_disabled(vcpu))
 		return 0;
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
 		return 0;
-	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
-	if (rc == -EFAULT)
-		exception = 1;
-	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
-			   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	if (rc == -EFAULT)
-		exception = 1;
-	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
-			     __LC_EXT_NEW_PSW, sizeof(psw_t));
-	if (rc == -EFAULT)
-		exception = 1;
-	if (exception) {
+	rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
+	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+			      __LC_EXT_NEW_PSW, sizeof(psw_t));
+	if (rc) {
 		printk("kvm: The guest lowcore is not mapped during interrupt "
 		       "delivery, killing userspace\n");
 		do_exit(SIGKILL);
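
The delivery rewrite also changes the error-handling shape: every accessor returns 0 or -EFAULT, the results are OR-ed into one rc, and a single test replaces the per-access "exception = 1" bookkeeping. OR-ing negative errnos discards which error occurred, but the caller only tests for non-zero, so nothing is lost. A tiny standalone illustration of the pattern (stand-in functions, not kernel code):

#include <stdio.h>

/* Stand-ins for put_guest()/copy_to_guest(): 0 on success, a
 * negative errno on fault, like the kernel helpers. */
static int write_a(void) { return 0; }
static int write_b(void) { return -14; /* -EFAULT */ }

int main(void)
{
	int rc;

	/* rc stays 0 only if every access succeeded, so one check
	 * at the end replaces a check after each call. */
	rc = write_a();
	rc |= write_b();
	if (rc)
		fprintf(stderr, "lowcore not mapped, giving up\n");
	return rc ? 1 : 0;
}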
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4cf35a0a79e7..c1c7c683fa26 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -142,12 +142,16 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_S390_CSS_SUPPORT:
+	case KVM_CAP_IOEVENTFD:
 		r = 1;
 		break;
 	case KVM_CAP_NR_VCPUS:
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_NR_MEMSLOTS:
+		r = KVM_USER_MEM_SLOTS;
+		break;
 	case KVM_CAP_S390_COW:
 		r = MACHINE_HAS_ESOP;
 		break;
@@ -632,8 +636,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		} else {
 			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
 			trace_kvm_s390_sie_fault(vcpu);
-			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-			rc = 0;
+			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		}
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
@@ -974,22 +977,13 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   bool user_alloc)
+				   enum kvm_mr_change change)
 {
-	/* A few sanity checks. We can have exactly one memory slot which has
-	   to start at guest virtual zero and which has to be located at a
-	   page boundary in userland and which has to end at a page boundary.
-	   The memory in userland is ok to be fragmented into various different
-	   vmas. It is okay to mmap() and munmap() stuff in this slot after
-	   doing this call at any time */
-
-	if (mem->slot)
-		return -EINVAL;
-
-	if (mem->guest_phys_addr)
-		return -EINVAL;
+	/* A few sanity checks. We can have memory slots which have to be
+	   located/ended at a segment boundary (1MB). The memory in userland is
+	   ok to be fragmented into various different vmas. It is okay to mmap()
+	   and munmap() stuff in this slot after doing this call at any time */
 
 	if (mem->userspace_addr & 0xffffful)
 		return -EINVAL;
@@ -997,19 +991,26 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (mem->memory_size & 0xffffful)
 		return -EINVAL;
 
-	if (!user_alloc)
-		return -EINVAL;
-
 	return 0;
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
-				   struct kvm_memory_slot old,
-				   bool user_alloc)
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 	int rc;
 
+	/* If the basics of the memslot do not change, we do not want
+	 * to update the gmap. Every update causes several unnecessary
+	 * segment translation exceptions. This is usually handled just
+	 * fine by the normal fault handler + gmap, but it will also
+	 * cause faults on the prefix page of running guest CPUs.
+	 */
+	if (old->userspace_addr == mem->userspace_addr &&
+	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
+	    old->npages * PAGE_SIZE == mem->memory_size)
+		return;
 
 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
 			      mem->guest_phys_addr, mem->memory_size);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 4d89d64a8161..efc14f687265 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -110,12 +110,12 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
 void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
-int kvm_s390_inject_vm(struct kvm *kvm,
-		       struct kvm_s390_interrupt *s390int);
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
-			 struct kvm_s390_interrupt *s390int);
-int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
-int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+int __must_check kvm_s390_inject_vm(struct kvm *kvm,
+				    struct kvm_s390_interrupt *s390int);
+int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+				      struct kvm_s390_interrupt *s390int);
+int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+int __must_check kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 cr6, u64 schid);
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0ef9894606e5..6bbd7b5a0bbe 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -14,6 +14,8 @@
 #include <linux/kvm.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
+#include <linux/compat.h>
+#include <asm/asm-offsets.h>
 #include <asm/current.h>
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
@@ -35,31 +37,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
-	if (operand2 & 3) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (operand2 & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	/* get the value */
-	if (get_guest_u32(vcpu, operand2, &address)) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (get_guest(vcpu, address, (u32 __user *) operand2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	address = address & 0x7fffe000u;
 
 	/* make sure that the new value is valid memory */
 	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
-	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	kvm_s390_set_prefix(vcpu, address);
 
 	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
 	trace_kvm_s390_handle_prefix(vcpu, 1, address);
-out:
 	return 0;
 }
 
@@ -73,49 +68,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
-	if (operand2 & 3) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (operand2 & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	address = vcpu->arch.sie_block->prefix;
 	address = address & 0x7fffe000u;
 
 	/* get the value */
-	if (put_guest_u32(vcpu, operand2, address)) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (put_guest(vcpu, address, (u32 __user *)operand2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
 	trace_kvm_s390_handle_prefix(vcpu, 0, address);
-out:
 	return 0;
 }
 
 static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 {
 	u64 useraddr;
-	int rc;
 
 	vcpu->stat.instruction_stap++;
 
 	useraddr = kvm_s390_get_base_disp_s(vcpu);
 
-	if (useraddr & 1) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (useraddr & 1)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
-	if (rc == -EFAULT) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
 	trace_kvm_s390_handle_stap(vcpu, useraddr);
-out:
 	return 0;
 }
 
@@ -129,36 +112,38 @@ static int handle_skey(struct kvm_vcpu *vcpu)
 
 static int handle_tpi(struct kvm_vcpu *vcpu)
 {
-	u64 addr;
 	struct kvm_s390_interrupt_info *inti;
+	u64 addr;
 	int cc;
 
 	addr = kvm_s390_get_base_disp_s(vcpu);
-
+	if (addr & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	cc = 0;
 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
-	if (inti) {
-		if (addr) {
-			/*
-			 * Store the two-word I/O interruption code into the
-			 * provided area.
-			 */
-			put_guest_u16(vcpu, addr, inti->io.subchannel_id);
-			put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
-			put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
-		} else {
-			/*
-			 * Store the three-word I/O interruption code into
-			 * the appropriate lowcore area.
-			 */
-			put_guest_u16(vcpu, 184, inti->io.subchannel_id);
-			put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
-			put_guest_u32(vcpu, 188, inti->io.io_int_parm);
-			put_guest_u32(vcpu, 192, inti->io.io_int_word);
-		}
-		cc = 1;
-	} else
-		cc = 0;
+	if (!inti)
+		goto no_interrupt;
+	cc = 1;
+	if (addr) {
+		/*
+		 * Store the two-word I/O interruption code into the
+		 * provided area.
+		 */
+		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
+		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
+		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
+	} else {
+		/*
+		 * Store the three-word I/O interruption code into
+		 * the appropriate lowcore area.
+		 */
+		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
+		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
+		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
+		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
+	}
 	kfree(inti);
+no_interrupt:
 	/* Set condition code and we're done. */
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
@@ -230,13 +215,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 
 	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			   &facility_list, sizeof(facility_list));
-	if (rc == -EFAULT)
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	else {
-		VCPU_EVENT(vcpu, 5, "store facility list value %x",
-			   facility_list);
-		trace_kvm_s390_handle_stfl(vcpu, facility_list);
-	}
+	if (rc)
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
+	trace_kvm_s390_handle_stfl(vcpu, facility_list);
 	return 0;
 }
 
@@ -249,112 +231,80 @@ static void handle_new_psw(struct kvm_vcpu *vcpu)
 
 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
-#define PSW_ADDR_24 0x00000000000fffffUL
+#define PSW_ADDR_24 0x0000000000ffffffUL
 #define PSW_ADDR_31 0x000000007fffffffUL
 
+static int is_valid_psw(psw_t *psw) {
+	if (psw->mask & PSW_MASK_UNASSIGNED)
+		return 0;
+	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
+		if (psw->addr & ~PSW_ADDR_31)
+			return 0;
+	}
+	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
+		return 0;
+	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
+		return 0;
+	return 1;
+}
+
 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 {
-	u64 addr;
+	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
 	psw_compat_t new_psw;
+	u64 addr;
 
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu,
 						   PGM_PRIVILEGED_OPERATION);
-
 	addr = kvm_s390_get_base_disp_s(vcpu);
-
-	if (addr & 7) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
-	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
-
-	if (!(new_psw.mask & PSW32_MASK_BASE)) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
-	vcpu->arch.sie_block->gpsw.mask =
-		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
-	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
-
-	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
-	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
-	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
-	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
-	     PSW_MASK_EA)) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	if (!(new_psw.mask & PSW32_MASK_BASE))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
+	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
+	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
+	if (!is_valid_psw(gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	handle_new_psw(vcpu);
-out:
 	return 0;
 }
 
 static int handle_lpswe(struct kvm_vcpu *vcpu)
 {
-	u64 addr;
 	psw_t new_psw;
+	u64 addr;
 
 	addr = kvm_s390_get_base_disp_s(vcpu);
-
-	if (addr & 7) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
-	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
-
-	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
-	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
-
-	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
-	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
-	      PSW_MASK_BA) &&
-	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
-	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
-	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
-	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
-	     PSW_MASK_EA)) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	vcpu->arch.sie_block->gpsw = new_psw;
+	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	handle_new_psw(vcpu);
-out:
 	return 0;
 }
 
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
 	u64 operand2;
-	int rc;
 
 	vcpu->stat.instruction_stidp++;
 
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
-	if (operand2 & 7) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (operand2 & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
-	if (rc == -EFAULT) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
-out:
 	return 0;
 }
 
@@ -394,8 +344,9 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
 	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
 	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
+	unsigned long mem = 0;
 	u64 operand2;
-	unsigned long mem;
+	int rc = 0;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -414,37 +365,37 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	case 2:
 		mem = get_zeroed_page(GFP_KERNEL);
 		if (!mem)
-			goto out_fail;
+			goto out_no_data;
 		if (stsi((void *) mem, fc, sel1, sel2))
-			goto out_mem;
+			goto out_no_data;
 		break;
 	case 3:
 		if (sel1 != 2 || sel2 != 2)
-			goto out_fail;
+			goto out_no_data;
 		mem = get_zeroed_page(GFP_KERNEL);
 		if (!mem)
-			goto out_fail;
+			goto out_no_data;
 		handle_stsi_3_2_2(vcpu, (void *) mem);
 		break;
 	default:
-		goto out_fail;
+		goto out_no_data;
 	}
 
 	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out_mem;
+		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+		goto out_exception;
 	}
 	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
 	free_page(mem);
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	vcpu->run->s.regs.gprs[0] = 0;
 	return 0;
-out_mem:
-	free_page(mem);
-out_fail:
+out_no_data:
 	/* condition code 3 */
 	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
-	return 0;
+out_exception:
+	free_page(mem);
+	return rc;
 }
 
 static const intercept_handler_t b2_handlers[256] = {
@@ -575,20 +526,13 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
 		return -EOPNOTSUPP;
 
-
-	/* we must resolve the address without holding the mmap semaphore.
-	 * This is ok since the userspace hypervisor is not supposed to change
-	 * the mapping while the guest queries the memory. Otherwise the guest
-	 * might crash or get wrong info anyway. */
-	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
-
 	down_read(&current->mm->mmap_sem);
+	user_address = __gmap_translate(address1, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(user_address))
+		goto out_inject;
 	vma = find_vma(current->mm, user_address);
-	if (!vma) {
-		up_read(&current->mm->mmap_sem);
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	}
-
+	if (!vma)
+		goto out_inject;
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
 		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
@@ -597,6 +541,10 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 
 	up_read(&current->mm->mmap_sem);
 	return 0;
+
+out_inject:
+	up_read(&current->mm->mmap_sem);
+	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 }
 
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
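
Among the priv.c changes above, note the PSW_ADDR_24 fix: the old mask 0x00000000000fffffUL covered only 20 bits, while 24-bit mode architecturally allows addresses up to 0xffffff; is_valid_psw() then centralizes the mode checks that kvm_s390_handle_lpsw() and handle_lpswe() used to duplicate. A standalone restatement for illustration (the PSW_MASK_EA/PSW_MASK_BA values are copied from the s390 headers as an assumption; this is not kernel code):

#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_EA 0x0000000100000000UL	/* extended addressing */
#define PSW_MASK_BA 0x0000000080000000UL	/* basic addressing */
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_ADDR_24 0x0000000000ffffffUL	/* the corrected mask */
#define PSW_ADDR_31 0x000000007fffffffUL

/* Mirrors the address checks of is_valid_psw():
 *   EA=0 BA=0 -> 24-bit, EA=0 BA=1 -> 31-bit,
 *   EA=1 BA=1 -> 64-bit, EA=1 BA=0 -> always invalid. */
static int valid_addr_for_mode(uint64_t mask, uint64_t addr)
{
	uint64_t mode = mask & PSW_MASK_ADDR_MODE;

	if (mode == PSW_MASK_BA)
		return !(addr & ~PSW_ADDR_31);
	if (mode == 0)
		return !(addr & ~PSW_ADDR_24);
	if (mode == PSW_MASK_EA)
		return 0;
	return 1;
}

int main(void)
{
	/* 0xabcdef is a legal 24-bit address only with the wider mask. */
	printf("%d\n", valid_addr_for_mode(0, 0xabcdefUL));		/* 1 */
	printf("%d\n", valid_addr_for_mode(PSW_MASK_EA, 0x0UL));	/* 0 */
	return 0;
}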