Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/Kconfig     |   1
-rw-r--r--  arch/s390/kvm/Makefile    |   2
-rw-r--r--  arch/s390/kvm/diag.c      |   2
-rw-r--r--  arch/s390/kvm/gaccess.h   | 243
-rw-r--r--  arch/s390/kvm/intercept.c |  38
-rw-r--r--  arch/s390/kvm/interrupt.c |   5
-rw-r--r--  arch/s390/kvm/kvm-s390.c  |  71
-rw-r--r--  arch/s390/kvm/kvm-s390.h  |  28
-rw-r--r--  arch/s390/kvm/priv.c      |  49
-rw-r--r--  arch/s390/kvm/sie64a.S    |  98
-rw-r--r--  arch/s390/kvm/sigp.c      |  12
11 files changed, 307 insertions(+), 242 deletions(-)
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index f66a1bdbb61..a21634173a6 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -37,6 +37,5 @@ config KVM
 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
 source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 860d26514c0..3975722bb19 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
-kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
+kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9e4c84187cf..5a5c084cc01 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -42,7 +42,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	}
 
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 03c716a0f01..c86f6ae43f7 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,5 +1,5 @@
 /*
- * gaccess.h - access guest memory
+ * access.h - access guest memory
  *
  * Copyright IBM Corp. 2008,2009
  *
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
 					       unsigned long guestaddr)
 {
 	unsigned long prefix = vcpu->arch.sie_block->prefix;
-	unsigned long origin = vcpu->arch.sie_block->gmsor;
-	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
 	if (guestaddr < 2 * PAGE_SIZE)
 		guestaddr += prefix;
 	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
 		guestaddr -= prefix;
 
-	if (guestaddr > memsize)
-		return (void __user __force *) ERR_PTR(-EFAULT);
-
-	guestaddr += origin;
-
-	return (void __user *) guestaddr;
+	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
 }
 
 static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 
 static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
 				       unsigned long guestdest,
-				       const void *from, unsigned long n)
+				       void *from, unsigned long n)
 {
 	int rc;
 	unsigned long i;
-	const u8 *data = from;
+	u8 *data = from;
 
 	for (i = 0; i < n; i++) {
 		rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
+				       unsigned long guestdest,
+				       void *from, unsigned long n)
+{
+	int r;
+	void __user *uptr;
+	unsigned long size;
+
+	if (guestdest + n < guestdest)
+		return -EFAULT;
+
+	/* simple case: all within one segment table entry? */
+	if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
+		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
+
+		if (IS_ERR((void __force *) uptr))
+			return PTR_ERR((void __force *) uptr);
+
+		r = copy_to_user(uptr, from, n);
+
+		if (r)
+			r = -EFAULT;
+
+		goto out;
+	}
+
+	/* copy first segment */
+	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+	if (IS_ERR((void __force *) uptr))
+		return PTR_ERR((void __force *) uptr);
+
+	size = PMD_SIZE - (guestdest & ~PMD_MASK);
+
+	r = copy_to_user(uptr, from, size);
+
+	if (r) {
+		r = -EFAULT;
+		goto out;
+	}
+	from += size;
+	n -= size;
+	guestdest += size;
+
+	/* copy full segments */
+	while (n >= PMD_SIZE) {
+		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+		if (IS_ERR((void __force *) uptr))
+			return PTR_ERR((void __force *) uptr);
+
+		r = copy_to_user(uptr, from, PMD_SIZE);
+
+		if (r) {
+			r = -EFAULT;
+			goto out;
+		}
+		from += PMD_SIZE;
+		n -= PMD_SIZE;
+		guestdest += PMD_SIZE;
+	}
+
+	/* copy the tail segment */
+	if (n) {
+		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+		if (IS_ERR((void __force *) uptr))
+			return PTR_ERR((void __force *) uptr);
+
+		r = copy_to_user(uptr, from, n);
+
+		if (r)
+			r = -EFAULT;
+	}
+out:
+	return r;
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+					 unsigned long guestdest,
+					 void *from, unsigned long n)
+{
+	return __copy_to_guest_fast(vcpu, guestdest, from, n);
+}
+
 static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
-				const void *from, unsigned long n)
+				void *from, unsigned long n)
 {
 	unsigned long prefix = vcpu->arch.sie_block->prefix;
-	unsigned long origin = vcpu->arch.sie_block->gmsor;
-	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
 	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
 		goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
 	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
 		guestdest -= prefix;
 
-	if (guestdest + n > memsize)
-		return -EFAULT;
-
-	if (guestdest + n < guestdest)
-		return -EFAULT;
-
-	guestdest += origin;
-
-	return copy_to_user((void __user *) guestdest, from, n);
+	return __copy_to_guest_fast(vcpu, guestdest, from, n);
 slowpath:
 	return __copy_to_guest_slow(vcpu, guestdest, from, n);
 }
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
 	return 0;
 }
 
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-				  unsigned long guestsrc, unsigned long n)
+static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
+					 unsigned long guestsrc,
+					 unsigned long n)
 {
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
-	unsigned long origin = vcpu->arch.sie_block->gmsor;
-	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+	int r;
+	void __user *uptr;
+	unsigned long size;
 
-	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
-		goto slowpath;
+	if (guestsrc + n < guestsrc)
+		return -EFAULT;
 
-	if ((guestsrc < prefix) && (guestsrc + n > prefix))
-		goto slowpath;
+	/* simple case: all within one segment table entry? */
+	if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
+		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
 
-	if ((guestsrc < prefix + 2 * PAGE_SIZE)
-	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
-		goto slowpath;
+		if (IS_ERR((void __force *) uptr))
+			return PTR_ERR((void __force *) uptr);
 
-	if (guestsrc < 2 * PAGE_SIZE)
-		guestsrc += prefix;
-	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
-		guestsrc -= prefix;
+		r = copy_from_user(to, uptr, n);
 
-	if (guestsrc + n > memsize)
-		return -EFAULT;
+		if (r)
+			r = -EFAULT;
 
-	if (guestsrc + n < guestsrc)
-		return -EFAULT;
+		goto out;
+	}
 
-	guestsrc += origin;
+	/* copy first segment */
+	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
 
-	return copy_from_user(to, (void __user *) guestsrc, n);
-slowpath:
-	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
+	if (IS_ERR((void __force *) uptr))
+		return PTR_ERR((void __force *) uptr);
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
-					 unsigned long guestdest,
-					 const void *from, unsigned long n)
-{
-	unsigned long origin = vcpu->arch.sie_block->gmsor;
-	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+	size = PMD_SIZE - (guestsrc & ~PMD_MASK);
 
-	if (guestdest + n > memsize)
-		return -EFAULT;
+	r = copy_from_user(to, uptr, size);
 
-	if (guestdest + n < guestdest)
-		return -EFAULT;
+	if (r) {
+		r = -EFAULT;
+		goto out;
+	}
+	to += size;
+	n -= size;
+	guestsrc += size;
+
+	/* copy full segments */
+	while (n >= PMD_SIZE) {
+		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
+
+		if (IS_ERR((void __force *) uptr))
+			return PTR_ERR((void __force *) uptr);
+
+		r = copy_from_user(to, uptr, PMD_SIZE);
+
+		if (r) {
+			r = -EFAULT;
+			goto out;
+		}
+		to += PMD_SIZE;
+		n -= PMD_SIZE;
+		guestsrc += PMD_SIZE;
+	}
+
+	/* copy the tail segment */
+	if (n) {
+		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
 
-	guestdest += origin;
+		if (IS_ERR((void __force *) uptr))
+			return PTR_ERR((void __force *) uptr);
 
-	return copy_to_user((void __user *) guestdest, from, n);
+		r = copy_from_user(to, uptr, n);
+
+		if (r)
+			r = -EFAULT;
+	}
+out:
+	return r;
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
 					   unsigned long guestsrc,
 					   unsigned long n)
 {
-	unsigned long origin = vcpu->arch.sie_block->gmsor;
-	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+}
 
-	if (guestsrc + n > memsize)
-		return -EFAULT;
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+				  unsigned long guestsrc, unsigned long n)
+{
+	unsigned long prefix = vcpu->arch.sie_block->prefix;
 
-	if (guestsrc + n < guestsrc)
-		return -EFAULT;
+	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+		goto slowpath;
 
-	guestsrc += origin;
+	if ((guestsrc < prefix) && (guestsrc + n > prefix))
+		goto slowpath;
+
+	if ((guestsrc < prefix + 2 * PAGE_SIZE)
+	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+		goto slowpath;
+
+	if (guestsrc < 2 * PAGE_SIZE)
+		guestsrc += prefix;
+	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+		guestsrc -= prefix;
 
-	return copy_from_user(to, (void __user *) guestsrc, n);
+	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+slowpath:
+	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 #endif
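
Note on the new copy helpers: with the gmap, guest real memory is no longer a single contiguous block of host memory starting at gmsor. Each 1 MB segment (PMD_SIZE on s390x) can be mapped independently, so a multi-segment copy has to re-translate through gmap_fault() at every segment boundary, hence the first/full/tail split in __copy_to_guest_fast() and __copy_from_guest_fast(). The user-space sketch below condenses that split into one loop; translate(), SEG_SIZE and SEG_MASK are hypothetical stand-ins for gmap_fault(), PMD_SIZE and PMD_MASK.

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    #define SEG_SIZE (1UL << 20)        /* stand-in for PMD_SIZE (1 MB) */
    #define SEG_MASK (~(SEG_SIZE - 1))  /* stand-in for PMD_MASK */

    /* stand-in for gmap_fault(): resolves a guest address to a host
     * pointer that is only valid up to the next segment boundary */
    extern void *translate(unsigned long guest_addr);

    static int chunked_copy_to_guest(unsigned long guestdest,
                                     const void *from, size_t n)
    {
        if (guestdest + n < guestdest)      /* wrap check, as in the diff */
            return -EFAULT;

        while (n) {
            /* bytes left up to the next segment boundary */
            size_t chunk = SEG_SIZE - (guestdest & ~SEG_MASK);
            void *host;

            if (chunk > n)
                chunk = n;
            host = translate(guestdest);
            if (!host)
                return -EFAULT;
            memcpy(host, from, chunk);
            from = (const char *) from + chunk;
            guestdest += chunk;
            n -= chunk;
        }
        return 0;
    }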
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f7b6df45d8b..02434543eab 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
 	[0xae] = kvm_s390_handle_sigp,
 	[0xb2] = kvm_s390_handle_b2,
 	[0xb7] = handle_lctl,
+	[0xe5] = kvm_s390_handle_e5,
 	[0xeb] = handle_lctlg,
 };
 
@@ -131,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	vcpu->stat.exit_stop_request++;
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -148,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+		atomic_set_mask(CPUSTAT_STOPPED,
+				&vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
 		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
 		rc = -EOPNOTSUPP;
@@ -159,22 +161,42 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
+	unsigned long vmaddr;
 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
 	int rc;
 
 	vcpu->stat.exit_validity++;
-	if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
-		<= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
-		rc = fault_in_pages_writeable((char __user *)
-			 vcpu->arch.sie_block->gmsor +
-			 vcpu->arch.sie_block->prefix,
-			 2*PAGE_SIZE);
-		if (rc)
+	if (viwhy == 0x37) {
+		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
+				    vcpu->arch.gmap);
+		if (IS_ERR_VALUE(vmaddr)) {
+			rc = -EOPNOTSUPP;
+			goto out;
+		}
+		rc = fault_in_pages_writeable((char __user *) vmaddr,
+					      PAGE_SIZE);
+		if (rc) {
+			/* user will receive sigsegv, exit to user */
+			rc = -EOPNOTSUPP;
+			goto out;
+		}
+		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
+				    vcpu->arch.gmap);
+		if (IS_ERR_VALUE(vmaddr)) {
+			rc = -EOPNOTSUPP;
+			goto out;
+		}
+		rc = fault_in_pages_writeable((char __user *) vmaddr,
+					      PAGE_SIZE);
+		if (rc) {
 			/* user will receive sigsegv, exit to user */
 			rc = -EOPNOTSUPP;
+			goto out;
+		}
 	} else
 		rc = -EOPNOTSUPP;
 
+out:
 	if (rc)
 		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
 			   viwhy);
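
Note on the reworked handle_validity(): the old code could fault in the 8 KB prefix area with a single fault_in_pages_writeable() call because guest memory was one linear host range. Under the gmap the two prefix pages may resolve to discontiguous host addresses, so each page is now translated and faulted in on its own. A hedged sketch of that per-page pattern; resolve_page() and make_writable() are hypothetical stand-ins for the gmap_fault() / fault_in_pages_writeable() pair.

    #include <errno.h>

    #define PAGE_SIZE 4096UL

    extern long resolve_page(unsigned long guest_addr);  /* < 0 on error */
    extern int make_writable(long host_addr, unsigned long len);

    static int fault_in_prefix(unsigned long prefix)
    {
        unsigned long i;

        /* the lowcore prefix area spans two guest pages */
        for (i = 0; i < 2; i++) {
            long host = resolve_page(prefix + i * PAGE_SIZE);

            if (host < 0)
                return -EOPNOTSUPP;          /* leave it to userspace */
            if (make_writable(host, PAGE_SIZE))
                return -EOPNOTSUPP;          /* user will get SIGSEGV */
        }
        return 0;
    }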
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 35c21bf910c..d4bd4c73faa 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -128,6 +128,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
+		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
+		if (rc == -EFAULT)
+			exception = 1;
+
 		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
 			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 		if (rc == -EFAULT)
@@ -220,6 +224,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
 		if (rc == -EFAULT)
 			exception = 1;
+		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 		break;
 
 	case KVM_S390_PROGRAM_INT:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 67345ae7ce8..8cdb1bd5856 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
+	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -122,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_S390_PSW:
+	case KVM_CAP_S390_GMAP:
 		r = 1;
 		break;
 	default:
@@ -189,7 +191,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
+	kvm->arch.gmap = gmap_alloc(current->mm);
+	if (!kvm->arch.gmap)
+		goto out_nogmap;
+
 	return 0;
+out_nogmap:
+	debug_unregister(kvm->arch.dbf);
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
 out_err:
@@ -234,11 +242,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
+	gmap_free(kvm->arch.gmap);
 }
 
 /* Section: vcpu related */
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 	return 0;
 }
 
@@ -254,10 +264,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
+	gmap_enable(vcpu->arch.gmap);
+	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -284,8 +298,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
-	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
+	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+						    CPUSTAT_SM |
+						    CPUSTAT_STOPPED);
 	vcpu->arch.sie_block->ecb = 6;
 	vcpu->arch.sie_block->eca = 0xC1002001U;
 	vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -301,11 +316,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 				      unsigned int id)
 {
-	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
-	int rc = -ENOMEM;
+	struct kvm_vcpu *vcpu;
+	int rc = -EINVAL;
 
+	if (id >= KVM_MAX_VCPUS)
+		goto out;
+
+	rc = -ENOMEM;
+
+	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
 	if (!vcpu)
-		goto out_nomem;
+		goto out;
 
 	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
 			       get_zeroed_page(GFP_KERNEL);
@@ -341,7 +362,7 @@ out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
 	kfree(vcpu);
-out_nomem:
+out:
 	return ERR_PTR(rc);
 }
 
@@ -404,7 +425,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
 	int rc = 0;
 
-	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
@@ -474,21 +495,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	sigset_t sigsaved;
 
 rerun_vcpu:
-	if (vcpu->requests)
-		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
-			kvm_s390_vcpu_set_mem(vcpu);
-
-	/* verify, that memory has been registered */
-	if (!vcpu->arch.sie_block->gmslm) {
-		vcpu_put(vcpu);
-		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
-		return -EINVAL;
-	}
-
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
 
@@ -545,7 +555,7 @@ rerun_vcpu:
 	return rc;
 }
 
-static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
 		       unsigned long n, int prefix)
 {
 	if (prefix)
@@ -562,7 +572,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
  */
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 {
-	const unsigned char archmode = 1;
+	unsigned char archmode = 1;
 	int prefix;
 
 	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
@@ -680,10 +690,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr)
 		return -EINVAL;
 
-	if (mem->userspace_addr & (PAGE_SIZE - 1))
+	if (mem->userspace_addr & 0xffffful)
 		return -EINVAL;
 
-	if (mem->memory_size & (PAGE_SIZE - 1))
+	if (mem->memory_size & 0xffffful)
 		return -EINVAL;
 
 	if (!user_alloc)
@@ -697,15 +707,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
-	int i;
-	struct kvm_vcpu *vcpu;
+	int rc;
 
-	/* request update of sie control block for all available vcpus */
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
-			continue;
-		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
-	}
+
+	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+		mem->guest_phys_addr, mem->memory_size);
+	if (rc)
+		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+	return;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
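
Taken together, the kvm-s390.c hooks above give the guest address space a clear lifecycle: allocated at VM creation, populated when userspace commits a memory region, switched in and out around each vcpu time slice, and torn down with the VM. The sketch below strings those calls together in order; every gmap_* call is taken from the diff itself, while the wrapper function is invented for illustration.

    /* condensed lifecycle sketch, kernel context assumed */
    static int gmap_lifecycle_sketch(struct kvm *kvm, struct kvm_vcpu *vcpu,
                                     struct kvm_userspace_memory_region *mem)
    {
        kvm->arch.gmap = gmap_alloc(current->mm);   /* kvm_arch_init_vm()   */
        if (!kvm->arch.gmap)
            return -ENOMEM;
        vcpu->arch.gmap = kvm->arch.gmap;           /* kvm_arch_vcpu_init() */

        /* kvm_arch_commit_memory_region() */
        gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                         mem->guest_phys_addr, mem->memory_size);

        gmap_enable(vcpu->arch.gmap);               /* kvm_arch_vcpu_load() */
        /* ... SIE runs; gmap_fault() resolves guest pages on demand ... */
        gmap_disable(vcpu->arch.gmap);              /* kvm_arch_vcpu_put()  */

        gmap_free(kvm->arch.gmap);                  /* kvm_arch_destroy_vm() */
        return 0;
    }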
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a7b7586626d..99b0b759711 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,35 +58,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 
-static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.sie_block->gmslm
-		- vcpu->arch.sie_block->gmsor
-		- VIRTIODESCSPACE + 1ul;
-}
-
-static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
-{
-	int idx;
-	struct kvm_memory_slot *mem;
-	struct kvm_memslots *memslots;
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	memslots = kvm_memslots(vcpu->kvm);
-
-	mem = &memslots->memslots[0];
-
-	vcpu->arch.sie_block->gmsor = mem->userspace_addr;
-	vcpu->arch.sie_block->gmslm =
-		mem->userspace_addr +
-		(mem->npages << PAGE_SHIFT) +
-		VIRTIODESCSPACE - 1ul;
-
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
 /* implemented in priv.c */
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 73c47bd95db..39162636108 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -326,3 +326,52 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
 	}
 	return -EOPNOTSUPP;
 }
+
+static int handle_tprot(struct kvm_vcpu *vcpu)
+{
+	int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+	int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
+	int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
+	int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
+	u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
+	u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
+	struct vm_area_struct *vma;
+
+	vcpu->stat.instruction_tprot++;
+
+	/* we only handle the Linux memory detection case:
+	 * access key == 0
+	 * guest DAT == off
+	 * everything else goes to userspace. */
+	if (address2 & 0xf0)
+		return -EOPNOTSUPP;
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
+		return -EOPNOTSUPP;
+
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm,
+		       (unsigned long) __guestaddr_to_user(vcpu, address1));
+	if (!vma) {
+		up_read(&current->mm->mmap_sem);
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	}
+
+	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
+		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
+	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
+		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
+
+	up_read(&current->mm->mmap_sem);
+	return 0;
+}
+
+int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
+{
+	/* For e5xx... instructions we only handle TPROT */
+	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+		return handle_tprot(vcpu);
+	return -EOPNOTSUPP;
+}
+
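
Note on handle_tprot(): TPROT is an SSE-format instruction, so the SIE block's ipb word carries both operands as base/displacement pairs: bits 31-28 hold B1, 27-16 D1, 15-12 B2 and 11-0 D2, exactly the shifts used above. The result goes into the condition code, which occupies bits 18-19 of the PSW; in the 64-bit PSW mask that is bit position 44, hence the 3ul << 44 mask (CC0: fetch and store allowed, CC1: fetch only, CC2: no access). Below is a small self-contained decoder mirroring those shifts; the example ipb value is made up.

    #include <stdint.h>
    #include <stdio.h>

    struct operand { int base; int disp; };

    /* decode B1/D1/B2/D2 from an SSE-format instruction parameter word */
    static void decode_sse(uint32_t ipb, struct operand *op1,
                           struct operand *op2)
    {
        op1->base = (ipb & 0xf0000000) >> 28;
        op1->disp = (ipb & 0x0fff0000) >> 16;
        op2->base = (ipb & 0x0000f000) >> 12;
        op2->disp =  ipb & 0x00000fff;
    }

    int main(void)
    {
        struct operand op1, op2;

        decode_sse(0x21000010, &op1, &op2); /* B1=2 D1=0x100 B2=0 D2=0x10 */
        printf("op1: base=r%d disp=0x%x, op2: base=r%d disp=0x%x\n",
               op1.base, op1.disp, op2.base, op2.disp);
        return 0;
    }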
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
deleted file mode 100644
index 5faa1b1b23f..00000000000
--- a/arch/s390/kvm/sie64a.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * sie64a.S - low level sie call
- *
- * Copyright IBM Corp. 2008,2010
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- *            Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
- */
-
-#include <linux/errno.h>
-#include <asm/asm-offsets.h>
-#include <asm/setup.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
-_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
-
-/*
- * offsets into stackframe
- *   SP_ = offsets into stack sie64 is called with
- *   SPI_ = offsets into irq stack
- */
-SP_GREGS = __SF_EMPTY
-SP_HOOK = __SF_EMPTY+8
-SP_GPP = __SF_EMPTY+16
-SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-
-
-	.macro SPP newpp
-	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_SPP
-	jz	0f
-	.insn	s,0xb2800000,\newpp
-0:
-	.endm
-
-sie_irq_handler:
-	SPP	__LC_CMF_HPP			# set host id
-	larl	%r2,sie_inst
-	clg	%r2,SPI_PSW+8(0,%r15)		# intercepted sie
-	jne	1f
-	xc	__LC_SIE_HOOK(8),__LC_SIE_HOOK
-	lg	%r2,__LC_THREAD_INFO		# pointer thread_info struct
-	tm	__TI_flags+7(%r2),_TIF_EXIT_SIE
-	jz	0f
-	larl	%r2,sie_exit			# work pending, leave sie
-	stg	%r2,SPI_PSW+8(0,%r15)
-	br	%r14
-0:	larl	%r2,sie_reenter			# re-enter with guest id
-	stg	%r2,SPI_PSW+8(0,%r15)
-1:	br	%r14
-
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-	.globl sie64a
-sie64a:
-	stg	%r3,SP_GREGS(%r15)		# save guest register save area
-	stmg	%r6,%r14,__SF_GPRS(%r15)	# save registers on entry
-	lgr	%r14,%r2			# pointer to sie control block
-	larl	%r5,sie_irq_handler
-	stg	%r2,SP_GPP(%r15)
-	stg	%r5,SP_HOOK(%r15)		# save hook target
-	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
-sie_reenter:
-	mvc	__LC_SIE_HOOK(8),SP_HOOK(%r15)
-	SPP	SP_GPP(%r15)			# set guest id
-sie_inst:
-	sie	0(%r14)
-	xc	__LC_SIE_HOOK(8),__LC_SIE_HOOK
-	SPP	__LC_CMF_HPP			# set host id
-sie_exit:
-	lg	%r14,SP_GREGS(%r15)
-	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
-	lghi	%r2,0
-	lmg	%r6,%r14,__SF_GPRS(%r15)
-	br	%r14
-
-sie_err:
-	xc	__LC_SIE_HOOK(8),__LC_SIE_HOOK
-	SPP	__LC_CMF_HPP			# set host id
-	lg	%r14,SP_GREGS(%r15)
-	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
-	lghi	%r2,-EFAULT
-	lmg	%r6,%r14,__SF_GPRS(%r15)
-	br	%r14
-
-	.section __ex_table,"a"
-	.quad	sie_inst,sie_err
-	.quad	sie_exit,sie_err
-	.quad	sie_reenter,sie_err
-	.previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 702276f5e2f..2a129bf44b9 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -57,8 +57,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	spin_lock(&fi->lock);
 	if (fi->local_int[cpu_addr] == NULL)
 		rc = 3; /* not operational */
-	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		 & CPUSTAT_RUNNING) {
+	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		   & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		rc = 1; /* status stored */
 	} else {
@@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	/* make sure that the new value is valid memory */
 	address = address & 0x7fffe000u;
-	if ((copy_from_user(&tmp, (void __user *)
-		(address + vcpu->arch.sie_block->gmsor) , 1)) ||
-	   (copy_from_user(&tmp, (void __user *)(address +
-			vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
+	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
 		*reg |= SIGP_STAT_INVALID_PARAMETER;
 		return 1; /* invalid parameter */
 	}
@@ -214,7 +212,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	spin_lock_bh(&li->lock);
 	/* cpu must be in stopped state */
-	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		rc = 1; /* incorrect state */
 		*reg &= SIGP_STAT_INCORRECT_STATE;
 		kfree(inti);
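
Note on the CPUSTAT changes that run through this series: the old code used CPUSTAT_RUNNING both for "vcpu is loaded on a host cpu" and for the architected stopped/operating state that SIGP must report, which are not the same thing. After this diff, kvm_arch_vcpu_load()/kvm_arch_vcpu_put() own CPUSTAT_RUNNING, while the architected state gets its own CPUSTAT_STOPPED bit (set at vcpu setup, on DIAG IPL and on SIGP stop, cleared on restart and in vcpu_run), so every "is this cpu addressable" test is inverted. A tiny sketch of the new polarity; the flag value is invented.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define CPUSTAT_STOPPED 0x01u  /* invented value, only polarity matters */

    /* as in __sigp_sense() above: operational unless explicitly stopped */
    static bool vcpu_operational(atomic_uint *cpuflags)
    {
        return !(atomic_load(cpuflags) & CPUSTAT_STOPPED);
    }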