Diffstat (limited to 'arch/s390/kvm/priv.c')
-rw-r--r--  arch/s390/kvm/priv.c  270
1 file changed, 109 insertions(+), 161 deletions(-)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0ef9894606e5..6bbd7b5a0bbe 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -14,6 +14,8 @@
 #include <linux/kvm.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
+#include <linux/compat.h>
+#include <asm/asm-offsets.h>
 #include <asm/current.h>
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
@@ -35,31 +37,24 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
-	if (operand2 & 3) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (operand2 & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	/* get the value */
-	if (get_guest_u32(vcpu, operand2, &address)) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (get_guest(vcpu, address, (u32 __user *) operand2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	address = address & 0x7fffe000u;
 
 	/* make sure that the new value is valid memory */
 	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
-	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	kvm_s390_set_prefix(vcpu, address);
 
 	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
 	trace_kvm_s390_handle_prefix(vcpu, 1, address);
-out:
 	return 0;
 }
 
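The recurring change throughout this patch is mechanical: kvm_s390_inject_program_int() already returns an int, so each handler can return the injection result directly instead of calling it and branching to a shared out: label. A minimal standalone sketch of the before/after control flow; inject() is a stub standing in for the real KVM call, not the kernel API:

	#define PGM_SPECIFICATION 0x06	/* s390 specification-exception code */

	static int inject(int code)	/* stub for kvm_s390_inject_program_int() */
	{
		(void)code;
		return 0;	/* the real call queues a program interrupt */
	}

	/* Before: inject, then jump to a common exit label. */
	static int handler_old(unsigned long operand2)
	{
		if (operand2 & 3) {
			inject(PGM_SPECIFICATION);	/* result dropped */
			goto out;
		}
		/* ... main work ... */
	out:
		return 0;
	}

	/* After: the injection result becomes the handler's result,
	 * so the out: label and the braces disappear. */
	static int handler_new(unsigned long operand2)
	{
		if (operand2 & 3)
			return inject(PGM_SPECIFICATION);
		/* ... main work ... */
		return 0;
	}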
@@ -73,49 +68,37 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
 	/* must be word boundary */
-	if (operand2 & 3) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (operand2 & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	address = vcpu->arch.sie_block->prefix;
 	address = address & 0x7fffe000u;
 
 	/* get the value */
-	if (put_guest_u32(vcpu, operand2, address)) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (put_guest(vcpu, address, (u32 __user *)operand2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
 	trace_kvm_s390_handle_prefix(vcpu, 0, address);
-out:
 	return 0;
 }
 
 static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 {
 	u64 useraddr;
-	int rc;
 
 	vcpu->stat.instruction_stap++;
 
 	useraddr = kvm_s390_get_base_disp_s(vcpu);
 
-	if (useraddr & 1) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (useraddr & 1)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
-	if (rc == -EFAULT) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
 	trace_kvm_s390_handle_stap(vcpu, useraddr);
-out:
 	return 0;
 }
 
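The old put_guest_u16/u32/u64 and get_guest_u32 helpers encoded the access width in the function name; the new put_guest()/get_guest() take a typed __user pointer and derive the width from its target type. The real macros live in the kvm-s390 headers; what follows is only a plausible reconstruction of the dispatch idea, with write_guest_sketch() as an assumed stub:

	/* Sketch: sizeof(*(gptr)) selects the access width, so one macro
	 * replaces the whole fixed-width family. Illustrative only, not
	 * the actual kernel macro. */
	#define put_guest_sketch(vcpu, x, gptr)				\
	({								\
		__typeof__(*(gptr)) __x = (x);				\
		write_guest_sketch((vcpu), (unsigned long)(gptr),	\
				   &__x, sizeof(__x));			\
	})

	/* Usage mirrors the patch:
	 *   put_guest_sketch(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr);
	 * stores 2 bytes, because the pointer type says so. */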
@@ -129,36 +112,38 @@ static int handle_skey(struct kvm_vcpu *vcpu)
 
 static int handle_tpi(struct kvm_vcpu *vcpu)
 {
-	u64 addr;
 	struct kvm_s390_interrupt_info *inti;
+	u64 addr;
 	int cc;
 
 	addr = kvm_s390_get_base_disp_s(vcpu);
-
+	if (addr & 3)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	cc = 0;
 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
-	if (inti) {
-		if (addr) {
-			/*
-			 * Store the two-word I/O interruption code into the
-			 * provided area.
-			 */
-			put_guest_u16(vcpu, addr, inti->io.subchannel_id);
-			put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
-			put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
-		} else {
-			/*
-			 * Store the three-word I/O interruption code into
-			 * the appropriate lowcore area.
-			 */
-			put_guest_u16(vcpu, 184, inti->io.subchannel_id);
-			put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
-			put_guest_u32(vcpu, 188, inti->io.io_int_parm);
-			put_guest_u32(vcpu, 192, inti->io.io_int_word);
-		}
-		cc = 1;
-	} else
-		cc = 0;
+	if (!inti)
+		goto no_interrupt;
+	cc = 1;
+	if (addr) {
+		/*
+		 * Store the two-word I/O interruption code into the
+		 * provided area.
+		 */
+		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
+		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
+		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
+	} else {
+		/*
+		 * Store the three-word I/O interruption code into
+		 * the appropriate lowcore area.
+		 */
+		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
+		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
+		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
+		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
+	}
 	kfree(inti);
+no_interrupt:
 	/* Set condition code and we're done. */
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
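Both here and in handle_stsi() below, the guest condition code is written straight into the PSW mask image: the two cc bits are PSW bits 18-19, which sit 44 bits up from the least-significant end of the 64-bit mask word. The same arithmetic as a small standalone helper:

	/* Set the condition code in a 64-bit s390 PSW mask image --
	 * exactly the "&= ~(3ul << 44)" / "|= (cc & 3ul) << 44" pair
	 * the handlers use inline. */
	static inline unsigned long psw_set_cc(unsigned long mask, unsigned int cc)
	{
		mask &= ~(3ul << 44);			/* clear old cc */
		mask |= (unsigned long)(cc & 3) << 44;	/* insert new cc */
		return mask;
	}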
@@ -230,13 +215,10 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 
 	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			   &facility_list, sizeof(facility_list));
-	if (rc == -EFAULT)
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	else {
-		VCPU_EVENT(vcpu, 5, "store facility list value %x",
-			   facility_list);
-		trace_kvm_s390_handle_stfl(vcpu, facility_list);
-	}
+	if (rc)
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
+	trace_kvm_s390_handle_stfl(vcpu, facility_list);
 	return 0;
 }
 
@@ -249,112 +231,80 @@ static void handle_new_psw(struct kvm_vcpu *vcpu)
 
 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
-#define PSW_ADDR_24 0x00000000000fffffUL
+#define PSW_ADDR_24 0x0000000000ffffffUL
 #define PSW_ADDR_31 0x000000007fffffffUL
 
+static int is_valid_psw(psw_t *psw) {
+	if (psw->mask & PSW_MASK_UNASSIGNED)
+		return 0;
+	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
+		if (psw->addr & ~PSW_ADDR_31)
+			return 0;
+	}
+	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
+		return 0;
+	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
+		return 0;
+	return 1;
+}
+
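Two things happen in this hunk: the scattered PSW validity checks are consolidated into is_valid_psw(), and the PSW_ADDR_24 constant is fixed. The old value 0x00000000000fffffUL masks only 20 bits, so a legitimate 24-bit-mode address could be rejected. Worked through with a sample address:

	/* addr = 0x00ab0000, a valid 24-bit address
	 * old:  addr & ~0x00000000000fffffUL = 0x00a00000  -> wrongly rejected
	 * new:  addr & ~0x0000000000ffffffUL = 0x00000000  -> accepted */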
 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 {
-	u64 addr;
+	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
 	psw_compat_t new_psw;
+	u64 addr;
 
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu,
 						   PGM_PRIVILEGED_OPERATION);
-
 	addr = kvm_s390_get_base_disp_s(vcpu);
-
-	if (addr & 7) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
-	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
-
-	if (!(new_psw.mask & PSW32_MASK_BASE)) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
-	vcpu->arch.sie_block->gpsw.mask =
-		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
-	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
-
-	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
-	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
-	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
-	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
-	     PSW_MASK_EA)) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	if (!(new_psw.mask & PSW32_MASK_BASE))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
+	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
+	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
+	if (!is_valid_psw(gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	handle_new_psw(vcpu);
-out:
 	return 0;
 }
 
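The lpsw rewrite also corrects the conversion itself: the old code copied new_psw.addr wholesale, leaving the addressing-mode flag (bit 0 of the compat address word) in the 64-bit address field. A standalone sketch of the expansion; the two constants are restated here on the assumption that they match the kernel's compat definitions:

	#include <stdint.h>

	#define PSW32_MASK_BASE   0x00080000U	/* compat-only base bit */
	#define PSW32_ADDR_AMODE  0x80000000U	/* amode flag in the address word */

	struct psw64 { uint64_t mask; uint64_t addr; };

	/* Expand an ESA/390 (compat) PSW the way the patched handler does. */
	static struct psw64 psw32_to_psw64(uint32_t mask32, uint32_t addr32)
	{
		struct psw64 psw;

		/* compat mask becomes the high half, minus the base bit */
		psw.mask  = (uint64_t)(mask32 & ~PSW32_MASK_BASE) << 32;
		/* the amode flag migrates into the mask (bit 31, i.e. BA) */
		psw.mask |= addr32 & PSW32_ADDR_AMODE;
		/* the address keeps only the real instruction address */
		psw.addr  = addr32 & ~PSW32_ADDR_AMODE;
		return psw;
	}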
 static int handle_lpswe(struct kvm_vcpu *vcpu)
 {
-	u64 addr;
 	psw_t new_psw;
+	u64 addr;
 
 	addr = kvm_s390_get_base_disp_s(vcpu);
-
-	if (addr & 7) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
-	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
-
-	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
-	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
-
-	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
-	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
-	      PSW_MASK_BA) &&
-	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
-	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
-	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
-	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
-	     PSW_MASK_EA)) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
-
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	vcpu->arch.sie_block->gpsw = new_psw;
+	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	handle_new_psw(vcpu);
-out:
 	return 0;
 }
 
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
 	u64 operand2;
-	int rc;
 
 	vcpu->stat.instruction_stidp++;
 
 	operand2 = kvm_s390_get_base_disp_s(vcpu);
 
-	if (operand2 & 7) {
-		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		goto out;
-	}
+	if (operand2 & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
-	if (rc == -EFAULT) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out;
-	}
+	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
-out:
 	return 0;
 }
 
@@ -394,8 +344,9 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
 	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
 	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
+	unsigned long mem = 0;
 	u64 operand2;
-	unsigned long mem;
+	int rc = 0;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -414,37 +365,37 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	case 2:
 		mem = get_zeroed_page(GFP_KERNEL);
 		if (!mem)
-			goto out_fail;
+			goto out_no_data;
 		if (stsi((void *) mem, fc, sel1, sel2))
-			goto out_mem;
+			goto out_no_data;
 		break;
 	case 3:
 		if (sel1 != 2 || sel2 != 2)
-			goto out_fail;
+			goto out_no_data;
 		mem = get_zeroed_page(GFP_KERNEL);
 		if (!mem)
-			goto out_fail;
+			goto out_no_data;
 		handle_stsi_3_2_2(vcpu, (void *) mem);
 		break;
 	default:
-		goto out_fail;
+		goto out_no_data;
 	}
 
 	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
-		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-		goto out_mem;
+		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+		goto out_exception;
 	}
 	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
 	free_page(mem);
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	vcpu->run->s.regs.gprs[0] = 0;
 	return 0;
-out_mem:
-	free_page(mem);
-out_fail:
+out_no_data:
 	/* condition code 3 */
 	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
-	return 0;
+out_exception:
+	free_page(mem);
+	return rc;
 }
 
 static const intercept_handler_t b2_handlers[256] = {
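The reworked exit paths in handle_stsi() lean on two details: mem now starts at 0, and free_page() treats 0 as a no-op, so every exit can fall through a single free_page(mem). The "no data" failures set condition code 3 and fall into the cleanup; only the failed copy to the guest carries the injection result out through rc. The shape in isolation, with stub helpers that are not the kernel API:

	unsigned long alloc_buffer(void);	/* stub: get_zeroed_page() */
	int copy_out(unsigned long mem);	/* stub: copy_to_guest_absolute() */
	void free_buffer(unsigned long mem);	/* stub: free_page() */
	void set_cc3(void);			/* stub: cc 3 in the guest PSW */

	static int stsi_shape(void)
	{
		unsigned long mem = 0;	/* 0 means "nothing allocated" */
		int rc = 0;

		mem = alloc_buffer();
		if (!mem)
			goto out_no_data;
		if (copy_out(mem)) {
			rc = -EFAULT;
			goto out_exception;
		}
		free_buffer(mem);
		return 0;
	out_no_data:
		set_cc3();		/* condition code 3, then fall through */
	out_exception:
		free_buffer(mem);	/* must tolerate mem == 0, as free_page does */
		return rc;
	}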
@@ -575,20 +526,13 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
 		return -EOPNOTSUPP;
 
-
-	/* we must resolve the address without holding the mmap semaphore.
-	 * This is ok since the userspace hypervisor is not supposed to change
-	 * the mapping while the guest queries the memory. Otherwise the guest
-	 * might crash or get wrong info anyway. */
-	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
-
 	down_read(&current->mm->mmap_sem);
+	user_address = __gmap_translate(address1, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(user_address))
+		goto out_inject;
 	vma = find_vma(current->mm, user_address);
-	if (!vma) {
-		up_read(&current->mm->mmap_sem);
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	}
-
+	if (!vma)
+		goto out_inject;
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
 		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
@@ -597,6 +541,10 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 
 	up_read(&current->mm->mmap_sem);
 	return 0;
+
+out_inject:
+	up_read(&current->mm->mmap_sem);
+	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 }
 
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
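handle_tprot() now resolves the guest address through __gmap_translate() while mmap_sem is already held, replacing the old unlocked __guestaddr_to_user() lookup, and both failure cases leave through one out_inject label that releases the lock before injecting the addressing exception. The locking skeleton reduced to stubs, none of which are the real kernel functions:

	long translate(unsigned long guest_addr);	/* stub: __gmap_translate() */
	int vma_present(long host_addr);	/* stub: find_vma() != NULL */
	int inject_addressing(void);	/* stub: kvm_s390_inject_program_int() */
	void lock(void);	/* stub: down_read(&current->mm->mmap_sem) */
	void unlock(void);	/* stub: up_read(&current->mm->mmap_sem) */

	static int tprot_shape(unsigned long guest_addr)
	{
		long host_addr;

		lock();
		host_addr = translate(guest_addr);
		if (host_addr < 0)		/* IS_ERR_VALUE() in the patch */
			goto out_inject;
		if (!vma_present(host_addr))
			goto out_inject;
		/* ... inspect vma flags, set the condition code ... */
		unlock();
		return 0;
	out_inject:
		unlock();	/* single unlock shared by both error paths */
		return inject_addressing();
	}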