Diffstat (limited to 'arch/s390/kvm/priv.c')
-rw-r--r--   arch/s390/kvm/priv.c | 274
1 file changed, 229 insertions(+), 45 deletions(-)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 6bbd7b5a0bbe..0da3e6eb6be6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,7 +1,7 @@
 /*
  * handling privileged instructions
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2013
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -20,6 +20,9 @@
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
 #include <asm/ptrace.h>
 #include <asm/compat.h>
 #include "gaccess.h"
@@ -34,6 +37,9 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 
         vcpu->stat.instruction_spx++;
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         operand2 = kvm_s390_get_base_disp_s(vcpu);
 
         /* must be word boundary */
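
Note: the guard added here is repeated in nearly every handler below, because the dispatcher-level privilege check is removed later in this diff. A minimal stand-alone model of the pattern (the struct, the handler and the bit value are illustrative stand-ins, not the kernel's definitions; what is tested is the PSW problem-state bit):

#include <stdio.h>

#define PSW_MASK_PSTATE 0x0001000000000000UL   /* problem-state bit (assumed value) */
#define PGM_PRIVILEGED_OP 2                    /* privileged-operation program check */

struct vcpu_model { unsigned long psw_mask; }; /* illustrative stand-in for the vcpu */

/* Return 0 on success, or the program-interruption code to inject. */
static int do_privileged_op(struct vcpu_model *v)
{
        if (v->psw_mask & PSW_MASK_PSTATE)     /* guest runs in problem state */
                return PGM_PRIVILEGED_OP;      /* refuse and inject a program check */
        /* ... emulate the privileged instruction here ... */
        return 0;
}

int main(void)
{
        struct vcpu_model supervisor = { 0 };
        struct vcpu_model problem = { PSW_MASK_PSTATE };

        printf("supervisor: %d, problem: %d\n",
               do_privileged_op(&supervisor), do_privileged_op(&problem));
        return 0;
}
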
@@ -65,6 +71,9 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 
         vcpu->stat.instruction_stpx++;
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         operand2 = kvm_s390_get_base_disp_s(vcpu);
 
         /* must be word boundary */
@@ -89,6 +98,9 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 
         vcpu->stat.instruction_stap++;
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         useraddr = kvm_s390_get_base_disp_s(vcpu);
 
         if (useraddr & 1)
@@ -105,7 +117,12 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 static int handle_skey(struct kvm_vcpu *vcpu)
 {
         vcpu->stat.instruction_storage_key++;
-        vcpu->arch.sie_block->gpsw.addr -= 4;
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        vcpu->arch.sie_block->gpsw.addr =
+                __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
         VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
         return 0;
 }
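
Note: handle_skey used to rewind the PSW by plain subtraction; __rewind_psw additionally masks the result to the current addressing mode, so the address wraps correctly in 24-/31-bit mode. A rough user-space sketch of that idea, with simplified mode handling (the real helper derives the mode from the PSW mask bits):

#include <stdio.h>

enum amode { AMODE_24, AMODE_31, AMODE_64 };   /* simplified; not the kernel's encoding */

static unsigned long rewind_addr(unsigned long addr, unsigned int ilen, enum amode m)
{
        unsigned long mask;

        switch (m) {
        case AMODE_24: mask = (1UL << 24) - 1; break;
        case AMODE_31: mask = (1UL << 31) - 1; break;
        default:       mask = ~0UL;            break;
        }
        /* Step back by the instruction length and wrap within the address space. */
        return (addr - ilen) & mask;
}

int main(void)
{
        /* Rewinding past 0 in 31-bit mode wraps instead of going "negative". */
        printf("%#lx\n", rewind_addr(0x2, 4, AMODE_31)); /* prints 0x7ffffffe */
        return 0;
}
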
@@ -129,9 +146,10 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
                  * Store the two-word I/O interruption code into the
                  * provided area.
                  */
-                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
-                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
-                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
+                if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
+                    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
+                    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
+                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
         } else {
                 /*
                  * Store the three-word I/O interruption code into
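
Note: the rewrite above leans on || short-circuiting, so the three guest stores run in order and the first failure aborts the whole sequence. The same shape in isolation (store_u16/store_u32 are stand-ins for put_guest, not kernel APIs):

#include <stdio.h>

/* Stand-ins for put_guest(): return nonzero on a failing guest store. */
static int store_u16(unsigned long addr, unsigned short val) { (void)addr; (void)val; return 0; }
static int store_u32(unsigned long addr, unsigned int val)   { (void)val; return addr == 0x1004; }

static int store_io_int_code(unsigned long addr)
{
        /* Stops at the first failing store, like the put_guest() chain above. */
        if (store_u16(addr, 0x0001)
            || store_u16(addr + 2, 0x0002)
            || store_u32(addr + 4, 0x12345678))
                return -1;      /* the real handler injects an addressing exception */
        return 0;
}

int main(void)
{
        printf("rc=%d\n", store_io_int_code(0x1000));   /* rc=-1: third store failed */
        return 0;
}
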
@@ -182,6 +200,9 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
 {
         VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         if (vcpu->kvm->arch.css_support) {
                 /*
                  * Most I/O instructions will be handled by userspace.
@@ -210,8 +231,12 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
         int rc;
 
         vcpu->stat.instruction_stfl++;
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         /* only pass the facility bits, which we can handle */
-        facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;
+        facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
 
         rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                            &facility_list, sizeof(facility_list));
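
Note: the STFL mask grows from 0xff00fff3 to 0xff82fff3, i.e. two extra facility bits are now passed through. In the MSB-first numbering used by the facility list these are bits 8 and 14; reading them as enhanced-DAT-1 and the nonquiescing key-setting facility would fit the PFMF support added further down, but that interpretation is mine, not stated in the diff. A quick check of which bit positions changed:

#include <stdio.h>

int main(void)
{
        unsigned int old_mask = 0xff00fff3, new_mask = 0xff82fff3;
        unsigned int diff = old_mask ^ new_mask;        /* 0x00820000 */

        for (int bit = 0; bit < 32; bit++)              /* bit 0 = most significant */
                if (diff & (0x80000000u >> bit))
                        printf("facility bit %d is newly passed to the guest\n", bit);
        return 0;                                       /* prints bits 8 and 14 */
}
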
@@ -255,8 +280,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
         u64 addr;
 
         if (gpsw->mask & PSW_MASK_PSTATE)
-                return kvm_s390_inject_program_int(vcpu,
-                                           PGM_PRIVILEGED_OPERATION);
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         addr = kvm_s390_get_base_disp_s(vcpu);
         if (addr & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -278,6 +303,9 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
         psw_t new_psw;
         u64 addr;
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         addr = kvm_s390_get_base_disp_s(vcpu);
         if (addr & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -296,6 +324,9 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 
         vcpu->stat.instruction_stidp++;
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         operand2 = kvm_s390_get_base_disp_s(vcpu);
 
         if (operand2 & 7)
@@ -351,16 +382,30 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
         vcpu->stat.instruction_stsi++;
         VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
 
-        operand2 = kvm_s390_get_base_disp_s(vcpu);
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        if (fc > 3) {
+                vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;     /* cc 3 */
+                return 0;
+        }
 
-        if (operand2 & 0xfff && fc > 0)
+        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
+            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-        switch (fc) {
-        case 0:
+        if (fc == 0) {
                 vcpu->run->s.regs.gprs[0] = 3 << 28;
-                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);  /* cc 0 */
                 return 0;
+        }
+
+        operand2 = kvm_s390_get_base_disp_s(vcpu);
+
+        if (operand2 & 0xfff)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        switch (fc) {
         case 1: /* same handling for 1 and 2 */
         case 2:
                 mem = get_zeroed_page(GFP_KERNEL);
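
Note: the reordered STSI prologue now follows the architectural checking order: an unsupported function code sets condition code 3, reserved bits in r0/r1 raise a specification exception before the operand address is even computed, and fc 0 merely reports the current level. A compact model of that decision ladder (the return strings stand in for the condition codes and program interruptions the real handler produces):

#include <stdio.h>

/* Models only the STSI validity checks, not the SYSIB stores themselves. */
static const char *stsi_check(unsigned int fc, unsigned long r0, unsigned long r1,
                              unsigned long operand2)
{
        if (fc > 3)
                return "cc 3: function code not supported";
        if ((r0 & 0x0fffff00) || (r1 & 0xffff0000))
                return "specification exception: reserved bits set";
        if (fc == 0)
                return "cc 0: current level placed in r0";
        if (operand2 & 0xfff)
                return "specification exception: operand not 4K aligned";
        return "store the requested system-information block";
}

int main(void)
{
        printf("%s\n", stsi_check(15, 0, 0, 0));
        printf("%s\n", stsi_check(2, 0x100, 0, 0x2000));
        printf("%s\n", stsi_check(1, 0, 0, 0x2345));
        return 0;
}
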
@@ -377,8 +422,6 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
                         goto out_no_data;
                 handle_stsi_3_2_2(vcpu, (void *) mem);
                 break;
-        default:
-                goto out_no_data;
         }
 
         if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
@@ -432,20 +475,14 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
         intercept_handler_t handler;
 
         /*
-         * a lot of B2 instructions are priviledged. We first check for
-         * the privileged ones, that we can handle in the kernel. If the
-         * kernel can handle this instruction, we check for the problem
-         * state bit and (a) handle the instruction or (b) send a code 2
-         * program check.
-         * Anything else goes to userspace.*/
+         * A lot of B2 instructions are priviledged. Here we check for
+         * the privileged ones, that we can handle in the kernel.
+         * Anything else goes to userspace.
+         */
         handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-        if (handler) {
-                if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-                        return kvm_s390_inject_program_int(vcpu,
-                                                   PGM_PRIVILEGED_OPERATION);
-                else
-                        return handler(vcpu);
-        }
+        if (handler)
+                return handler(vcpu);
+
         return -EOPNOTSUPP;
 }
 
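
Note: with each handler doing its own problem-state check, the B2 dispatcher shrinks to a plain table lookup on the low opcode byte. The same dispatch shape as stand-alone C (the handler name and sample opcode are invented for the sketch):

#include <stdio.h>

struct vcpu_model { unsigned short ipa; };              /* intercepted instruction code */
typedef int (*intercept_handler_t)(struct vcpu_model *);

static int handle_example(struct vcpu_model *v) { (void)v; return 0; }

static const intercept_handler_t b2_handlers[256] = {
        [0x10] = handle_example,                        /* sample opcode, not from the diff */
};

static int dispatch_b2(struct vcpu_model *v)
{
        intercept_handler_t handler = b2_handlers[v->ipa & 0x00ff];

        if (handler)
                return handler(v);      /* the handler does its own problem-state check */
        return -1;                      /* the real code returns -EOPNOTSUPP: go to userspace */
}

int main(void)
{
        struct vcpu_model known = { .ipa = 0xb210 }, unknown = { .ipa = 0xb2ff };

        printf("known: %d, unknown: %d\n", dispatch_b2(&known), dispatch_b2(&unknown));
        return 0;
}
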
@@ -453,8 +490,7 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
 {
         int reg1, reg2;
 
-        reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24;
-        reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
         /* This basically extracts the mask half of the psw. */
         vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
@@ -467,9 +503,88 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
         return 0;
 }
 
+#define PFMF_RESERVED 0xfffc0101UL
+#define PFMF_SK 0x00020000UL
+#define PFMF_CF 0x00010000UL
+#define PFMF_UI 0x00008000UL
+#define PFMF_FSC 0x00007000UL
+#define PFMF_NQ 0x00000800UL
+#define PFMF_MR 0x00000400UL
+#define PFMF_MC 0x00000200UL
+#define PFMF_KEY 0x000000feUL
+
+static int handle_pfmf(struct kvm_vcpu *vcpu)
+{
+        int reg1, reg2;
+        unsigned long start, end;
+
+        vcpu->stat.instruction_pfmf++;
+
+        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+
+        if (!MACHINE_HAS_PFMF)
+                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        /* Only provide non-quiescing support if the host supports it */
+        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
+            S390_lowcore.stfl_fac_list & 0x00020000)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        /* No support for conditional-SSKE */
+        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
+        case 0x00000000:
+                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
+                break;
+        case 0x00001000:
+                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
+                break;
+        /* We dont support EDAT2
+        case 0x00002000:
+                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
+                break;*/
+        default:
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+        }
+        while (start < end) {
+                unsigned long useraddr;
+
+                useraddr = gmap_translate(start, vcpu->arch.gmap);
+                if (IS_ERR((void *)useraddr))
+                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
+                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                }
+
+                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
+                        if (set_guest_storage_key(current->mm, useraddr,
+                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
+                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
+                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                }
+
+                start += PAGE_SIZE;
+        }
+        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
+                vcpu->run->s.regs.gprs[reg2] = end;
+        return 0;
+}
+
 static const intercept_handler_t b9_handlers[256] = {
         [0x8d] = handle_epsw,
         [0x9c] = handle_io_inst,
+        [0xaf] = handle_pfmf,
 };
 
 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
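
Note: in handle_pfmf the frame-size code (PFMF_FSC) only decides how far the operation runs: end is rounded up to the next 4 KB or 1 MB boundary, and the loop then always advances in PAGE_SIZE steps, so a 1 MB request touches 256 pages one by one. A worked example of the rounding used above (input values chosen arbitrarily):

#include <stdio.h>

static unsigned long frame_end(unsigned long start, unsigned int shift)
{
        /* Same shape as the diff: advance by one frame, then clear the low bits. */
        return (start + (1UL << shift)) & ~((1UL << shift) - 1);
}

int main(void)
{
        unsigned long start = 0x12345000UL;

        printf("4K frame: end = %#lx\n", frame_end(start, 12));  /* 0x12346000 */
        printf("1M frame: end = %#lx\n", frame_end(start, 20));  /* 0x12400000 */
        return 0;
}
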
@@ -478,29 +593,96 @@ int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
 
         /* This is handled just as for the B2 instructions. */
         handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-        if (handler) {
-                if ((handler != handle_epsw) &&
-                    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
-                        return kvm_s390_inject_program_int(vcpu,
-                                                   PGM_PRIVILEGED_OPERATION);
-                else
-                        return handler(vcpu);
-        }
+        if (handler)
+                return handler(vcpu);
+
         return -EOPNOTSUPP;
 }
 
+int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
+{
+        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+        u64 useraddr;
+        u32 val = 0;
+        int reg, rc;
+
+        vcpu->stat.instruction_lctl++;
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        useraddr = kvm_s390_get_base_disp_rs(vcpu);
+
+        if (useraddr & 3)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
+                   useraddr);
+        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
+
+        reg = reg1;
+        do {
+                rc = get_guest(vcpu, val, (u32 __user *) useraddr);
+                if (rc)
+                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
+                vcpu->arch.sie_block->gcr[reg] |= val;
+                useraddr += 4;
+                if (reg == reg3)
+                        break;
+                reg = (reg + 1) % 16;
+        } while (1);
+
+        return 0;
+}
+
+static int handle_lctlg(struct kvm_vcpu *vcpu)
+{
+        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+        u64 useraddr;
+        int reg, rc;
+
+        vcpu->stat.instruction_lctlg++;
+
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+        useraddr = kvm_s390_get_base_disp_rsy(vcpu);
+
+        if (useraddr & 7)
+                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+        reg = reg1;
+
+        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
+                   useraddr);
+        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
+
+        do {
+                rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
+                               (u64 __user *) useraddr);
+                if (rc)
+                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                useraddr += 8;
+                if (reg == reg3)
+                        break;
+                reg = (reg + 1) % 16;
+        } while (1);
+
+        return 0;
+}
+
 static const intercept_handler_t eb_handlers[256] = {
+        [0x2f] = handle_lctlg,
         [0x8a] = handle_io_inst,
 };
 
-int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
+int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 {
         intercept_handler_t handler;
 
-        /* All eb instructions that end up here are privileged. */
-        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-                return kvm_s390_inject_program_int(vcpu,
-                                                   PGM_PRIVILEGED_OPERATION);
         handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
         if (handler)
                 return handler(vcpu);
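
Note: both new handlers load the control-register range r1 through r3 with wrap-around, matching LCTL/LCTLG semantics: the index advances modulo 16 until r3 is reached. The traversal order in isolation (sample register numbers only):

#include <stdio.h>

int main(void)
{
        int reg1 = 14, reg3 = 2;        /* e.g. LCTL 14,2,...: loads cr14, cr15, cr0, cr1, cr2 */
        int reg = reg1;

        do {
                printf("load cr%d\n", reg);
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        return 0;
}
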
@@ -515,6 +697,9 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 
         vcpu->stat.instruction_tprot++;
 
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
         kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
 
         /* we only handle the Linux memory detection case:
@@ -560,8 +745,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
         u32 value;
 
         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-                return kvm_s390_inject_program_int(vcpu,
-                                           PGM_PRIVILEGED_OPERATION);
+                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
         if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                 return kvm_s390_inject_program_int(vcpu,