author		Paolo Bonzini <pbonzini@redhat.com>	2017-02-07 12:18:51 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2017-02-07 12:18:51 -0500
commit		8f00067a0d867e6ca63e266098ea18cf1a0b5e72
tree		3cf6668bc30d4a6248f06cbeb823906f5d1b1daa
parent		d9c0e59f92d491a7be5172eaf2d600b4953a0bd4
parent		fb7dc1d4ddce744c8d8e1aca19d4982102cf72e1
Merge tag 'kvm-s390-next-4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Fixes and features for 4.11 (via kvm/next)

- enable some simd extensions for guests
- enable nx for guests
- debug log for cpu model
- PER fixes
- remove bitwise annotation from ar_t
- detect guests in operation exception program check loops
- fix potential null-pointer dereference for ucontrol guests
- also contains merge for fix that went into 4.10 to avoid conflicts
-rw-r--r--	arch/s390/kvm/gaccess.c			26
-rw-r--r--	arch/s390/kvm/gaccess.h			19
-rw-r--r--	arch/s390/kvm/guestdbg.c		120
-rw-r--r--	arch/s390/kvm/intercept.c		25
-rw-r--r--	arch/s390/kvm/kvm-s390.c		46
-rw-r--r--	arch/s390/kvm/kvm-s390.h		12
-rw-r--r--	arch/s390/kvm/priv.c			30
-rw-r--r--	arch/s390/kvm/vsie.c			3
-rw-r--r--	arch/s390/mm/pgtable.c			2
-rw-r--r--	arch/s390/tools/gen_facilities.c	2
10 files changed, 225 insertions(+), 60 deletions(-)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 4aa8a7e2a1da..4492c9363178 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 		ipte_unlock_simple(vcpu);
 }
 
-static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
 			  enum gacc_mode mode)
 {
 	union alet alet;
@@ -465,7 +465,9 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
 struct trans_exc_code_bits {
 	unsigned long addr : 52; /* Translation-exception Address */
 	unsigned long fsi : 2;   /* Access Exception Fetch/Store Indication */
-	unsigned long : 6;
+	unsigned long : 2;
+	unsigned long b56 : 1;
+	unsigned long : 3;
 	unsigned long b60 : 1;
 	unsigned long b61 : 1;
 	unsigned long as : 2;    /* ASCE Identifier */
@@ -485,7 +487,7 @@ enum prot_type {
 };
 
 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
-		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
+		     u8 ar, enum gacc_mode mode, enum prot_type prot)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	struct trans_exc_code_bits *tec;
@@ -497,14 +499,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	switch (code) {
 	case PGM_PROTECTION:
 		switch (prot) {
+		case PROT_TYPE_LA:
+			tec->b56 = 1;
+			break;
+		case PROT_TYPE_KEYC:
+			tec->b60 = 1;
+			break;
 		case PROT_TYPE_ALC:
 			tec->b60 = 1;
 			/* FALL THROUGH */
 		case PROT_TYPE_DAT:
 			tec->b61 = 1;
 			break;
-		default: /* LA and KEYC set b61 to 0, other params undefined */
-			return code;
 		}
 		/* FALL THROUGH */
 	case PGM_ASCE_TYPE:
@@ -539,7 +545,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 }
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 unsigned long ga, ar_t ar, enum gacc_mode mode)
+			 unsigned long ga, u8 ar, enum gacc_mode mode)
 {
 	int rc;
 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
@@ -771,7 +777,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			    unsigned long *pages, unsigned long nr_pages,
 			    const union asce asce, enum gacc_mode mode)
 {
@@ -803,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -877,7 +883,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 			    unsigned long *gpa, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -910,7 +916,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;
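With the reworked trans_exc_code_bits layout above, a protection exception now encodes its cause in the translation-exception code (TEC): low-address protection sets bit 56, key-controlled protection sets bit 60, DAT protection sets bit 61, and access-list-controlled protection sets both 60 and 61. A minimal stand-alone sketch of that layout, assuming the s390 convention that bit 0 is the most significant bit of the 64-bit TEC; decode_tec() and its sample value are illustrative only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

static void decode_tec(uint64_t tec)
{
	uint64_t addr = tec >> 12;            /* bits 0-51: exception address */
	unsigned int fsi = (tec >> 10) & 0x3; /* bits 52-53: fetch/store indication */
	unsigned int b56 = (tec >> 7) & 0x1;  /* low-address protection */
	unsigned int b60 = (tec >> 3) & 0x1;  /* key-controlled/ALC protection */
	unsigned int b61 = (tec >> 2) & 0x1;  /* DAT (and ALC) protection */
	unsigned int as  = tec & 0x3;         /* bits 62-63: ASCE identifier */

	printf("addr=%#llx fsi=%u b56=%u b60=%u b61=%u as=%u\n",
	       (unsigned long long)addr, fsi, b56, b60, b61, as);
}

int main(void)
{
	decode_tec((0x2ULL << 12) | (1ULL << 2)); /* synthetic DAT-protection TEC */
	return 0;
}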
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 8756569ad938..7ce47fd36f28 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -162,11 +162,11 @@ enum gacc_mode {
 };
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode);
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -218,7 +218,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * if data has been changed in guest space in case of an exception.
  */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
@@ -238,7 +238,7 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 * data will be copied from guest space to kernel space.
 */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
@@ -247,10 +247,11 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 /**
  * read_guest_instr - copy instruction data from guest space to kernel space
  * @vcpu: virtual cpu
+ * @ga: guest address
  * @data: destination address in kernel space
  * @len: number of bytes to copy
  *
- * Copy @len bytes from the current psw address (guest space) to @data (kernel
+ * Copy @len bytes from the given address (guest space) to @data (kernel
  * space).
  *
  * The behaviour of read_guest_instr is identical to read_guest, except that
@@ -258,10 +259,10 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
  * address-space mode.
  */
 static inline __must_check
-int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
+int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+		     unsigned long len)
 {
-	return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
-			    GACC_IFETCH);
+	return access_guest(vcpu, ga, 0, data, len, GACC_IFETCH);
 }
 
 /**
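Note the signature change: read_guest_instr() now takes the guest address explicitly instead of always fetching from the current PSW address, which is what lets per_fetched_addr() below read EXECUTE targets. A sketch of the adjusted call, mirroring the vcpu_post_run_fault_in_sie() hunk further down (peek_current_opcode() is a hypothetical helper, not in the patch):

/* Fetch two instruction bytes at the current PSW address with the new API. */
static int peek_current_opcode(struct kvm_vcpu *vcpu, u16 *opcode)
{
	/* before this series: read_guest_instr(vcpu, opcode, 2) */
	return read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr,
				opcode, 2);
}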
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index d7c6a7f53ced..23d9a4e12da1 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -388,14 +388,13 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
 #define per_write_wp_event(code) \
 	(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
 
-static int debug_exit_required(struct kvm_vcpu *vcpu)
+static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
+			       unsigned long peraddr)
 {
-	u8 perc = vcpu->arch.sie_block->perc;
 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
 	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
-	unsigned long peraddr = vcpu->arch.sie_block->peraddr;
 
 	if (guestdbg_hw_bp_enabled(vcpu)) {
 		if (per_write_wp_event(perc) &&
@@ -437,36 +436,118 @@ exit_required:
 	return 1;
 }
 
+static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
+{
+	u8 exec_ilen = 0;
+	u16 opcode[3];
+	int rc;
+
+	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
+		/* PER address references the fetched or the execute instr */
+		*addr = vcpu->arch.sie_block->peraddr;
+		/*
+		 * Manually detect if we have an EXECUTE instruction. As
+		 * instructions are always 2 byte aligned we can read the
+		 * first two bytes unconditionally
+		 */
+		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
+		if (rc)
+			return rc;
+		if (opcode[0] >> 8 == 0x44)
+			exec_ilen = 4;
+		if ((opcode[0] & 0xff0f) == 0xc600)
+			exec_ilen = 6;
+	} else {
+		/* instr was suppressed, calculate the responsible instr */
+		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
+				     kvm_s390_get_ilen(vcpu));
+		if (vcpu->arch.sie_block->icptstatus & 0x01) {
+			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
+			if (!exec_ilen)
+				exec_ilen = 4;
+		}
+	}
+
+	if (exec_ilen) {
+		/* read the complete EXECUTE instr to detect the fetched addr */
+		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
+		if (rc)
+			return rc;
+		if (exec_ilen == 6) {
+			/* EXECUTE RELATIVE LONG - RIL-b format */
+			s32 rl = *((s32 *) (opcode + 1));
+
+			/* rl is a _signed_ 32 bit value specifying halfwords */
+			*addr += (u64)(s64) rl * 2;
+		} else {
+			/* EXECUTE - RX-a format */
+			u32 base = (opcode[1] & 0xf000) >> 12;
+			u32 disp = opcode[1] & 0x0fff;
+			u32 index = opcode[0] & 0x000f;
+
+			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
+			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
+			*addr += disp;
+		}
+		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
+	}
+	return 0;
+}
+
 #define guest_per_enabled(vcpu) \
 	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
 
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 {
+	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
+	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	const u8 ilen = kvm_s390_get_ilen(vcpu);
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_PER,
 		.per_code = PER_CODE_IFETCH,
 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
 	};
+	unsigned long fetched_addr;
+	int rc;
 
 	/*
 	 * The PSW points to the next instruction, therefore the intercepted
 	 * instruction generated a PER i-fetch event. PER address therefore
 	 * points at the previous PSW address (could be an EXECUTE function).
 	 */
-	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	if (!guestdbg_enabled(vcpu))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+
+	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
+		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
+
+	if (!guest_per_enabled(vcpu) ||
+	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
+		return 0;
+
+	rc = per_fetched_addr(vcpu, &fetched_addr);
+	if (rc < 0)
+		return rc;
+	if (rc)
+		/* instruction-fetching exceptions */
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+	if (in_addr_range(fetched_addr, cr10, cr11))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	return 0;
 }
 
-static void filter_guest_per_event(struct kvm_vcpu *vcpu)
+static int filter_guest_per_event(struct kvm_vcpu *vcpu)
 {
 	const u8 perc = vcpu->arch.sie_block->perc;
-	u64 peraddr = vcpu->arch.sie_block->peraddr;
 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	/* filter all events, demanded by the guest */
 	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
+	unsigned long fetched_addr;
+	int rc;
 
 	if (!guest_per_enabled(vcpu))
 		guest_perc = 0;
@@ -478,9 +559,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 		guest_perc &= ~PER_CODE_BRANCH;
 
 	/* filter "instruction-fetching" events */
-	if (guest_perc & PER_CODE_IFETCH &&
-	    !in_addr_range(peraddr, cr10, cr11))
-		guest_perc &= ~PER_CODE_IFETCH;
+	if (guest_perc & PER_CODE_IFETCH) {
+		rc = per_fetched_addr(vcpu, &fetched_addr);
+		if (rc < 0)
+			return rc;
+		/*
+		 * Don't inject an irq on exceptions. This would make handling
+		 * on icpt code 8 very complex (as PSW was already rewound).
+		 */
+		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
+			guest_perc &= ~PER_CODE_IFETCH;
+	}
 
 	/* All other PER events will be given to the guest */
 	/* TODO: Check altered address/address space */
@@ -489,6 +578,7 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 
 	if (!guest_perc)
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
+	return 0;
 }
 
 #define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
@@ -496,14 +586,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 #define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
 #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
 
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 {
-	int new_as;
+	int rc, new_as;
 
-	if (debug_exit_required(vcpu))
+	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
+				vcpu->arch.sie_block->peraddr))
 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
 
-	filter_guest_per_event(vcpu);
+	rc = filter_guest_per_event(vcpu);
+	if (rc)
+		return rc;
 
 	/*
 	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
@@ -532,4 +625,5 @@ void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 		    (pssec(vcpu) || old_ssec(vcpu)))
 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
 	}
+	return 0;
 }
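per_fetched_addr() above resolves the address a PER instruction-fetch event really refers to: for EXECUTE and EXECUTE RELATIVE LONG it is the execute target, not the EXECUTE instruction itself. The target arithmetic, extracted into a stand-alone sketch (gprs[] stands in for the guest's general purpose registers; both helpers are illustrative, not part of the patch):

#include <stdint.h>

/* EXECUTE (RX-a format): target = base + index + displacement. */
static uint64_t exec_rx_target(const uint16_t opcode[2], const uint64_t gprs[16])
{
	uint32_t base  = (opcode[1] & 0xf000) >> 12; /* B2 field */
	uint32_t disp  =  opcode[1] & 0x0fff;        /* D2 field */
	uint32_t index =  opcode[0] & 0x000f;        /* X2 field */
	uint64_t addr;

	/* register number 0 means "no register", not gprs[0] */
	addr  = base  ? gprs[base]  : 0;
	addr += index ? gprs[index] : 0;
	return addr + disp;
}

/* EXECUTE RELATIVE LONG (RIL-b): signed 32-bit offset counted in halfwords. */
static uint64_t exec_ril_target(uint64_t instr_addr, int32_t rl)
{
	return instr_addr + (int64_t)rl * 2;
}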
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 7a27eebab28a..59920f96ebc0 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -238,7 +238,9 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 	vcpu->stat.exit_program_interruption++;
 
 	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
-		kvm_s390_handle_per_event(vcpu);
+		rc = kvm_s390_handle_per_event(vcpu);
+		if (rc)
+			return rc;
 		/* the interrupt might have been filtered out completely */
 		if (vcpu->arch.sie_block->iprcc == 0)
 			return 0;
@@ -359,6 +361,9 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 
 static int handle_operexc(struct kvm_vcpu *vcpu)
 {
+	psw_t oldpsw, newpsw;
+	int rc;
+
 	vcpu->stat.exit_operation_exception++;
 	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
 				      vcpu->arch.sie_block->ipb);
@@ -369,6 +374,24 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
 		return -EOPNOTSUPP;
+	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
+	if (rc)
+		return rc;
+	/*
+	 * Avoid endless loops of operation exceptions, if the pgm new
+	 * PSW will cause a new operation exception.
+	 * The heuristic checks if the pgm new psw is within 6 bytes before
+	 * the faulting psw address (with same DAT, AS settings) and the
+	 * new psw is not a wait psw and the fault was not triggered by
+	 * problem state.
+	 */
+	oldpsw = vcpu->arch.sie_block->gpsw;
+	if (oldpsw.addr - newpsw.addr <= 6 &&
+	    !(newpsw.mask & PSW_MASK_WAIT) &&
+	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
+	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
+	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
+		return -EOPNOTSUPP;
 
 	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 }
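The new check in handle_operexc() above breaks guests that loop on operation exceptions: if the program-new PSW points at most 6 bytes before the faulting address with unchanged DAT and address-space settings, is not a wait PSW, and the fault did not come from problem state, the exception would re-trigger immediately, so KVM returns -EOPNOTSUPP to user space instead of re-injecting forever. The heuristic as a stand-alone predicate (an illustrative extraction, using the kernel's psw_t and PSW_MASK_* definitions):

static int pgm_new_psw_loops(psw_t oldpsw, psw_t newpsw)
{
	return oldpsw.addr - newpsw.addr <= 6 &&   /* lands just before the fault */
	       !(newpsw.mask & PSW_MASK_WAIT) &&   /* and is not a wait PSW */
	       !(oldpsw.mask & PSW_MASK_PSTATE) && /* fault was in supervisor state */
	       (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	       (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT);
}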
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bec71e902be3..502de74ea984 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -217,7 +217,7 @@ static void allow_cpu_feat(unsigned long nr)
 static inline int plo_test_bit(unsigned char nr)
 {
 	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
-	int cc = 3; /* subfunction not available */
+	int cc;
 
 	asm volatile(
 		/* Parameter registers are ignored for "test bit" */
@@ -442,6 +442,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	if (kvm_is_ucontrol(kvm))
+		return -EINVAL;
+
 	mutex_lock(&kvm->slots_lock);
 
 	r = -EINVAL;
@@ -505,6 +508,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	} else if (MACHINE_HAS_VX) {
 		set_kvm_facility(kvm->arch.model.fac_mask, 129);
 		set_kvm_facility(kvm->arch.model.fac_list, 129);
+		if (test_facility(134)) {
+			set_kvm_facility(kvm->arch.model.fac_mask, 134);
+			set_kvm_facility(kvm->arch.model.fac_list, 134);
+		}
+		if (test_facility(135)) {
+			set_kvm_facility(kvm->arch.model.fac_mask, 135);
+			set_kvm_facility(kvm->arch.model.fac_list, 135);
+		}
 		r = 0;
 	} else
 		r = -EINVAL;
@@ -821,6 +832,13 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	}
 	memcpy(kvm->arch.model.fac_list, proc->fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 kvm->arch.model.fac_list[0],
+		 kvm->arch.model.fac_list[1],
+		 kvm->arch.model.fac_list[2]);
 	} else
 		ret = -EFAULT;
 	kfree(proc);
@@ -894,6 +912,13 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	proc->ibc = kvm->arch.model.ibc;
 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 kvm->arch.model.fac_list[0],
+		 kvm->arch.model.fac_list[1],
+		 kvm->arch.model.fac_list[2]);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -916,7 +941,18 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
+	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
+		 mach->fac_mask[0],
+		 mach->fac_mask[1],
+		 mach->fac_mask[2]);
+	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 mach->fac_list[0],
+		 mach->fac_list[1],
+		 mach->fac_list[2]);
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -1437,7 +1473,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	/* Populate the facility mask initially. */
 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
@@ -1938,6 +1974,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
 		vcpu->arch.sie_block->ecb2 |= 0x08;
+	if (test_kvm_facility(vcpu->kvm, 130))
+		vcpu->arch.sie_block->ecb2 |= 0x20;
 	vcpu->arch.sie_block->eca = 0x1002000U;
 	if (sclp.has_cei)
 		vcpu->arch.sie_block->eca |= 0x80000000U;
@@ -2578,7 +2616,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 	 * to look up the current opcode to get the length of the instruction
 	 * to be able to forward the PSW.
 	 */
-	rc = read_guest_instr(vcpu, &opcode, 1);
+	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
 	ilen = insn_length(opcode);
 	if (rc < 0) {
 		return rc;
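Facility numbers such as 130, 134 and 135 above index a big-endian bit string: facility nr N lives in 64-bit word N/64, counted from the most significant bit. A sketch of that addressing, re-implemented here for illustration (the kernel's set_kvm_facility()/test_facility() address bits the same way):

#include <stdint.h>

static inline void fac_set_bit(uint64_t *fac_list, unsigned int nr)
{
	fac_list[nr >> 6] |= 1ULL << (63 - (nr & 63));
}

static inline int fac_test_bit(const uint64_t *fac_list, unsigned int nr)
{
	return (fac_list[nr >> 6] >> (63 - (nr & 63))) & 1;
}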
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 3a4e97f1a9e6..af9fa91a0c91 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -86,9 +86,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
-typedef u8 __bitwise ar_t;
-
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -101,7 +99,7 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
 
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
-					      ar_t *ar_b1, ar_t *ar_b2)
+					      u8 *ar_b1, u8 *ar_b2)
 {
 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -125,7 +123,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
 	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -140,7 +138,7 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
 }
 
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -379,7 +377,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
 void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
 
 /* support for Basic/Extended SCA handling */
 static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e18435355c16..1ecc1cffdf7c 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -54,7 +54,7 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	ar_t ar;
+	u8 ar;
 	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -79,7 +79,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_spx++;
 
@@ -117,7 +117,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stpx++;
 
@@ -147,7 +147,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	u16 vcpu_id = vcpu->vcpu_id;
 	u64 ga;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stap++;
 
@@ -380,7 +380,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	u32 tpi_data[3];
 	int rc;
 	u64 addr;
-	ar_t ar;
+	u8 ar;
 
 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
@@ -548,7 +548,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	psw_compat_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -575,7 +575,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -597,7 +597,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
 	u64 operand2;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stidp++;
 
@@ -644,7 +644,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	ASCEBC(mem->vm[0].cpi, 16);
 }
 
-static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
 				 u8 fc, u8 sel1, u16 sel2)
 {
 	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
@@ -663,7 +663,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	unsigned long mem = 0;
 	u64 operand2;
 	int rc = 0;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
@@ -970,7 +970,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctl++;
 
@@ -1009,7 +1009,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctl++;
 
@@ -1043,7 +1043,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctlg++;
 
@@ -1081,7 +1081,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctg++;
 
@@ -1132,7 +1132,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	unsigned long hva, gpa;
 	int ret = 0, cc = 0;
 	bool writable;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_tprot++;
 
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d8673e243f13..ed62c6d57d93 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -324,6 +324,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* Run-time-Instrumentation */
 	if (test_kvm_facility(vcpu->kvm, 64))
 		scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+	/* Instruction Execution Prevention */
+	if (test_kvm_facility(vcpu->kvm, 130))
+		scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 		scb_s->eca |= scb_o->eca & 0x00000001U;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
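In the nested (vSIE) case, a control bit from the guest's SCB is only mirrored into the shadow SCB when the facility is actually offered to the guest, so a nested hypervisor cannot enable IEP on hardware KVM did not expose. The pattern, generalized as a hypothetical helper (not in the patch):

static inline void shadow_ecb2_bit(struct kvm_vcpu *vcpu,
				   const struct kvm_s390_sie_block *scb_o,
				   struct kvm_s390_sie_block *scb_s,
				   int facility_nr, u8 bit_mask)
{
	if (test_kvm_facility(vcpu->kvm, facility_nr))
		scb_s->ecb2 |= scb_o->ecb2 & bit_mask;
}

/* usage for IEP: shadow_ecb2_bit(vcpu, scb_o, scb_s, 130, 0x20U); */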
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..f70db837ddc4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -741,7 +741,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 
 	pgste_set_unlock(ptep, new);
 	pte_unmap_unlock(ptep, ptl);
-	return 0;
+	return cc;
 }
 EXPORT_SYMBOL(reset_guest_reference_bit);
 
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 8cc53b1e6d03..0cf802de52a1 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -80,6 +80,8 @@ static struct facility_def facility_defs[] = {
 			76, /* msa extension 3 */
 			77, /* msa extension 4 */
 			78, /* enhanced-DAT 2 */
+			130, /* instruction-execution-protection */
+			131, /* enhanced-SOP 2 and side-effect */
 			-1  /* END */
 		}
 	},