aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/virtual/kvm/api.txt132
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/kvm/diag.c4
-rw-r--r--arch/s390/kvm/gaccess.c294
-rw-r--r--arch/s390/kvm/gaccess.h21
-rw-r--r--arch/s390/kvm/intercept.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c238
-rw-r--r--arch/s390/kvm/kvm-s390.h38
-rw-r--r--arch/s390/kvm/priv.c93
-rw-r--r--arch/s390/kvm/sigp.c4
-rw-r--r--include/uapi/linux/kvm.h46
11 files changed, 752 insertions, 124 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index ee47998ec368..0d7fc66289a0 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2716,6 +2716,110 @@ The fields in each entry are defined as follows:
2716 eax, ebx, ecx, edx: the values returned by the cpuid instruction for 2716 eax, ebx, ecx, edx: the values returned by the cpuid instruction for
2717 this function/index combination 2717 this function/index combination
2718 2718
27194.89 KVM_S390_MEM_OP
2720
2721Capability: KVM_CAP_S390_MEM_OP
2722Architectures: s390
2723Type: vcpu ioctl
2724Parameters: struct kvm_s390_mem_op (in)
2725Returns: = 0 on success,
2726 < 0 on generic error (e.g. -EFAULT or -ENOMEM),
2727 > 0 if an exception occurred while walking the page tables
2728
2729Read or write data from/to the logical (virtual) memory of a VCPU.
2730
2731Parameters are specified via the following structure:
2732
2733struct kvm_s390_mem_op {
2734 __u64 gaddr; /* the guest address */
2735 __u64 flags; /* flags */
2736 __u32 size; /* amount of bytes */
2737 __u32 op; /* type of operation */
2738 __u64 buf; /* buffer in userspace */
2739 __u8 ar; /* the access register number */
2740 __u8 reserved[31]; /* should be set to 0 */
2741};
2742
2743The type of operation is specified in the "op" field. It is either
2744KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or
2745KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The
2746KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check
2747whether the corresponding memory access would create an access exception
2748(without touching the data in the memory at the destination). In case an
2749access exception occurred while walking the MMU tables of the guest, the
2750ioctl returns a positive error number to indicate the type of exception.
2751This exception is also raised directly at the corresponding VCPU if the
2752flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.
2753
2754The start address of the memory region has to be specified in the "gaddr"
2755field, and the length of the region in the "size" field. "buf" is the buffer
2756supplied by the userspace application where the read data should be written
2757to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
2758is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL
2759when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access
2760register number to be used.
2761
2762The "reserved" field is meant for future extensions. It is not used by
2763KVM with the currently defined set of flags.
2764
27654.90 KVM_S390_GET_SKEYS
2766
2767Capability: KVM_CAP_S390_SKEYS
2768Architectures: s390
2769Type: vm ioctl
2770Parameters: struct kvm_s390_skeys
2771Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
2772 keys, negative value on error
2773
2774This ioctl is used to get guest storage key values on the s390
2775architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
2776
2777struct kvm_s390_skeys {
2778 __u64 start_gfn;
2779 __u64 count;
2780 __u64 skeydata_addr;
2781 __u32 flags;
2782 __u32 reserved[9];
2783};
2784
2785The start_gfn field is the number of the first guest frame whose storage keys
2786you want to get.
2787
2788The count field is the number of consecutive frames (starting from start_gfn)
2789whose storage keys to get. The count field must be at least 1 and the maximum
2790allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
2791will cause the ioctl to return -EINVAL.
2792
2793The skeydata_addr field is the address to a buffer large enough to hold count
2794bytes. This buffer will be filled with storage key data by the ioctl.
2795
27964.91 KVM_S390_SET_SKEYS
2797
2798Capability: KVM_CAP_S390_SKEYS
2799Architectures: s390
2800Type: vm ioctl
2801Parameters: struct kvm_s390_skeys
2802Returns: 0 on success, negative value on error
2803
2804This ioctl is used to set guest storage key values on the s390
2805architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
2806See section on KVM_S390_GET_SKEYS for struct definition.
2807
2808The start_gfn field is the number of the first guest frame whose storage keys
2809you want to set.
2810
2811The count field is the number of consecutive frames (starting from start_gfn)
2812whose storage keys to set. The count field must be at least 1 and the maximum
2813allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
2814will cause the ioctl to return -EINVAL.
2815
2816The skeydata_addr field is the address to a buffer containing count bytes of
2817storage keys. Each byte in the buffer will be set as the storage key for a
2818single frame starting at start_gfn for count frames.
2819
2820Note: If any architecturally invalid key value is found in the given data then
2821the ioctl will return -EINVAL.
2822
27195. The kvm_run structure 28235. The kvm_run structure
2720------------------------ 2824------------------------
2721 2825
@@ -3258,3 +3362,31 @@ Returns: 0 on success, negative value on error
3258Allows use of the vector registers introduced with z13 processor, and 3362Allows use of the vector registers introduced with z13 processor, and
3259provides for the synchronization between host and user space. Will 3363provides for the synchronization between host and user space. Will
3260return -EINVAL if the machine does not support vectors. 3364return -EINVAL if the machine does not support vectors.
3365
33667.4 KVM_CAP_S390_USER_STSI
3367
3368Architectures: s390
3369Parameters: none
3370
3371This capability allows post-handlers for the STSI instruction. After
3372initial handling in the kernel, KVM exits to user space with
3373KVM_EXIT_S390_STSI to allow user space to insert further data.
3374
3375Before exiting to userspace, kvm handlers should fill in s390_stsi field of
3376vcpu->run:
3377struct {
3378 __u64 addr;
3379 __u8 ar;
3380 __u8 reserved;
3381 __u8 fc;
3382 __u8 sel1;
3383 __u16 sel2;
3384} s390_stsi;
3385
3386@addr - guest address of STSI SYSIB
3387@fc - function code
3388@sel1 - selector 1
3389@sel2 - selector 2
3390@ar - access register number
3391
3392KVM handlers should exit to userspace with rc = -EREMOTE.
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 347a3333d618..b8d1e97fb201 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -562,9 +562,9 @@ struct kvm_arch{
562 int css_support; 562 int css_support;
563 int use_irqchip; 563 int use_irqchip;
564 int use_cmma; 564 int use_cmma;
565 int use_vectors;
566 int user_cpu_state_ctrl; 565 int user_cpu_state_ctrl;
567 int user_sigp; 566 int user_sigp;
567 int user_stsi;
568 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; 568 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
569 wait_queue_head_t ipte_wq; 569 wait_queue_head_t ipte_wq;
570 int ipte_lock_count; 570 int ipte_lock_count;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254afff250c..89140ddb998c 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
77 77
78 if (vcpu->run->s.regs.gprs[rx] & 7) 78 if (vcpu->run->s.regs.gprs[rx] & 7)
79 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 79 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
80 rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); 80 rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
81 if (rc) 81 if (rc)
82 return kvm_s390_inject_prog_cond(vcpu, rc); 82 return kvm_s390_inject_prog_cond(vcpu, rc);
83 if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) 83 if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
230 230
231int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) 231int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
232{ 232{
233 int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff; 233 int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
234 234
235 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 235 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
236 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 236 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 633fe9bd75a9..a7559f7207df 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include "kvm-s390.h" 11#include "kvm-s390.h"
12#include "gaccess.h" 12#include "gaccess.h"
13#include <asm/switch_to.h>
13 14
14union asce { 15union asce {
15 unsigned long val; 16 unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
207 unsigned long pfra : 52; /* Page-Frame Real Address */ 208 unsigned long pfra : 52; /* Page-Frame Real Address */
208}; 209};
209 210
211union alet {
212 u32 val;
213 struct {
214 u32 reserved : 7;
215 u32 p : 1;
216 u32 alesn : 8;
217 u32 alen : 16;
218 };
219};
220
221union ald {
222 u32 val;
223 struct {
224 u32 : 1;
225 u32 alo : 24;
226 u32 all : 7;
227 };
228};
229
230struct ale {
231 unsigned long i : 1; /* ALEN-Invalid Bit */
232 unsigned long : 5;
233 unsigned long fo : 1; /* Fetch-Only Bit */
234 unsigned long p : 1; /* Private Bit */
235 unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
236 unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
237 unsigned long : 32;
238 unsigned long : 1;
239 unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
240 unsigned long : 6;
241 unsigned long astesn : 32; /* ASTE Sequence Number */
242} __packed;
243
244struct aste {
245 unsigned long i : 1; /* ASX-Invalid Bit */
246 unsigned long ato : 29; /* Authority-Table Origin */
247 unsigned long : 1;
248 unsigned long b : 1; /* Base-Space Bit */
249 unsigned long ax : 16; /* Authorization Index */
250 unsigned long atl : 12; /* Authority-Table Length */
251 unsigned long : 2;
252 unsigned long ca : 1; /* Controlled-ASN Bit */
253 unsigned long ra : 1; /* Reusable-ASN Bit */
254 unsigned long asce : 64; /* Address-Space-Control Element */
255 unsigned long ald : 32;
256 unsigned long astesn : 32;
257 /* .. more fields there */
258} __packed;
210 259
211int ipte_lock_held(struct kvm_vcpu *vcpu) 260int ipte_lock_held(struct kvm_vcpu *vcpu)
212{ 261{
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
307 ipte_unlock_simple(vcpu); 356 ipte_unlock_simple(vcpu);
308} 357}
309 358
310static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) 359static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
360 int write)
361{
362 union alet alet;
363 struct ale ale;
364 struct aste aste;
365 unsigned long ald_addr, authority_table_addr;
366 union ald ald;
367 int eax, rc;
368 u8 authority_table;
369
370 if (ar >= NUM_ACRS)
371 return -EINVAL;
372
373 save_access_regs(vcpu->run->s.regs.acrs);
374 alet.val = vcpu->run->s.regs.acrs[ar];
375
376 if (ar == 0 || alet.val == 0) {
377 asce->val = vcpu->arch.sie_block->gcr[1];
378 return 0;
379 } else if (alet.val == 1) {
380 asce->val = vcpu->arch.sie_block->gcr[7];
381 return 0;
382 }
383
384 if (alet.reserved)
385 return PGM_ALET_SPECIFICATION;
386
387 if (alet.p)
388 ald_addr = vcpu->arch.sie_block->gcr[5];
389 else
390 ald_addr = vcpu->arch.sie_block->gcr[2];
391 ald_addr &= 0x7fffffc0;
392
393 rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
394 if (rc)
395 return rc;
396
397 if (alet.alen / 8 > ald.all)
398 return PGM_ALEN_TRANSLATION;
399
400 if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
401 return PGM_ADDRESSING;
402
403 rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
404 sizeof(struct ale));
405 if (rc)
406 return rc;
407
408 if (ale.i == 1)
409 return PGM_ALEN_TRANSLATION;
410 if (ale.alesn != alet.alesn)
411 return PGM_ALE_SEQUENCE;
412
413 rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
414 if (rc)
415 return rc;
416
417 if (aste.i)
418 return PGM_ASTE_VALIDITY;
419 if (aste.astesn != ale.astesn)
420 return PGM_ASTE_SEQUENCE;
421
422 if (ale.p == 1) {
423 eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
424 if (ale.aleax != eax) {
425 if (eax / 16 > aste.atl)
426 return PGM_EXTENDED_AUTHORITY;
427
428 authority_table_addr = aste.ato * 4 + eax / 4;
429
430 rc = read_guest_real(vcpu, authority_table_addr,
431 &authority_table,
432 sizeof(u8));
433 if (rc)
434 return rc;
435
436 if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
437 return PGM_EXTENDED_AUTHORITY;
438 }
439 }
440
441 if (ale.fo == 1 && write)
442 return PGM_PROTECTION;
443
444 asce->val = aste.asce;
445 return 0;
446}
447
448struct trans_exc_code_bits {
449 unsigned long addr : 52; /* Translation-exception Address */
450 unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
451 unsigned long : 6;
452 unsigned long b60 : 1;
453 unsigned long b61 : 1;
454 unsigned long as : 2; /* ASCE Identifier */
455};
456
457enum {
458	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
459 FSI_STORE = 1, /* Exception was due to store operation */
460 FSI_FETCH = 2 /* Exception was due to fetch operation */
461};
462
463static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
464 ar_t ar, int write)
311{ 465{
466 int rc;
467 psw_t *psw = &vcpu->arch.sie_block->gpsw;
468 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
469 struct trans_exc_code_bits *tec_bits;
470
471 memset(pgm, 0, sizeof(*pgm));
472 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
473 tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
474 tec_bits->as = psw_bits(*psw).as;
475
476 if (!psw_bits(*psw).t) {
477 asce->val = 0;
478 asce->r = 1;
479 return 0;
480 }
481
312 switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { 482 switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
313 case PSW_AS_PRIMARY: 483 case PSW_AS_PRIMARY:
314 return vcpu->arch.sie_block->gcr[1]; 484 asce->val = vcpu->arch.sie_block->gcr[1];
485 return 0;
315 case PSW_AS_SECONDARY: 486 case PSW_AS_SECONDARY:
316 return vcpu->arch.sie_block->gcr[7]; 487 asce->val = vcpu->arch.sie_block->gcr[7];
488 return 0;
317 case PSW_AS_HOME: 489 case PSW_AS_HOME:
318 return vcpu->arch.sie_block->gcr[13]; 490 asce->val = vcpu->arch.sie_block->gcr[13];
491 return 0;
492 case PSW_AS_ACCREG:
493 rc = ar_translation(vcpu, asce, ar, write);
494 switch (rc) {
495 case PGM_ALEN_TRANSLATION:
496 case PGM_ALE_SEQUENCE:
497 case PGM_ASTE_VALIDITY:
498 case PGM_ASTE_SEQUENCE:
499 case PGM_EXTENDED_AUTHORITY:
500 vcpu->arch.pgm.exc_access_id = ar;
501 break;
502 case PGM_PROTECTION:
503 tec_bits->b60 = 1;
504 tec_bits->b61 = 1;
505 break;
506 }
507 if (rc > 0)
508 pgm->code = rc;
509 return rc;
319 } 510 }
320 return 0; 511 return 0;
321} 512}
@@ -330,6 +521,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
330 * @vcpu: virtual cpu 521 * @vcpu: virtual cpu
331 * @gva: guest virtual address 522 * @gva: guest virtual address
332 * @gpa: points to where guest physical (absolute) address should be stored 523 * @gpa: points to where guest physical (absolute) address should be stored
524 * @asce: effective asce
333 * @write: indicates if access is a write access 525 * @write: indicates if access is a write access
334 * 526 *
335 * Translate a guest virtual address into a guest absolute address by means 527 * Translate a guest virtual address into a guest absolute address by means
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
345 * by the architecture 537 * by the architecture
346 */ 538 */
347static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, 539static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
348 unsigned long *gpa, int write) 540 unsigned long *gpa, const union asce asce,
541 int write)
349{ 542{
350 union vaddress vaddr = {.addr = gva}; 543 union vaddress vaddr = {.addr = gva};
351 union raddress raddr = {.addr = gva}; 544 union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
354 union ctlreg0 ctlreg0; 547 union ctlreg0 ctlreg0;
355 unsigned long ptr; 548 unsigned long ptr;
356 int edat1, edat2; 549 int edat1, edat2;
357 union asce asce;
358 550
359 ctlreg0.val = vcpu->arch.sie_block->gcr[0]; 551 ctlreg0.val = vcpu->arch.sie_block->gcr[0];
360 edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); 552 edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
361 edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); 553 edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
362 asce.val = get_vcpu_asce(vcpu);
363 if (asce.r) 554 if (asce.r)
364 goto real_address; 555 goto real_address;
365 ptr = asce.origin * 4096; 556 ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
506 return (ga & ~0x11fful) == 0; 697 return (ga & ~0x11fful) == 0;
507} 698}
508 699
509static int low_address_protection_enabled(struct kvm_vcpu *vcpu) 700static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
701 const union asce asce)
510{ 702{
511 union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; 703 union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
512 psw_t *psw = &vcpu->arch.sie_block->gpsw; 704 psw_t *psw = &vcpu->arch.sie_block->gpsw;
513 union asce asce;
514 705
515 if (!ctlreg0.lap) 706 if (!ctlreg0.lap)
516 return 0; 707 return 0;
517 asce.val = get_vcpu_asce(vcpu);
518 if (psw_bits(*psw).t && asce.p) 708 if (psw_bits(*psw).t && asce.p)
519 return 0; 709 return 0;
520 return 1; 710 return 1;
521} 711}
522 712
523struct trans_exc_code_bits {
524 unsigned long addr : 52; /* Translation-exception Address */
525 unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
526 unsigned long : 7;
527 unsigned long b61 : 1;
528 unsigned long as : 2; /* ASCE Identifier */
529};
530
531enum {
532 FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
533 FSI_STORE = 1, /* Exception was due to store operation */
534 FSI_FETCH = 2 /* Exception was due to fetch operation */
535};
536
537static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, 713static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
538 unsigned long *pages, unsigned long nr_pages, 714 unsigned long *pages, unsigned long nr_pages,
539 int write) 715 const union asce asce, int write)
540{ 716{
541 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; 717 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
542 psw_t *psw = &vcpu->arch.sie_block->gpsw; 718 psw_t *psw = &vcpu->arch.sie_block->gpsw;
543 struct trans_exc_code_bits *tec_bits; 719 struct trans_exc_code_bits *tec_bits;
544 int lap_enabled, rc; 720 int lap_enabled, rc;
545 721
546 memset(pgm, 0, sizeof(*pgm));
547 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 722 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
548 tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; 723 lap_enabled = low_address_protection_enabled(vcpu, asce);
549 tec_bits->as = psw_bits(*psw).as;
550 lap_enabled = low_address_protection_enabled(vcpu);
551 while (nr_pages) { 724 while (nr_pages) {
552 ga = kvm_s390_logical_to_effective(vcpu, ga); 725 ga = kvm_s390_logical_to_effective(vcpu, ga);
553 tec_bits->addr = ga >> PAGE_SHIFT; 726 tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
557 } 730 }
558 ga &= PAGE_MASK; 731 ga &= PAGE_MASK;
559 if (psw_bits(*psw).t) { 732 if (psw_bits(*psw).t) {
560 rc = guest_translate(vcpu, ga, pages, write); 733 rc = guest_translate(vcpu, ga, pages, asce, write);
561 if (rc < 0) 734 if (rc < 0)
562 return rc; 735 return rc;
563 if (rc == PGM_PROTECTION) 736 if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
578 return 0; 751 return 0;
579} 752}
580 753
581int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 754int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
582 unsigned long len, int write) 755 unsigned long len, int write)
583{ 756{
584 psw_t *psw = &vcpu->arch.sie_block->gpsw; 757 psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
591 764
592 if (!len) 765 if (!len)
593 return 0; 766 return 0;
594 /* Access register mode is not supported yet. */ 767 rc = get_vcpu_asce(vcpu, &asce, ar, write);
595 if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) 768 if (rc)
596 return -EOPNOTSUPP; 769 return rc;
597 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; 770 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
598 pages = pages_array; 771 pages = pages_array;
599 if (nr_pages > ARRAY_SIZE(pages_array)) 772 if (nr_pages > ARRAY_SIZE(pages_array))
600 pages = vmalloc(nr_pages * sizeof(unsigned long)); 773 pages = vmalloc(nr_pages * sizeof(unsigned long));
601 if (!pages) 774 if (!pages)
602 return -ENOMEM; 775 return -ENOMEM;
603 asce.val = get_vcpu_asce(vcpu);
604 need_ipte_lock = psw_bits(*psw).t && !asce.r; 776 need_ipte_lock = psw_bits(*psw).t && !asce.r;
605 if (need_ipte_lock) 777 if (need_ipte_lock)
606 ipte_lock(vcpu); 778 ipte_lock(vcpu);
607 rc = guest_page_range(vcpu, ga, pages, nr_pages, write); 779 rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
608 for (idx = 0; idx < nr_pages && !rc; idx++) { 780 for (idx = 0; idx < nr_pages && !rc; idx++) {
609 gpa = *(pages + idx) + (ga & ~PAGE_MASK); 781 gpa = *(pages + idx) + (ga & ~PAGE_MASK);
610 _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); 782 _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
652 * Note: The IPTE lock is not taken during this function, so the caller 824 * Note: The IPTE lock is not taken during this function, so the caller
653 * has to take care of this. 825 * has to take care of this.
654 */ 826 */
655int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, 827int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
656 unsigned long *gpa, int write) 828 unsigned long *gpa, int write)
657{ 829{
658 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; 830 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
661 union asce asce; 833 union asce asce;
662 int rc; 834 int rc;
663 835
664 /* Access register mode is not supported yet. */
665 if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
666 return -EOPNOTSUPP;
667
668 gva = kvm_s390_logical_to_effective(vcpu, gva); 836 gva = kvm_s390_logical_to_effective(vcpu, gva);
669 memset(pgm, 0, sizeof(*pgm));
670 tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 837 tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
671 tec->as = psw_bits(*psw).as; 838 rc = get_vcpu_asce(vcpu, &asce, ar, write);
672 tec->fsi = write ? FSI_STORE : FSI_FETCH;
673 tec->addr = gva >> PAGE_SHIFT; 839 tec->addr = gva >> PAGE_SHIFT;
674 if (is_low_address(gva) && low_address_protection_enabled(vcpu)) { 840 if (rc)
841 return rc;
842 if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
675 if (write) { 843 if (write) {
676 rc = pgm->code = PGM_PROTECTION; 844 rc = pgm->code = PGM_PROTECTION;
677 return rc; 845 return rc;
678 } 846 }
679 } 847 }
680 848
681 asce.val = get_vcpu_asce(vcpu);
682 if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ 849 if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
683 rc = guest_translate(vcpu, gva, gpa, write); 850 rc = guest_translate(vcpu, gva, gpa, asce, write);
684 if (rc > 0) { 851 if (rc > 0) {
685 if (rc == PGM_PROTECTION) 852 if (rc == PGM_PROTECTION)
686 tec->b61 = 1; 853 tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
697} 864}
698 865
699/** 866/**
700 * kvm_s390_check_low_addr_protection - check for low-address protection 867 * check_gva_range - test a range of guest virtual addresses for accessibility
701 * @ga: Guest address 868 */
869int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
870 unsigned long length, int is_write)
871{
872 unsigned long gpa;
873 unsigned long currlen;
874 int rc = 0;
875
876 ipte_lock(vcpu);
877 while (length > 0 && !rc) {
878 currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
879 rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
880 gva += currlen;
881 length -= currlen;
882 }
883 ipte_unlock(vcpu);
884
885 return rc;
886}
887
888/**
889 * kvm_s390_check_low_addr_prot_real - check for low-address protection
890 * @gra: Guest real address
702 * 891 *
703 * Checks whether an address is subject to low-address protection and set 892 * Checks whether an address is subject to low-address protection and set
704 * up vcpu->arch.pgm accordingly if necessary. 893 * up vcpu->arch.pgm accordingly if necessary.
705 * 894 *
706 * Return: 0 if no protection exception, or PGM_PROTECTION if protected. 895 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
707 */ 896 */
708int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga) 897int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
709{ 898{
710 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; 899 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
711 psw_t *psw = &vcpu->arch.sie_block->gpsw; 900 psw_t *psw = &vcpu->arch.sie_block->gpsw;
712 struct trans_exc_code_bits *tec_bits; 901 struct trans_exc_code_bits *tec_bits;
902 union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
713 903
714 if (!is_low_address(ga) || !low_address_protection_enabled(vcpu)) 904 if (!ctlreg0.lap || !is_low_address(gra))
715 return 0; 905 return 0;
716 906
717 memset(pgm, 0, sizeof(*pgm)); 907 memset(pgm, 0, sizeof(*pgm));
718 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 908 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
719 tec_bits->fsi = FSI_STORE; 909 tec_bits->fsi = FSI_STORE;
720 tec_bits->as = psw_bits(*psw).as; 910 tec_bits->as = psw_bits(*psw).as;
721 tec_bits->addr = ga >> PAGE_SHIFT; 911 tec_bits->addr = gra >> PAGE_SHIFT;
722 pgm->code = PGM_PROTECTION; 912 pgm->code = PGM_PROTECTION;
723 913
724 return pgm->code; 914 return pgm->code;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf15058a..ef03726cc661 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
156} 156}
157 157
158int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, 158int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
159 unsigned long *gpa, int write); 159 ar_t ar, unsigned long *gpa, int write);
160int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
161 unsigned long length, int is_write);
160 162
161int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 163int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
162 unsigned long len, int write); 164 unsigned long len, int write);
163 165
164int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, 166int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
168 * write_guest - copy data from kernel space to guest space 170 * write_guest - copy data from kernel space to guest space
169 * @vcpu: virtual cpu 171 * @vcpu: virtual cpu
170 * @ga: guest address 172 * @ga: guest address
173 * @ar: access register
171 * @data: source address in kernel space 174 * @data: source address in kernel space
172 * @len: number of bytes to copy 175 * @len: number of bytes to copy
173 * 176 *
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
176 * If DAT is off data will be copied to guest real or absolute memory. 179 * If DAT is off data will be copied to guest real or absolute memory.
177 * If DAT is on data will be copied to the address space as specified by 180 * If DAT is on data will be copied to the address space as specified by
178 * the address space bits of the PSW: 181 * the address space bits of the PSW:
179 * Primary, secondory or home space (access register mode is currently not 182 * Primary, secondary, home space or access register mode.
180 * implemented).
181 * The addressing mode of the PSW is also inspected, so that address wrap 183 * The addressing mode of the PSW is also inspected, so that address wrap
182 * around is taken into account for 24-, 31- and 64-bit addressing mode, 184 * around is taken into account for 24-, 31- and 64-bit addressing mode,
183 * if the to be copied data crosses page boundaries in guest address space. 185 * if the to be copied data crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
210 * if data has been changed in guest space in case of an exception. 212 * if data has been changed in guest space in case of an exception.
211 */ 213 */
212static inline __must_check 214static inline __must_check
213int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 215int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
214 unsigned long len) 216 unsigned long len)
215{ 217{
216 return access_guest(vcpu, ga, data, len, 1); 218 return access_guest(vcpu, ga, ar, data, len, 1);
217} 219}
218 220
219/** 221/**
220 * read_guest - copy data from guest space to kernel space 222 * read_guest - copy data from guest space to kernel space
221 * @vcpu: virtual cpu 223 * @vcpu: virtual cpu
222 * @ga: guest address 224 * @ga: guest address
225 * @ar: access register
223 * @data: destination address in kernel space 226 * @data: destination address in kernel space
224 * @len: number of bytes to copy 227 * @len: number of bytes to copy
225 * 228 *
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
229 * data will be copied from guest space to kernel space. 232 * data will be copied from guest space to kernel space.
230 */ 233 */
231static inline __must_check 234static inline __must_check
232int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 235int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
233 unsigned long len) 236 unsigned long len)
234{ 237{
235 return access_guest(vcpu, ga, data, len, 0); 238 return access_guest(vcpu, ga, ar, data, len, 0);
236} 239}
237 240
238/** 241/**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
330void ipte_lock(struct kvm_vcpu *vcpu); 333void ipte_lock(struct kvm_vcpu *vcpu);
331void ipte_unlock(struct kvm_vcpu *vcpu); 334void ipte_unlock(struct kvm_vcpu *vcpu);
332int ipte_lock_held(struct kvm_vcpu *vcpu); 335int ipte_lock_held(struct kvm_vcpu *vcpu);
333int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); 336int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
334 337
335#endif /* __KVM_S390_GACCESS_H */ 338#endif /* __KVM_S390_GACCESS_H */
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 08ae10a3b406..9e3779e3e496 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -320,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
320 320
321 /* Make sure that the source is paged-in */ 321 /* Make sure that the source is paged-in */
322 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], 322 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
323 &srcaddr, 0); 323 reg2, &srcaddr, 0);
324 if (rc) 324 if (rc)
325 return kvm_s390_inject_prog_cond(vcpu, rc); 325 return kvm_s390_inject_prog_cond(vcpu, rc);
326 rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); 326 rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -329,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
329 329
330 /* Make sure that the destination is paged-in */ 330 /* Make sure that the destination is paged-in */
331 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], 331 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
332 &dstaddr, 1); 332 reg1, &dstaddr, 1);
333 if (rc) 333 if (rc)
334 return kvm_s390_inject_prog_cond(vcpu, rc); 334 return kvm_s390_inject_prog_cond(vcpu, rc);
335 rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); 335 rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 02e03c862a60..9072127bd51b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -25,6 +25,7 @@
25#include <linux/random.h> 25#include <linux/random.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/vmalloc.h>
28#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
29#include <asm/lowcore.h> 30#include <asm/lowcore.h>
30#include <asm/pgtable.h> 31#include <asm/pgtable.h>
@@ -38,6 +39,8 @@
38#include "trace.h" 39#include "trace.h"
39#include "trace-s390.h" 40#include "trace-s390.h"
40 41
42#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
43
41#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 44#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42 45
43struct kvm_stats_debugfs_item debugfs_entries[] = { 46struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -104,7 +107,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
104unsigned long kvm_s390_fac_list_mask[] = { 107unsigned long kvm_s390_fac_list_mask[] = {
105 0xff82fffbf4fc2000UL, 108 0xff82fffbf4fc2000UL,
106 0x005c000000000000UL, 109 0x005c000000000000UL,
107 0x4000000000000000UL,
108}; 110};
109 111
110unsigned long kvm_s390_fac_list_mask_size(void) 112unsigned long kvm_s390_fac_list_mask_size(void)
@@ -175,8 +177,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
175 case KVM_CAP_VM_ATTRIBUTES: 177 case KVM_CAP_VM_ATTRIBUTES:
176 case KVM_CAP_MP_STATE: 178 case KVM_CAP_MP_STATE:
177 case KVM_CAP_S390_USER_SIGP: 179 case KVM_CAP_S390_USER_SIGP:
180 case KVM_CAP_S390_USER_STSI:
181 case KVM_CAP_S390_SKEYS:
178 r = 1; 182 r = 1;
179 break; 183 break;
184 case KVM_CAP_S390_MEM_OP:
185 r = MEM_OP_MAX_SIZE;
186 break;
180 case KVM_CAP_NR_VCPUS: 187 case KVM_CAP_NR_VCPUS:
181 case KVM_CAP_MAX_VCPUS: 188 case KVM_CAP_MAX_VCPUS:
182 r = KVM_MAX_VCPUS; 189 r = KVM_MAX_VCPUS;
@@ -271,8 +278,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
271 r = 0; 278 r = 0;
272 break; 279 break;
273 case KVM_CAP_S390_VECTOR_REGISTERS: 280 case KVM_CAP_S390_VECTOR_REGISTERS:
274 kvm->arch.use_vectors = MACHINE_HAS_VX; 281 if (MACHINE_HAS_VX) {
275 r = MACHINE_HAS_VX ? 0 : -EINVAL; 282 set_kvm_facility(kvm->arch.model.fac->mask, 129);
283 set_kvm_facility(kvm->arch.model.fac->list, 129);
284 r = 0;
285 } else
286 r = -EINVAL;
287 break;
288 case KVM_CAP_S390_USER_STSI:
289 kvm->arch.user_stsi = 1;
290 r = 0;
276 break; 291 break;
277 default: 292 default:
278 r = -EINVAL; 293 r = -EINVAL;
@@ -718,6 +733,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
718 return ret; 733 return ret;
719} 734}
720 735
736static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
737{
738 uint8_t *keys;
739 uint64_t hva;
740 unsigned long curkey;
741 int i, r = 0;
742
743 if (args->flags != 0)
744 return -EINVAL;
745
746 /* Is this guest using storage keys? */
747 if (!mm_use_skey(current->mm))
748 return KVM_S390_GET_SKEYS_NONE;
749
750 /* Enforce sane limit on memory allocation */
751 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
752 return -EINVAL;
753
754 keys = kmalloc_array(args->count, sizeof(uint8_t),
755 GFP_KERNEL | __GFP_NOWARN);
756 if (!keys)
757 keys = vmalloc(sizeof(uint8_t) * args->count);
758 if (!keys)
759 return -ENOMEM;
760
761 for (i = 0; i < args->count; i++) {
762 hva = gfn_to_hva(kvm, args->start_gfn + i);
763 if (kvm_is_error_hva(hva)) {
764 r = -EFAULT;
765 goto out;
766 }
767
768 curkey = get_guest_storage_key(current->mm, hva);
769 if (IS_ERR_VALUE(curkey)) {
770 r = curkey;
771 goto out;
772 }
773 keys[i] = curkey;
774 }
775
776 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
777 sizeof(uint8_t) * args->count);
778 if (r)
779 r = -EFAULT;
780out:
781 kvfree(keys);
782 return r;
783}
784
785static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
786{
787 uint8_t *keys;
788 uint64_t hva;
789 int i, r = 0;
790
791 if (args->flags != 0)
792 return -EINVAL;
793
794 /* Enforce sane limit on memory allocation */
795 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
796 return -EINVAL;
797
798 keys = kmalloc_array(args->count, sizeof(uint8_t),
799 GFP_KERNEL | __GFP_NOWARN);
800 if (!keys)
801 keys = vmalloc(sizeof(uint8_t) * args->count);
802 if (!keys)
803 return -ENOMEM;
804
805 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
806 sizeof(uint8_t) * args->count);
807 if (r) {
808 r = -EFAULT;
809 goto out;
810 }
811
812 /* Enable storage key handling for the guest */
813 s390_enable_skey();
814
815 for (i = 0; i < args->count; i++) {
816 hva = gfn_to_hva(kvm, args->start_gfn + i);
817 if (kvm_is_error_hva(hva)) {
818 r = -EFAULT;
819 goto out;
820 }
821
822 /* Lowest order bit is reserved */
823 if (keys[i] & 0x01) {
824 r = -EINVAL;
825 goto out;
826 }
827
828 r = set_guest_storage_key(current->mm, hva,
829 (unsigned long)keys[i], 0);
830 if (r)
831 goto out;
832 }
833out:
834 kvfree(keys);
835 return r;
836}
837
721long kvm_arch_vm_ioctl(struct file *filp, 838long kvm_arch_vm_ioctl(struct file *filp,
722 unsigned int ioctl, unsigned long arg) 839 unsigned int ioctl, unsigned long arg)
723{ 840{
@@ -777,6 +894,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
777 r = kvm_s390_vm_has_attr(kvm, &attr); 894 r = kvm_s390_vm_has_attr(kvm, &attr);
778 break; 895 break;
779 } 896 }
897 case KVM_S390_GET_SKEYS: {
898 struct kvm_s390_skeys args;
899
900 r = -EFAULT;
901 if (copy_from_user(&args, argp,
902 sizeof(struct kvm_s390_skeys)))
903 break;
904 r = kvm_s390_get_skeys(kvm, &args);
905 break;
906 }
907 case KVM_S390_SET_SKEYS: {
908 struct kvm_s390_skeys args;
909
910 r = -EFAULT;
911 if (copy_from_user(&args, argp,
912 sizeof(struct kvm_s390_skeys)))
913 break;
914 r = kvm_s390_set_skeys(kvm, &args);
915 break;
916 }
780 default: 917 default:
781 r = -ENOTTY; 918 r = -ENOTTY;
782 } 919 }
@@ -897,7 +1034,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
897 1034
898 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); 1035 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
899 if (!kvm->arch.dbf) 1036 if (!kvm->arch.dbf)
900 goto out_nodbf; 1037 goto out_err;
901 1038
902 /* 1039 /*
903 * The architectural maximum amount of facilities is 16 kbit. To store 1040 * The architectural maximum amount of facilities is 16 kbit. To store
@@ -909,7 +1046,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
909 kvm->arch.model.fac = 1046 kvm->arch.model.fac =
910 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1047 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
911 if (!kvm->arch.model.fac) 1048 if (!kvm->arch.model.fac)
912 goto out_nofac; 1049 goto out_err;
913 1050
914 /* Populate the facility mask initially. */ 1051 /* Populate the facility mask initially. */
915 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, 1052 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
@@ -929,7 +1066,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
929 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; 1066 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
930 1067
931 if (kvm_s390_crypto_init(kvm) < 0) 1068 if (kvm_s390_crypto_init(kvm) < 0)
932 goto out_crypto; 1069 goto out_err;
933 1070
934 spin_lock_init(&kvm->arch.float_int.lock); 1071 spin_lock_init(&kvm->arch.float_int.lock);
935 INIT_LIST_HEAD(&kvm->arch.float_int.list); 1072 INIT_LIST_HEAD(&kvm->arch.float_int.list);
@@ -944,28 +1081,23 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
944 } else { 1081 } else {
945 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); 1082 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
946 if (!kvm->arch.gmap) 1083 if (!kvm->arch.gmap)
947 goto out_nogmap; 1084 goto out_err;
948 kvm->arch.gmap->private = kvm; 1085 kvm->arch.gmap->private = kvm;
949 kvm->arch.gmap->pfault_enabled = 0; 1086 kvm->arch.gmap->pfault_enabled = 0;
950 } 1087 }
951 1088
952 kvm->arch.css_support = 0; 1089 kvm->arch.css_support = 0;
953 kvm->arch.use_irqchip = 0; 1090 kvm->arch.use_irqchip = 0;
954 kvm->arch.use_vectors = 0;
955 kvm->arch.epoch = 0; 1091 kvm->arch.epoch = 0;
956 1092
957 spin_lock_init(&kvm->arch.start_stop_lock); 1093 spin_lock_init(&kvm->arch.start_stop_lock);
958 1094
959 return 0; 1095 return 0;
960out_nogmap: 1096out_err:
961 kfree(kvm->arch.crypto.crycb); 1097 kfree(kvm->arch.crypto.crycb);
962out_crypto:
963 free_page((unsigned long)kvm->arch.model.fac); 1098 free_page((unsigned long)kvm->arch.model.fac);
964out_nofac:
965 debug_unregister(kvm->arch.dbf); 1099 debug_unregister(kvm->arch.dbf);
966out_nodbf:
967 free_page((unsigned long)(kvm->arch.sca)); 1100 free_page((unsigned long)(kvm->arch.sca));
968out_err:
969 return rc; 1101 return rc;
970} 1102}
971 1103
@@ -1057,12 +1189,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1057void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1189void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1058{ 1190{
1059 save_fp_ctl(&vcpu->arch.host_fpregs.fpc); 1191 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
1060 if (vcpu->kvm->arch.use_vectors) 1192 if (test_kvm_facility(vcpu->kvm, 129))
1061 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); 1193 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
1062 else 1194 else
1063 save_fp_regs(vcpu->arch.host_fpregs.fprs); 1195 save_fp_regs(vcpu->arch.host_fpregs.fprs);
1064 save_access_regs(vcpu->arch.host_acrs); 1196 save_access_regs(vcpu->arch.host_acrs);
1065 if (vcpu->kvm->arch.use_vectors) { 1197 if (test_kvm_facility(vcpu->kvm, 129)) {
1066 restore_fp_ctl(&vcpu->run->s.regs.fpc); 1198 restore_fp_ctl(&vcpu->run->s.regs.fpc);
1067 restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 1199 restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
1068 } else { 1200 } else {
@@ -1078,7 +1210,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1078{ 1210{
1079 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1211 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1080 gmap_disable(vcpu->arch.gmap); 1212 gmap_disable(vcpu->arch.gmap);
1081 if (vcpu->kvm->arch.use_vectors) { 1213 if (test_kvm_facility(vcpu->kvm, 129)) {
1082 save_fp_ctl(&vcpu->run->s.regs.fpc); 1214 save_fp_ctl(&vcpu->run->s.regs.fpc);
1083 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 1215 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
1084 } else { 1216 } else {
@@ -1087,7 +1219,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1087 } 1219 }
1088 save_access_regs(vcpu->run->s.regs.acrs); 1220 save_access_regs(vcpu->run->s.regs.acrs);
1089 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); 1221 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
1090 if (vcpu->kvm->arch.use_vectors) 1222 if (test_kvm_facility(vcpu->kvm, 129))
1091 restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); 1223 restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
1092 else 1224 else
1093 restore_fp_regs(vcpu->arch.host_fpregs.fprs); 1225 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
@@ -1187,7 +1319,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1187 vcpu->arch.sie_block->eca |= 1; 1319 vcpu->arch.sie_block->eca |= 1;
1188 if (sclp_has_sigpif()) 1320 if (sclp_has_sigpif())
1189 vcpu->arch.sie_block->eca |= 0x10000000U; 1321 vcpu->arch.sie_block->eca |= 0x10000000U;
1190 if (vcpu->kvm->arch.use_vectors) { 1322 if (test_kvm_facility(vcpu->kvm, 129)) {
1191 vcpu->arch.sie_block->eca |= 0x00020000; 1323 vcpu->arch.sie_block->eca |= 0x00020000;
1192 vcpu->arch.sie_block->ecd |= 0x20000000; 1324 vcpu->arch.sie_block->ecd |= 0x20000000;
1193 } 1325 }
@@ -1780,7 +1912,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1780 * to look up the current opcode to get the length of the instruction 1912 * to look up the current opcode to get the length of the instruction
1781 * to be able to forward the PSW. 1913 * to be able to forward the PSW.
1782 */ 1914 */
1783 rc = read_guest(vcpu, psw->addr, &opcode, 1); 1915 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
1784 if (rc) 1916 if (rc)
1785 return kvm_s390_inject_prog_cond(vcpu, rc); 1917 return kvm_s390_inject_prog_cond(vcpu, rc);
1786 psw->addr = __rewind_psw(*psw, -insn_length(opcode)); 1918 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
@@ -2189,6 +2321,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2189 return r; 2321 return r;
2190} 2322}
2191 2323
2324static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2325 struct kvm_s390_mem_op *mop)
2326{
2327 void __user *uaddr = (void __user *)mop->buf;
2328 void *tmpbuf = NULL;
2329 int r, srcu_idx;
2330 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2331 | KVM_S390_MEMOP_F_CHECK_ONLY;
2332
2333 if (mop->flags & ~supported_flags)
2334 return -EINVAL;
2335
2336 if (mop->size > MEM_OP_MAX_SIZE)
2337 return -E2BIG;
2338
2339 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2340 tmpbuf = vmalloc(mop->size);
2341 if (!tmpbuf)
2342 return -ENOMEM;
2343 }
2344
2345 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2346
2347 switch (mop->op) {
2348 case KVM_S390_MEMOP_LOGICAL_READ:
2349 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2350 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2351 break;
2352 }
2353 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2354 if (r == 0) {
2355 if (copy_to_user(uaddr, tmpbuf, mop->size))
2356 r = -EFAULT;
2357 }
2358 break;
2359 case KVM_S390_MEMOP_LOGICAL_WRITE:
2360 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2361 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2362 break;
2363 }
2364 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2365 r = -EFAULT;
2366 break;
2367 }
2368 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2369 break;
2370 default:
2371 r = -EINVAL;
2372 }
2373
2374 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2375
2376 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2377 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2378
2379 vfree(tmpbuf);
2380 return r;
2381}
2382
2192long kvm_arch_vcpu_ioctl(struct file *filp, 2383long kvm_arch_vcpu_ioctl(struct file *filp,
2193 unsigned int ioctl, unsigned long arg) 2384 unsigned int ioctl, unsigned long arg)
2194{ 2385{
@@ -2288,6 +2479,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
2288 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2479 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2289 break; 2480 break;
2290 } 2481 }
2482 case KVM_S390_MEM_OP: {
2483 struct kvm_s390_mem_op mem_op;
2484
2485 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2486 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2487 else
2488 r = -EFAULT;
2489 break;
2490 }
2291 default: 2491 default:
2292 r = -ENOTTY; 2492 r = -ENOTTY;
2293 } 2493 }
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index fda3f3146eb6..c5aefef158e5 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
70 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 70 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
71} 71}
72 72
73static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) 73typedef u8 __bitwise ar_t;
74
75static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
74{ 76{
75 u32 base2 = vcpu->arch.sie_block->ipb >> 28; 77 u32 base2 = vcpu->arch.sie_block->ipb >> 28;
76 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); 78 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
77 79
80 if (ar)
81 *ar = base2;
82
78 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 83 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
79} 84}
80 85
81static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, 86static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
82 u64 *address1, u64 *address2) 87 u64 *address1, u64 *address2,
88 ar_t *ar_b1, ar_t *ar_b2)
83{ 89{
84 u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; 90 u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
85 u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; 91 u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
88 94
89 *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; 95 *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
90 *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 96 *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
97
98 if (ar_b1)
99 *ar_b1 = base1;
100 if (ar_b2)
101 *ar_b2 = base2;
91} 102}
92 103
93static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) 104static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
98 *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; 109 *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
99} 110}
100 111
101static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) 112static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
102{ 113{
103 u32 base2 = vcpu->arch.sie_block->ipb >> 28; 114 u32 base2 = vcpu->arch.sie_block->ipb >> 28;
104 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + 115 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
107 if (disp2 & 0x80000) 118 if (disp2 & 0x80000)
108 disp2+=0xfff00000; 119 disp2+=0xfff00000;
109 120
121 if (ar)
122 *ar = base2;
123
110 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; 124 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
111} 125}
112 126
113static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) 127static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
114{ 128{
115 u32 base2 = vcpu->arch.sie_block->ipb >> 28; 129 u32 base2 = vcpu->arch.sie_block->ipb >> 28;
116 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); 130 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
117 131
132 if (ar)
133 *ar = base2;
134
118 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 135 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
119} 136}
120 137
@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
125 vcpu->arch.sie_block->gpsw.mask |= cc << 44; 142 vcpu->arch.sie_block->gpsw.mask |= cc << 44;
126} 143}
127 144
128/* test availability of facility in a kvm intance */ 145/* test availability of facility in a kvm instance */
129static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) 146static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
130{ 147{
131 return __test_facility(nr, kvm->arch.model.fac->mask) && 148 return __test_facility(nr, kvm->arch.model.fac->mask) &&
132 __test_facility(nr, kvm->arch.model.fac->list); 149 __test_facility(nr, kvm->arch.model.fac->list);
133} 150}
134 151
152static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
153{
154 unsigned char *ptr;
155
156 if (nr >= MAX_FACILITY_BIT)
157 return -EINVAL;
158 ptr = (unsigned char *) fac_list + (nr >> 3);
159 *ptr |= (0x80UL >> (nr & 7));
160 return 0;
161}
162
135/* are cpu states controlled by user space */ 163/* are cpu states controlled by user space */
136static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) 164static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
137{ 165{
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index b982fbca34df..5e4658d20c77 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
36 struct kvm_vcpu *cpup; 36 struct kvm_vcpu *cpup;
37 s64 hostclk, val; 37 s64 hostclk, val;
38 int i, rc; 38 int i, rc;
39 ar_t ar;
39 u64 op2; 40 u64 op2;
40 41
41 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 42 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
42 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 43 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
43 44
44 op2 = kvm_s390_get_base_disp_s(vcpu); 45 op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
45 if (op2 & 7) /* Operand must be on a doubleword boundary */ 46 if (op2 & 7) /* Operand must be on a doubleword boundary */
46 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 47 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
47 rc = read_guest(vcpu, op2, &val, sizeof(val)); 48 rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
48 if (rc) 49 if (rc)
49 return kvm_s390_inject_prog_cond(vcpu, rc); 50 return kvm_s390_inject_prog_cond(vcpu, rc);
50 51
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
68 u64 operand2; 69 u64 operand2;
69 u32 address; 70 u32 address;
70 int rc; 71 int rc;
72 ar_t ar;
71 73
72 vcpu->stat.instruction_spx++; 74 vcpu->stat.instruction_spx++;
73 75
74 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 76 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
75 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 77 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
76 78
77 operand2 = kvm_s390_get_base_disp_s(vcpu); 79 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
78 80
79 /* must be word boundary */ 81 /* must be word boundary */
80 if (operand2 & 3) 82 if (operand2 & 3)
81 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 83 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
82 84
83 /* get the value */ 85 /* get the value */
84 rc = read_guest(vcpu, operand2, &address, sizeof(address)); 86 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
85 if (rc) 87 if (rc)
86 return kvm_s390_inject_prog_cond(vcpu, rc); 88 return kvm_s390_inject_prog_cond(vcpu, rc);
87 89
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
107 u64 operand2; 109 u64 operand2;
108 u32 address; 110 u32 address;
109 int rc; 111 int rc;
112 ar_t ar;
110 113
111 vcpu->stat.instruction_stpx++; 114 vcpu->stat.instruction_stpx++;
112 115
113 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 116 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
114 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 117 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
115 118
116 operand2 = kvm_s390_get_base_disp_s(vcpu); 119 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
117 120
118 /* must be word boundary */ 121 /* must be word boundary */
119 if (operand2 & 3) 122 if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
122 address = kvm_s390_get_prefix(vcpu); 125 address = kvm_s390_get_prefix(vcpu);
123 126
124 /* get the value */ 127 /* get the value */
125 rc = write_guest(vcpu, operand2, &address, sizeof(address)); 128 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
126 if (rc) 129 if (rc)
127 return kvm_s390_inject_prog_cond(vcpu, rc); 130 return kvm_s390_inject_prog_cond(vcpu, rc);
128 131
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
136 u16 vcpu_id = vcpu->vcpu_id; 139 u16 vcpu_id = vcpu->vcpu_id;
137 u64 ga; 140 u64 ga;
138 int rc; 141 int rc;
142 ar_t ar;
139 143
140 vcpu->stat.instruction_stap++; 144 vcpu->stat.instruction_stap++;
141 145
142 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 146 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
143 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 147 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
144 148
145 ga = kvm_s390_get_base_disp_s(vcpu); 149 ga = kvm_s390_get_base_disp_s(vcpu, &ar);
146 150
147 if (ga & 1) 151 if (ga & 1)
148 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 152 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
149 153
150 rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); 154 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
151 if (rc) 155 if (rc)
152 return kvm_s390_inject_prog_cond(vcpu, rc); 156 return kvm_s390_inject_prog_cond(vcpu, rc);
153 157
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
207 kvm_s390_get_regs_rre(vcpu, NULL, &reg2); 211 kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
208 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 212 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
209 addr = kvm_s390_logical_to_effective(vcpu, addr); 213 addr = kvm_s390_logical_to_effective(vcpu, addr);
210 if (kvm_s390_check_low_addr_protection(vcpu, addr)) 214 if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
211 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 215 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
212 addr = kvm_s390_real_to_abs(vcpu, addr); 216 addr = kvm_s390_real_to_abs(vcpu, addr);
213 217
@@ -231,8 +235,9 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
231 u32 tpi_data[3]; 235 u32 tpi_data[3];
232 int rc; 236 int rc;
233 u64 addr; 237 u64 addr;
238 ar_t ar;
234 239
235 addr = kvm_s390_get_base_disp_s(vcpu); 240 addr = kvm_s390_get_base_disp_s(vcpu, &ar);
236 if (addr & 3) 241 if (addr & 3)
237 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 242 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
238 243
@@ -251,7 +256,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
251 * provided area. 256 * provided area.
252 */ 257 */
253 len = sizeof(tpi_data) - 4; 258 len = sizeof(tpi_data) - 4;
254 rc = write_guest(vcpu, addr, &tpi_data, len); 259 rc = write_guest(vcpu, addr, ar, &tpi_data, len);
255 if (rc) { 260 if (rc) {
256 rc = kvm_s390_inject_prog_cond(vcpu, rc); 261 rc = kvm_s390_inject_prog_cond(vcpu, rc);
257 goto reinject_interrupt; 262 goto reinject_interrupt;
@@ -395,15 +400,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
395 psw_compat_t new_psw; 400 psw_compat_t new_psw;
396 u64 addr; 401 u64 addr;
397 int rc; 402 int rc;
403 ar_t ar;
398 404
399 if (gpsw->mask & PSW_MASK_PSTATE) 405 if (gpsw->mask & PSW_MASK_PSTATE)
400 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 406 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
401 407
402 addr = kvm_s390_get_base_disp_s(vcpu); 408 addr = kvm_s390_get_base_disp_s(vcpu, &ar);
403 if (addr & 7) 409 if (addr & 7)
404 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 410 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
405 411
406 rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 412 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
407 if (rc) 413 if (rc)
408 return kvm_s390_inject_prog_cond(vcpu, rc); 414 return kvm_s390_inject_prog_cond(vcpu, rc);
409 if (!(new_psw.mask & PSW32_MASK_BASE)) 415 if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -421,14 +427,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
421 psw_t new_psw; 427 psw_t new_psw;
422 u64 addr; 428 u64 addr;
423 int rc; 429 int rc;
430 ar_t ar;
424 431
425 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 432 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
426 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 433 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
427 434
428 addr = kvm_s390_get_base_disp_s(vcpu); 435 addr = kvm_s390_get_base_disp_s(vcpu, &ar);
429 if (addr & 7) 436 if (addr & 7)
430 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 437 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
431 rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 438 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
432 if (rc) 439 if (rc)
433 return kvm_s390_inject_prog_cond(vcpu, rc); 440 return kvm_s390_inject_prog_cond(vcpu, rc);
434 vcpu->arch.sie_block->gpsw = new_psw; 441 vcpu->arch.sie_block->gpsw = new_psw;
@@ -442,18 +449,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
442 u64 stidp_data = vcpu->arch.stidp_data; 449 u64 stidp_data = vcpu->arch.stidp_data;
443 u64 operand2; 450 u64 operand2;
444 int rc; 451 int rc;
452 ar_t ar;
445 453
446 vcpu->stat.instruction_stidp++; 454 vcpu->stat.instruction_stidp++;
447 455
448 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 456 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
449 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 457 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
450 458
451 operand2 = kvm_s390_get_base_disp_s(vcpu); 459 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
452 460
453 if (operand2 & 7) 461 if (operand2 & 7)
454 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 462 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
455 463
456 rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); 464 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
457 if (rc) 465 if (rc)
458 return kvm_s390_inject_prog_cond(vcpu, rc); 466 return kvm_s390_inject_prog_cond(vcpu, rc);
459 467
@@ -488,6 +496,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
488 ASCEBC(mem->vm[0].cpi, 16); 496 ASCEBC(mem->vm[0].cpi, 16);
489} 497}
490 498
499static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
500 u8 fc, u8 sel1, u16 sel2)
501{
502 vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
503 vcpu->run->s390_stsi.addr = addr;
504 vcpu->run->s390_stsi.ar = ar;
505 vcpu->run->s390_stsi.fc = fc;
506 vcpu->run->s390_stsi.sel1 = sel1;
507 vcpu->run->s390_stsi.sel2 = sel2;
508}
509
491static int handle_stsi(struct kvm_vcpu *vcpu) 510static int handle_stsi(struct kvm_vcpu *vcpu)
492{ 511{
493 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; 512 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
@@ -496,6 +515,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
496 unsigned long mem = 0; 515 unsigned long mem = 0;
497 u64 operand2; 516 u64 operand2;
498 int rc = 0; 517 int rc = 0;
518 ar_t ar;
499 519
500 vcpu->stat.instruction_stsi++; 520 vcpu->stat.instruction_stsi++;
501 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 521 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -518,7 +538,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
518 return 0; 538 return 0;
519 } 539 }
520 540
521 operand2 = kvm_s390_get_base_disp_s(vcpu); 541 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
522 542
523 if (operand2 & 0xfff) 543 if (operand2 & 0xfff)
524 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 544 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -542,16 +562,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
542 break; 562 break;
543 } 563 }
544 564
545 rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); 565 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
546 if (rc) { 566 if (rc) {
547 rc = kvm_s390_inject_prog_cond(vcpu, rc); 567 rc = kvm_s390_inject_prog_cond(vcpu, rc);
548 goto out; 568 goto out;
549 } 569 }
570 if (vcpu->kvm->arch.user_stsi) {
571 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
572 rc = -EREMOTE;
573 }
550 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 574 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
551 free_page(mem); 575 free_page(mem);
552 kvm_s390_set_psw_cc(vcpu, 0); 576 kvm_s390_set_psw_cc(vcpu, 0);
553 vcpu->run->s.regs.gprs[0] = 0; 577 vcpu->run->s.regs.gprs[0] = 0;
554 return 0; 578 return rc;
555out_no_data: 579out_no_data:
556 kvm_s390_set_psw_cc(vcpu, 3); 580 kvm_s390_set_psw_cc(vcpu, 3);
557out: 581out:
@@ -680,7 +704,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
680 } 704 }
681 705
682 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 706 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
683 if (kvm_s390_check_low_addr_protection(vcpu, start)) 707 if (kvm_s390_check_low_addr_prot_real(vcpu, start))
684 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 708 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
685 } 709 }
686 710
@@ -786,13 +810,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
786 int reg, rc, nr_regs; 810 int reg, rc, nr_regs;
787 u32 ctl_array[16]; 811 u32 ctl_array[16];
788 u64 ga; 812 u64 ga;
813 ar_t ar;
789 814
790 vcpu->stat.instruction_lctl++; 815 vcpu->stat.instruction_lctl++;
791 816
792 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 817 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
793 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 818 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
794 819
795 ga = kvm_s390_get_base_disp_rs(vcpu); 820 ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
796 821
797 if (ga & 3) 822 if (ga & 3)
798 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 823 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -801,7 +826,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
801 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 826 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
802 827
803 nr_regs = ((reg3 - reg1) & 0xf) + 1; 828 nr_regs = ((reg3 - reg1) & 0xf) + 1;
804 rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); 829 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
805 if (rc) 830 if (rc)
806 return kvm_s390_inject_prog_cond(vcpu, rc); 831 return kvm_s390_inject_prog_cond(vcpu, rc);
807 reg = reg1; 832 reg = reg1;
@@ -824,13 +849,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
824 int reg, rc, nr_regs; 849 int reg, rc, nr_regs;
825 u32 ctl_array[16]; 850 u32 ctl_array[16];
826 u64 ga; 851 u64 ga;
852 ar_t ar;
827 853
828 vcpu->stat.instruction_stctl++; 854 vcpu->stat.instruction_stctl++;
829 855
830 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 856 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
831 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 857 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
832 858
833 ga = kvm_s390_get_base_disp_rs(vcpu); 859 ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
834 860
835 if (ga & 3) 861 if (ga & 3)
836 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 862 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -846,7 +872,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
846 break; 872 break;
847 reg = (reg + 1) % 16; 873 reg = (reg + 1) % 16;
848 } while (1); 874 } while (1);
849 rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); 875 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
850 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; 876 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
851} 877}
852 878
@@ -857,13 +883,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
857 int reg, rc, nr_regs; 883 int reg, rc, nr_regs;
858 u64 ctl_array[16]; 884 u64 ctl_array[16];
859 u64 ga; 885 u64 ga;
886 ar_t ar;
860 887
861 vcpu->stat.instruction_lctlg++; 888 vcpu->stat.instruction_lctlg++;
862 889
863 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 890 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
864 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 891 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
865 892
866 ga = kvm_s390_get_base_disp_rsy(vcpu); 893 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
867 894
868 if (ga & 7) 895 if (ga & 7)
869 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 896 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -872,7 +899,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
872 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 899 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
873 900
874 nr_regs = ((reg3 - reg1) & 0xf) + 1; 901 nr_regs = ((reg3 - reg1) & 0xf) + 1;
875 rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); 902 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
876 if (rc) 903 if (rc)
877 return kvm_s390_inject_prog_cond(vcpu, rc); 904 return kvm_s390_inject_prog_cond(vcpu, rc);
878 reg = reg1; 905 reg = reg1;
@@ -894,13 +921,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
894 int reg, rc, nr_regs; 921 int reg, rc, nr_regs;
895 u64 ctl_array[16]; 922 u64 ctl_array[16];
896 u64 ga; 923 u64 ga;
924 ar_t ar;
897 925
898 vcpu->stat.instruction_stctg++; 926 vcpu->stat.instruction_stctg++;
899 927
900 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 928 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
901 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 929 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
902 930
903 ga = kvm_s390_get_base_disp_rsy(vcpu); 931 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
904 932
905 if (ga & 7) 933 if (ga & 7)
906 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 934 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -916,7 +944,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
916 break; 944 break;
917 reg = (reg + 1) % 16; 945 reg = (reg + 1) % 16;
918 } while (1); 946 } while (1);
919 rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); 947 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
920 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; 948 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
921} 949}
922 950
@@ -941,13 +969,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
941 unsigned long hva, gpa; 969 unsigned long hva, gpa;
942 int ret = 0, cc = 0; 970 int ret = 0, cc = 0;
943 bool writable; 971 bool writable;
972 ar_t ar;
944 973
945 vcpu->stat.instruction_tprot++; 974 vcpu->stat.instruction_tprot++;
946 975
947 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 976 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
948 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 977 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
949 978
950 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); 979 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
951 980
952 /* we only handle the Linux memory detection case: 981 /* we only handle the Linux memory detection case:
953 * access key == 0 982 * access key == 0
@@ -956,11 +985,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
956 return -EOPNOTSUPP; 985 return -EOPNOTSUPP;
957 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 986 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
958 ipte_lock(vcpu); 987 ipte_lock(vcpu);
959 ret = guest_translate_address(vcpu, address1, &gpa, 1); 988 ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
960 if (ret == PGM_PROTECTION) { 989 if (ret == PGM_PROTECTION) {
961 /* Write protected? Try again with read-only... */ 990 /* Write protected? Try again with read-only... */
962 cc = 1; 991 cc = 1;
963 ret = guest_translate_address(vcpu, address1, &gpa, 0); 992 ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
964 } 993 }
965 if (ret) { 994 if (ret) {
966 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { 995 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 755a7330d361..72e58bd2bee7 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -434,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
434 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 434 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
435 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 435 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
436 436
437 order_code = kvm_s390_get_base_disp_rs(vcpu); 437 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
438 if (handle_sigp_order_in_user_space(vcpu, order_code)) 438 if (handle_sigp_order_in_user_space(vcpu, order_code))
439 return -EOPNOTSUPP; 439 return -EOPNOTSUPP;
440 440
@@ -476,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
476 int r3 = vcpu->arch.sie_block->ipa & 0x000f; 476 int r3 = vcpu->arch.sie_block->ipa & 0x000f;
477 u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; 477 u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
478 struct kvm_vcpu *dest_vcpu; 478 struct kvm_vcpu *dest_vcpu;
479 u8 order_code = kvm_s390_get_base_disp_rs(vcpu); 479 u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
480 480
481 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); 481 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
482 482
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 82634a492fe0..1162ef7a3fa1 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -147,6 +147,16 @@ struct kvm_pit_config {
147 147
148#define KVM_PIT_SPEAKER_DUMMY 1 148#define KVM_PIT_SPEAKER_DUMMY 1
149 149
150struct kvm_s390_skeys {
151 __u64 start_gfn;
152 __u64 count;
153 __u64 skeydata_addr;
154 __u32 flags;
155 __u32 reserved[9];
156};
157#define KVM_S390_GET_SKEYS_NONE 1
158#define KVM_S390_SKEYS_MAX 1048576
159
150#define KVM_EXIT_UNKNOWN 0 160#define KVM_EXIT_UNKNOWN 0
151#define KVM_EXIT_EXCEPTION 1 161#define KVM_EXIT_EXCEPTION 1
152#define KVM_EXIT_IO 2 162#define KVM_EXIT_IO 2
@@ -172,6 +182,7 @@ struct kvm_pit_config {
172#define KVM_EXIT_S390_TSCH 22 182#define KVM_EXIT_S390_TSCH 22
173#define KVM_EXIT_EPR 23 183#define KVM_EXIT_EPR 23
174#define KVM_EXIT_SYSTEM_EVENT 24 184#define KVM_EXIT_SYSTEM_EVENT 24
185#define KVM_EXIT_S390_STSI 25
175 186
176/* For KVM_EXIT_INTERNAL_ERROR */ 187/* For KVM_EXIT_INTERNAL_ERROR */
177/* Emulate instruction failed. */ 188/* Emulate instruction failed. */
@@ -309,6 +320,15 @@ struct kvm_run {
309 __u32 type; 320 __u32 type;
310 __u64 flags; 321 __u64 flags;
311 } system_event; 322 } system_event;
323 /* KVM_EXIT_S390_STSI */
324 struct {
325 __u64 addr;
326 __u8 ar;
327 __u8 reserved;
328 __u8 fc;
329 __u8 sel1;
330 __u16 sel2;
331 } s390_stsi;
312 /* Fix the size of the union. */ 332 /* Fix the size of the union. */
313 char padding[256]; 333 char padding[256];
314 }; 334 };
@@ -365,6 +385,24 @@ struct kvm_translation {
365 __u8 pad[5]; 385 __u8 pad[5];
366}; 386};
367 387
388/* for KVM_S390_MEM_OP */
389struct kvm_s390_mem_op {
390 /* in */
391 __u64 gaddr; /* the guest address */
392 __u64 flags; /* flags */
393 __u32 size; /* amount of bytes */
394 __u32 op; /* type of operation */
395 __u64 buf; /* buffer in userspace */
396 __u8 ar; /* the access register number */
397 __u8 reserved[31]; /* should be set to 0 */
398};
399/* types for kvm_s390_mem_op->op */
400#define KVM_S390_MEMOP_LOGICAL_READ 0
401#define KVM_S390_MEMOP_LOGICAL_WRITE 1
402/* flags for kvm_s390_mem_op->flags */
403#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
404#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
405
368/* for KVM_INTERRUPT */ 406/* for KVM_INTERRUPT */
369struct kvm_interrupt { 407struct kvm_interrupt {
370 /* in */ 408 /* in */
@@ -761,6 +799,9 @@ struct kvm_ppc_smmu_info {
761#define KVM_CAP_CHECK_EXTENSION_VM 105 799#define KVM_CAP_CHECK_EXTENSION_VM 105
762#define KVM_CAP_S390_USER_SIGP 106 800#define KVM_CAP_S390_USER_SIGP 106
763#define KVM_CAP_S390_VECTOR_REGISTERS 107 801#define KVM_CAP_S390_VECTOR_REGISTERS 107
802#define KVM_CAP_S390_MEM_OP 108
803#define KVM_CAP_S390_USER_STSI 109
804#define KVM_CAP_S390_SKEYS 110
764 805
765#ifdef KVM_CAP_IRQ_ROUTING 806#ifdef KVM_CAP_IRQ_ROUTING
766 807
@@ -1136,6 +1177,11 @@ struct kvm_s390_ucas_mapping {
1136#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init) 1177#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
1137#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init) 1178#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init)
1138#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list) 1179#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
1180/* Available with KVM_CAP_S390_MEM_OP */
1181#define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op)
1182/* Available with KVM_CAP_S390_SKEYS */
1183#define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys)
1184#define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys)
1139 1185
1140#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 1186#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
1141#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) 1187#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)