author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-13 12:47:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-13 12:47:01 -0400
commit		900360131066f192c82311a098d03d6ac6429e20 (patch)
tree		e9681537a2d1f75fa5be21d8f1116f9f0ba8a391 /arch/s390/kvm
parent		4541fec3104bef0c60633f9e180be94ea5ccc2b7 (diff)
parent		ca3f0874723fad81d0c701b63ae3a17a408d5f25 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "First batch of KVM changes for 4.1

  The most interesting bit here is irqfd/ioeventfd support for ARM and
  ARM64.

  Summary:

  ARM/ARM64:
     fixes for live migration, irqfd and ioeventfd support (enabling
     vhost, too), page aging

  s390:
     interrupt handling rework, allowing to inject all local interrupts
     via new ioctl and to get/set the full local irq state for migration
     and introspection.  New ioctls to access memory by virtual address,
     and to get/set the guest storage keys.  SIMD support.

  MIPS:
     FPU and MIPS SIMD Architecture (MSA) support.  Includes some
     patches from Ralf Baechle's MIPS tree.

  x86:
     bugfixes (notably for pvclock, the others are small) and cleanups.
     Another small latency improvement for the TSC deadline timer"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
KVM: use slowpath for cross page cached accesses
kvm: mmu: lazy collapse small sptes into large sptes
KVM: x86: Clear CR2 on VCPU reset
KVM: x86: DR0-DR3 are not clear on reset
KVM: x86: BSP in MSR_IA32_APICBASE is writable
KVM: x86: simplify kvm_apic_map
KVM: x86: avoid logical_map when it is invalid
KVM: x86: fix mixed APIC mode broadcast
KVM: x86: use MDA for interrupt matching
kvm/ppc/mpic: drop unused IRQ_testbit
KVM: nVMX: remove unnecessary double caching of MAXPHYADDR
KVM: nVMX: checks for address bits beyond MAXPHYADDR on VM-entry
KVM: x86: cache maxphyaddr CPUID leaf in struct kvm_vcpu
KVM: vmx: pass error code with internal error #2
x86: vdso: fix pvclock races with task migration
KVM: remove kvm_read_hva and kvm_read_hva_atomic
KVM: x86: optimize delivery of TSC deadline timer interrupt
KVM: x86: extract blocking logic from __vcpu_run
kvm: x86: fix x86 eflags fixed bit
KVM: s390: migrate vcpu interrupt state
...
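The s390 "access memory by virtual address" item above landed as the vcpu-level KVM_S390_MEM_OP ioctl. Below is a minimal userspace sketch, assuming the uapi layout this series introduces (struct kvm_s390_mem_op with gaddr/size/op/buf/ar fields and the KVM_S390_MEMOP_LOGICAL_READ operation); treat the field and constant names as assumptions to check against your headers:

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Read 'len' bytes from a guest logical address through DAT, using
	 * the given access register for address-space selection (AR mode).
	 * Returns the ioctl result: 0 on success, < 0 on error. The vcpu_fd
	 * setup (KVM_CREATE_VM/KVM_CREATE_VCPU) is omitted. */
	static int read_guest_logical(int vcpu_fd, __u64 gaddr, __u8 ar,
				      void *buf, __u32 len)
	{
		struct kvm_s390_mem_op op;

		memset(&op, 0, sizeof(op));
		op.gaddr = gaddr;			/* guest logical address */
		op.size  = len;
		op.op    = KVM_S390_MEMOP_LOGICAL_READ;	/* translate via DAT */
		op.buf   = (__u64)(unsigned long)buf;	/* userspace buffer */
		op.ar    = ar;				/* access register 0..15 */
		return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
	}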
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/diag.c		|    6
-rw-r--r--	arch/s390/kvm/gaccess.c		|  296
-rw-r--r--	arch/s390/kvm/gaccess.h		|   21
-rw-r--r--	arch/s390/kvm/guestdbg.c	|    8
-rw-r--r--	arch/s390/kvm/intercept.c	|    5
-rw-r--r--	arch/s390/kvm/interrupt.c	| 1101
-rw-r--r--	arch/s390/kvm/kvm-s390.c	|  398
-rw-r--r--	arch/s390/kvm/kvm-s390.h	|   51
-rw-r--r--	arch/s390/kvm/priv.c		|  144
-rw-r--r--	arch/s390/kvm/sigp.c		|    7
10 files changed, 1479 insertions(+), 558 deletions(-)
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254afff250c..fc7ec95848c3 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 
 	if (vcpu->run->s.regs.gprs[rx] & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 	 * - gpr 3 contains the virtqueue index (passed as datamatch)
 	 * - gpr 4 contains the index on the bus (optionally)
 	 */
-	ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 {
-	int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
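For context on the register convention documented in the __diag_virtio_hypercall hunk: the guest issues DIAGNOSE 0x500 with the function code in gpr 1 and the notify payload in gprs 2-4. A sketch modeled on the guest-side virtio-ccw notify hypercall (not part of this diff; the diag operands and register bindings are quoted from memory of the guest driver, so treat the details as an assumption):

	/* Guest side of the hypercall handled above: gpr1 = function code
	 * (3 = virtio-ccw notify), gpr2 = subchannel id, gpr3 = virtqueue
	 * index, gpr4 = cookie returned by the previous notify. */
	static inline long virtio_ccw_notify(unsigned long schid,
					     unsigned long queue, long cookie)
	{
		register unsigned long nr asm("1") = 3;	/* ccw notify */
		register unsigned long sch asm("2") = schid;
		register unsigned long idx asm("3") = queue;
		register long ck asm("4") = cookie;
		register long rc asm("2");		/* result in gpr2 */

		asm volatile("diag 2,4,0x500\n"
			     : "=d" (rc)
			     : "d" (nr), "d" (sch), "d" (idx), "d" (ck)
			     : "memory", "cc");
		return rc;
	}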
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 267523cac6de..a7559f7207df 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
 #include <asm/pgtable.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
+#include <asm/switch_to.h>
 
 union asce {
 	unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
 	unsigned long pfra : 52; /* Page-Frame Real Address */
 };
 
+union alet {
+	u32 val;
+	struct {
+		u32 reserved : 7;
+		u32 p        : 1;
+		u32 alesn    : 8;
+		u32 alen     : 16;
+	};
+};
+
+union ald {
+	u32 val;
+	struct {
+		u32     : 1;
+		u32 alo : 24;
+		u32 all : 7;
+	};
+};
+
+struct ale {
+	unsigned long i      : 1;  /* ALEN-Invalid Bit */
+	unsigned long        : 5;
+	unsigned long fo     : 1;  /* Fetch-Only Bit */
+	unsigned long p      : 1;  /* Private Bit */
+	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
+	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
+	unsigned long        : 32;
+	unsigned long        : 1;
+	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
+	unsigned long        : 6;
+	unsigned long astesn : 32; /* ASTE Sequence Number */
+} __packed;
+
+struct aste {
+	unsigned long i      : 1;  /* ASX-Invalid Bit */
+	unsigned long ato    : 29; /* Authority-Table Origin */
+	unsigned long        : 1;
+	unsigned long b      : 1;  /* Base-Space Bit */
+	unsigned long ax     : 16; /* Authorization Index */
+	unsigned long atl    : 12; /* Authority-Table Length */
+	unsigned long        : 2;
+	unsigned long ca     : 1;  /* Controlled-ASN Bit */
+	unsigned long ra     : 1;  /* Reusable-ASN Bit */
+	unsigned long asce   : 64; /* Address-Space-Control Element */
+	unsigned long ald    : 32;
+	unsigned long astesn : 32;
+	/* .. more fields there */
+} __packed;
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 		ipte_unlock_simple(vcpu);
 }
 
-static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+			  int write)
+{
+	union alet alet;
+	struct ale ale;
+	struct aste aste;
+	unsigned long ald_addr, authority_table_addr;
+	union ald ald;
+	int eax, rc;
+	u8 authority_table;
+
+	if (ar >= NUM_ACRS)
+		return -EINVAL;
+
+	save_access_regs(vcpu->run->s.regs.acrs);
+	alet.val = vcpu->run->s.regs.acrs[ar];
+
+	if (ar == 0 || alet.val == 0) {
+		asce->val = vcpu->arch.sie_block->gcr[1];
+		return 0;
+	} else if (alet.val == 1) {
+		asce->val = vcpu->arch.sie_block->gcr[7];
+		return 0;
+	}
+
+	if (alet.reserved)
+		return PGM_ALET_SPECIFICATION;
+
+	if (alet.p)
+		ald_addr = vcpu->arch.sie_block->gcr[5];
+	else
+		ald_addr = vcpu->arch.sie_block->gcr[2];
+	ald_addr &= 0x7fffffc0;
+
+	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
+	if (rc)
+		return rc;
+
+	if (alet.alen / 8 > ald.all)
+		return PGM_ALEN_TRANSLATION;
+
+	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
+		return PGM_ADDRESSING;
+
+	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
+			     sizeof(struct ale));
+	if (rc)
+		return rc;
+
+	if (ale.i == 1)
+		return PGM_ALEN_TRANSLATION;
+	if (ale.alesn != alet.alesn)
+		return PGM_ALE_SEQUENCE;
+
+	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
+	if (rc)
+		return rc;
+
+	if (aste.i)
+		return PGM_ASTE_VALIDITY;
+	if (aste.astesn != ale.astesn)
+		return PGM_ASTE_SEQUENCE;
+
+	if (ale.p == 1) {
+		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
+		if (ale.aleax != eax) {
+			if (eax / 16 > aste.atl)
+				return PGM_EXTENDED_AUTHORITY;
+
+			authority_table_addr = aste.ato * 4 + eax / 4;
+
+			rc = read_guest_real(vcpu, authority_table_addr,
+					     &authority_table,
+					     sizeof(u8));
+			if (rc)
+				return rc;
+
+			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
+				return PGM_EXTENDED_AUTHORITY;
+		}
+	}
+
+	if (ale.fo == 1 && write)
+		return PGM_PROTECTION;
+
+	asce->val = aste.asce;
+	return 0;
+}
+
+struct trans_exc_code_bits {
+	unsigned long addr : 52; /* Translation-exception Address */
+	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
+	unsigned long      : 6;
+	unsigned long b60  : 1;
+	unsigned long b61  : 1;
+	unsigned long as   : 2;  /* ASCE Identifier */
+};
+
+enum {
+	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+	FSI_STORE   = 1, /* Exception was due to store operation */
+	FSI_FETCH   = 2  /* Exception was due to fetch operation */
+};
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+			 ar_t ar, int write)
 {
+	int rc;
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+	struct trans_exc_code_bits *tec_bits;
+
+	memset(pgm, 0, sizeof(*pgm));
+	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+	tec_bits->as = psw_bits(*psw).as;
+
+	if (!psw_bits(*psw).t) {
+		asce->val = 0;
+		asce->r = 1;
+		return 0;
+	}
+
 	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
 	case PSW_AS_PRIMARY:
-		return vcpu->arch.sie_block->gcr[1];
+		asce->val = vcpu->arch.sie_block->gcr[1];
+		return 0;
 	case PSW_AS_SECONDARY:
-		return vcpu->arch.sie_block->gcr[7];
+		asce->val = vcpu->arch.sie_block->gcr[7];
+		return 0;
 	case PSW_AS_HOME:
-		return vcpu->arch.sie_block->gcr[13];
+		asce->val = vcpu->arch.sie_block->gcr[13];
+		return 0;
+	case PSW_AS_ACCREG:
+		rc = ar_translation(vcpu, asce, ar, write);
+		switch (rc) {
+		case PGM_ALEN_TRANSLATION:
+		case PGM_ALE_SEQUENCE:
+		case PGM_ASTE_VALIDITY:
+		case PGM_ASTE_SEQUENCE:
+		case PGM_EXTENDED_AUTHORITY:
+			vcpu->arch.pgm.exc_access_id = ar;
+			break;
+		case PGM_PROTECTION:
+			tec_bits->b60 = 1;
+			tec_bits->b61 = 1;
+			break;
+		}
+		if (rc > 0)
+			pgm->code = rc;
+		return rc;
 	}
 	return 0;
 }
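One subtlety in ar_translation() worth restating: ALETs 0 and 1 are architecturally special and never reach the access list; they resolve directly to the primary and secondary ASCEs. A condensed sketch of just that dispatch (hypothetical helper name; the real function continues with the ALE/ASTE walk shown above):

	/* Fast path of ALET resolution: AR 0 or ALET 0 selects the primary
	 * ASCE (CR1), ALET 1 the secondary ASCE (CR7); any other value
	 * needs the full access-list walk. Returns 0 on success, -1 if the
	 * ALE/ASTE/authority walk is required. */
	static int alet_fast_path(struct kvm_vcpu *vcpu, union asce *asce,
				  u8 ar, u32 alet)
	{
		if (ar == 0 || alet == 0) {
			asce->val = vcpu->arch.sie_block->gcr[1];
			return 0;
		}
		if (alet == 1) {
			asce->val = vcpu->arch.sie_block->gcr[7];
			return 0;
		}
		return -1;	/* caller must walk the access list */
	}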
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * @vcpu: virtual cpu
  * @gva: guest virtual address
  * @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
  * @write: indicates if access is a write access
  *
  * Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
  * If the resulting absolute address is not available in the configuration
  * an addressing exception is indicated and @gpa will not be changed.
  *
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * by the architecture
  */
 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
-				     unsigned long *gpa, int write)
+				     unsigned long *gpa, const union asce asce,
+				     int write)
 {
 	union vaddress vaddr = {.addr = gva};
 	union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	union ctlreg0 ctlreg0;
 	unsigned long ptr;
 	int edat1, edat2;
-	union asce asce;
 
 	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
 	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
 	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
-	asce.val = get_vcpu_asce(vcpu);
 	if (asce.r)
 		goto real_address;
 	ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
 	return (ga & ~0x11fful) == 0;
 }
 
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+					  const union asce asce)
 {
 	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
-	union asce asce;
 
 	if (!ctlreg0.lap)
 		return 0;
-	asce.val = get_vcpu_asce(vcpu);
 	if (psw_bits(*psw).t && asce.p)
 		return 0;
 	return 1;
 }
 
-struct trans_exc_code_bits {
-	unsigned long addr : 52; /* Translation-exception Address */
-	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
-	unsigned long      : 7;
-	unsigned long b61  : 1;
-	unsigned long as   : 2;  /* ASCE Identifier */
-};
-
-enum {
-	FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
-	FSI_STORE   = 1, /* Exception was due to store operation */
-	FSI_FETCH   = 2  /* Exception was due to fetch operation */
-};
-
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 			    unsigned long *pages, unsigned long nr_pages,
-			    int write)
+			    const union asce asce, int write)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	struct trans_exc_code_bits *tec_bits;
 	int lap_enabled, rc;
 
-	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
-	tec_bits->as = psw_bits(*psw).as;
-	lap_enabled = low_address_protection_enabled(vcpu);
+	lap_enabled = low_address_protection_enabled(vcpu, asce);
 	while (nr_pages) {
 		ga = kvm_s390_logical_to_effective(vcpu, ga);
 		tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 		}
 		ga &= PAGE_MASK;
 		if (psw_bits(*psw).t) {
-			rc = guest_translate(vcpu, ga, pages, write);
+			rc = guest_translate(vcpu, ga, pages, asce, write);
 			if (rc < 0)
 				return rc;
 			if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		 unsigned long len, int write)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
 
 	if (!len)
 		return 0;
-	/* Access register mode is not supported yet. */
-	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
-		return -EOPNOTSUPP;
+	rc = get_vcpu_asce(vcpu, &asce, ar, write);
+	if (rc)
+		return rc;
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
 	pages = pages_array;
 	if (nr_pages > ARRAY_SIZE(pages_array))
 		pages = vmalloc(nr_pages * sizeof(unsigned long));
 	if (!pages)
 		return -ENOMEM;
-	asce.val = get_vcpu_asce(vcpu);
 	need_ipte_lock = psw_bits(*psw).t && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
-	rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
 	for (idx = 0; idx < nr_pages && !rc; idx++) {
 		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
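A quick aside on is_low_address(), which guest_page_range() relies on: the single mask test (ga & ~0x11fful) == 0 accepts exactly the two low-address-protected ranges, bytes 0-511 and 4096-4607. A standalone check of that claim (plain userspace C, not kernel code):

	#include <assert.h>

	/* Same expression as in gaccess.c: only bits inside 0x11ff may be
	 * set, i.e. ga is in [0, 511] (bits 0-8) or [4096, 4607]
	 * (bit 12 plus bits 0-8). */
	static int is_low_address(unsigned long ga)
	{
		return (ga & ~0x11fful) == 0;
	}

	int main(void)
	{
		assert(is_low_address(0) && is_low_address(511));
		assert(!is_low_address(512) && !is_low_address(4095));
		assert(is_low_address(4096) && is_low_address(4607));
		assert(!is_low_address(4608));
		return 0;
	}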
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 			    unsigned long *gpa, int write)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
 	union asce asce;
 	int rc;
 
-	/* Access register mode is not supported yet. */
-	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
-		return -EOPNOTSUPP;
-
 	gva = kvm_s390_logical_to_effective(vcpu, gva);
-	memset(pgm, 0, sizeof(*pgm));
 	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-	tec->as = psw_bits(*psw).as;
-	tec->fsi = write ? FSI_STORE : FSI_FETCH;
+	rc = get_vcpu_asce(vcpu, &asce, ar, write);
 	tec->addr = gva >> PAGE_SHIFT;
-	if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+	if (rc)
+		return rc;
+	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
 		if (write) {
 			rc = pgm->code = PGM_PROTECTION;
 			return rc;
 		}
 	}
 
-	asce.val = get_vcpu_asce(vcpu);
 	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
-		rc = guest_translate(vcpu, gva, gpa, write);
+		rc = guest_translate(vcpu, gva, gpa, asce, write);
 		if (rc > 0) {
 			if (rc == PGM_PROTECTION)
 				tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
 }
 
 /**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * check_gva_range - test a range of guest virtual addresses for accessibility
+ */
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+		    unsigned long length, int is_write)
+{
+	unsigned long gpa;
+	unsigned long currlen;
+	int rc = 0;
+
+	ipte_lock(vcpu);
+	while (length > 0 && !rc) {
+		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
+		rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+		gva += currlen;
+		length -= currlen;
+	}
+	ipte_unlock(vcpu);
+
+	return rc;
+}
+
+/**
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and sets
  * up vcpu->arch.pgm accordingly if necessary.
  *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 	struct trans_exc_code_bits *tec_bits;
+	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
 
-	if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+	if (!ctlreg0.lap || !is_low_address(gra))
 		return 0;
 
 	memset(pgm, 0, sizeof(*pgm));
 	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
 	tec_bits->fsi = FSI_STORE;
 	tec_bits->as = psw_bits(*psw).as;
-	tec_bits->addr = ga >> PAGE_SHIFT;
+	tec_bits->addr = gra >> PAGE_SHIFT;
 	pgm->code = PGM_PROTECTION;
 
 	return pgm->code;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf15058a..ef03726cc661 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 }
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    unsigned long *gpa, int write);
+			    ar_t ar, unsigned long *gpa, int write);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+		    unsigned long length, int is_write);
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		 unsigned long len, int write);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
+ * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 * If DAT is off data will be copied to guest real or absolute memory.
 * If DAT is on data will be copied to the address space as specified by
 * the address space bits of the PSW:
- * Primary, secondory or home space (access register mode is currently not
- * implemented).
+ * Primary, secondary, home space or access register mode.
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 * if data has been changed in guest space in case of an exception.
 */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		unsigned long len)
 {
-	return access_guest(vcpu, ga, data, len, 1);
+	return access_guest(vcpu, ga, ar, data, len, 1);
 }
 
 /**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
+ * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
 * data will be copied from guest space to kernel space.
 */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
	       unsigned long len)
 {
-	return access_guest(vcpu, ga, data, len, 0);
+	return access_guest(vcpu, ga, ar, data, len, 0);
 }
 
 /**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 void ipte_lock(struct kvm_vcpu *vcpu);
 void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 
 #endif /* __KVM_S390_GACCESS_H */
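With ar_t threaded through, instruction handlers now pass the access register that formed the operand address, exactly as the diag.c hunk earlier in this diff does. A sketch of the resulting call pattern (hypothetical handler, kernel context assumed):

	/* Typical post-change call site: the decoder yields the effective
	 * address and the access register used to form it; read_guest()
	 * can then honour PSW access-register mode. On error, rc is either
	 * a negative errno or a PGM_* code to inject into the guest. */
	static int handle_fetch_example(struct kvm_vcpu *vcpu, u64 ga, ar_t ar)
	{
		u32 val;
		int rc;

		rc = read_guest(vcpu, ga, ar, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		/* ... operate on val ... */
		return 0;
	}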
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 3e8d4092ce30..e97b3455d7e6 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 	if (!wp_info->old_data)
 		return -ENOMEM;
 	/* try to backup the original value */
-	ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
-			 wp_info->len);
+	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+			     wp_info->len);
 	if (ret) {
 		kfree(wp_info->old_data);
 		wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
 			continue;
 
 		/* refetch the wp data and compare it to the old value */
-		if (!read_guest(vcpu, wp_info->phys_addr, temp,
-				wp_info->len)) {
+		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+				    wp_info->len)) {
 			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
 				kfree(temp);
 				return wp_info;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index bebd2157edd0..9e3779e3e496 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
 		pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
 		pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
 		break;
+	case PGM_VECTOR_PROCESSING:
 	case PGM_DATA:
 		pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
 		break;
@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 	/* Make sure that the source is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
-				     &srcaddr, 0);
+				     reg2, &srcaddr, 0);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 	/* Make sure that the destination is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
-				     &dstaddr, 1);
+				     reg1, &dstaddr, 1);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 073b5f387d1d..9de47265ef73 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
 /*
 * handling kvm guest interrupts
 *
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
@@ -17,9 +17,12 @@
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
+#include <linux/vmalloc.h>
 #include <asm/asm-offsets.h>
+#include <asm/dis.h>
 #include <asm/uaccess.h>
 #include <asm/sclp.h>
+#include <asm/isc.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -32,11 +35,6 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
 
-static int is_ioint(u64 type)
-{
-	return ((type & 0xfffe0000u) != 0xfffe0000u);
-}
-
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -72,70 +70,45 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static u64 int_word_to_isc_bits(u32 int_word)
+static int ckc_irq_pending(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.sie_block->ckc <
+	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+		return 0;
+	return ckc_interrupts_enabled(vcpu);
+}
+
+static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
+{
+	return !psw_extint_disabled(vcpu) &&
+	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+}
+
+static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.sie_block->cputm >> 63) &&
+	       cpu_timer_interrupts_enabled(vcpu);
+}
+
+static inline int is_ioirq(unsigned long irq_type)
 {
-	u8 isc = (int_word & 0x38000000) >> 27;
+	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
+		(irq_type <= IRQ_PEND_IO_ISC_7));
+}
 
+static uint64_t isc_to_isc_bits(int isc)
+{
 	return (0x80 >> isc) << 24;
 }
 
-static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
-						   struct kvm_s390_interrupt_info *inti)
+static inline u8 int_word_to_isc(u32 int_word)
 {
-	switch (inti->type) {
-	case KVM_S390_INT_EXTERNAL_CALL:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
-			return 1;
-		return 0;
-	case KVM_S390_INT_EMERGENCY:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
-			return 1;
-		return 0;
-	case KVM_S390_INT_CLOCK_COMP:
-		return ckc_interrupts_enabled(vcpu);
-	case KVM_S390_INT_CPU_TIMER:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
-			return 1;
-		return 0;
-	case KVM_S390_INT_SERVICE:
-	case KVM_S390_INT_PFAULT_INIT:
-	case KVM_S390_INT_PFAULT_DONE:
-	case KVM_S390_INT_VIRTIO:
-		if (psw_extint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
-			return 1;
-		return 0;
-	case KVM_S390_PROGRAM_INT:
-	case KVM_S390_SIGP_STOP:
-	case KVM_S390_SIGP_SET_PREFIX:
-	case KVM_S390_RESTART:
-		return 1;
-	case KVM_S390_MCHK:
-		if (psw_mchk_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
-			return 1;
-		return 0;
-	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (psw_ioint_disabled(vcpu))
-			return 0;
-		if (vcpu->arch.sie_block->gcr[6] &
-		    int_word_to_isc_bits(inti->io.io_int_word))
-			return 1;
-		return 0;
-	default:
-		printk(KERN_WARNING "illegal interrupt type %llx\n",
-		       inti->type);
-		BUG();
-	}
-	return 0;
+	return (int_word & 0x38000000) >> 27;
+}
+
+static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.float_int.pending_irqs;
 }
 
 static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
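The two small helpers added above are inverses over the interruption-subclass field: int_word_to_isc() pulls the 3-bit ISC out of the I/O interruption word, and isc_to_isc_bits() maps an ISC to its enablement bit in CR6 (ISC 0 being the most significant bit of that byte). A standalone check of the bit math:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t isc_to_isc_bits(int isc)
	{
		return (uint64_t)(0x80 >> isc) << 24;
	}

	static uint8_t int_word_to_isc(uint32_t int_word)
	{
		return (int_word & 0x38000000) >> 27;
	}

	int main(void)
	{
		/* ISC 0 -> 0x80000000, ISC 3 -> 0x10000000, ISC 7 -> 0x01000000 */
		assert(isc_to_isc_bits(0) == 0x80000000u);
		assert(isc_to_isc_bits(3) == 0x10000000u);
		assert(isc_to_isc_bits(7) == 0x01000000u);
		/* the ISC sits in bits 2-4 of the I/O interruption word */
		assert(int_word_to_isc(3u << 27) == 3);
		return 0;
	}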
@@ -143,12 +116,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
 	return vcpu->arch.local_int.pending_irqs;
 }
 
-static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
+				  unsigned long active_mask)
+{
+	int i;
+
+	for (i = 0; i <= MAX_ISC; i++)
+		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
+			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
+
+	return active_mask;
+}
+
+static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 {
-	unsigned long active_mask = pending_local_irqs(vcpu);
+	unsigned long active_mask;
+
+	active_mask = pending_local_irqs(vcpu);
+	active_mask |= pending_floating_irqs(vcpu);
 
 	if (psw_extint_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_EXT_MASK;
+	if (psw_ioint_disabled(vcpu))
+		active_mask &= ~IRQ_PEND_IO_MASK;
+	else
+		active_mask = disable_iscs(vcpu, active_mask);
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
 		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
@@ -157,8 +149,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
 	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
 		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
+	if (!(vcpu->arch.sie_block->gcr[14] &
+	      vcpu->kvm->arch.float_int.mchk.cr14))
+		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
 
 	/*
	 * STOP irqs will never be actively delivered. They are triggered via
@@ -200,6 +197,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
+static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
+{
+	if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+		return;
+	else if (psw_ioint_disabled(vcpu))
+		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
+	else
+		vcpu->arch.sie_block->lctl |= LCTL_CR6;
+}
+
 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 {
 	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
@@ -226,47 +233,17 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
 		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
 }
 
-/* Set interception request for non-deliverable local interrupts */
-static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+/* Set interception request for non-deliverable interrupts */
+static void set_intercept_indicators(struct kvm_vcpu *vcpu)
 {
+	set_intercept_indicators_io(vcpu);
 	set_intercept_indicators_ext(vcpu);
 	set_intercept_indicators_mchk(vcpu);
 	set_intercept_indicators_stop(vcpu);
 }
 
-static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
-				      struct kvm_s390_interrupt_info *inti)
-{
-	switch (inti->type) {
-	case KVM_S390_INT_SERVICE:
-	case KVM_S390_INT_PFAULT_DONE:
-	case KVM_S390_INT_VIRTIO:
-		if (psw_extint_disabled(vcpu))
-			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
-		else
-			vcpu->arch.sie_block->lctl |= LCTL_CR0;
-		break;
-	case KVM_S390_MCHK:
-		if (psw_mchk_disabled(vcpu))
-			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
-		else
-			vcpu->arch.sie_block->lctl |= LCTL_CR14;
-		break;
-	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (psw_ioint_disabled(vcpu))
-			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
-		else
-			vcpu->arch.sie_block->lctl |= LCTL_CR6;
-		break;
-	default:
-		BUG();
-	}
-}
-
 static u16 get_ilc(struct kvm_vcpu *vcpu)
 {
-	const unsigned short table[] = { 2, 4, 4, 6 };
-
 	switch (vcpu->arch.sie_block->icptcode) {
 	case ICPT_INST:
 	case ICPT_INSTPROGI:
@@ -274,7 +251,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
 	case ICPT_PARTEXEC:
 	case ICPT_IOINST:
 		/* last instruction only stored for these icptcodes */
-		return table[vcpu->arch.sie_block->ipa >> 14];
+		return insn_length(vcpu->arch.sie_block->ipa >> 8);
 	case ICPT_PROGI:
 		return vcpu->arch.sie_block->pgmilc;
 	default:
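The get_ilc() change replaces the open-coded { 2, 4, 4, 6 } table, indexed by the top two bits of the instruction parsing area, with insn_length() from <asm/dis.h>, which derives the same length from the first opcode byte (00xxxxxx = 2 bytes, 01xxxxxx/10xxxxxx = 4, 11xxxxxx = 6). A standalone equivalence check, with insn_length()'s formula reproduced from asm/dis.h as of this merge (treat the exact formula as an assumption):

	#include <assert.h>

	/* Formula as in arch/s390/include/asm/dis.h: the instruction length
	 * code lives in the two top bits of the first opcode byte. */
	static int insn_length(unsigned char code)
	{
		return ((((int) code + 64) >> 7) + 1) << 1;
	}

	int main(void)
	{
		const unsigned short table[] = { 2, 4, 4, 6 }; /* old table */
		int op;

		/* new: insn_length(ipa >> 8); old: table[ipa >> 14]. For a
		 * 16-bit ipa, ipa >> 14 equals (first byte) >> 6, so the
		 * two agree for every possible first opcode byte. */
		for (op = 0; op <= 0xff; op++)
			assert(insn_length(op) == table[op >> 6]);
		return 0;
	}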
@@ -350,38 +327,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
 
 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	struct kvm_s390_mchk_info mchk;
-	int rc;
+	struct kvm_s390_mchk_info mchk = {};
+	unsigned long adtl_status_addr;
+	int deliver = 0;
+	int rc = 0;
 
+	spin_lock(&fi->lock);
 	spin_lock(&li->lock);
-	mchk = li->irq.mchk;
+	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
+	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
+		/*
+		 * If there was an exigent machine check pending, then any
+		 * repressible machine checks that might have been pending
+		 * are indicated along with it, so always clear bits for
+		 * repressible and exigent interrupts
+		 */
+		mchk = li->irq.mchk;
+		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+		memset(&li->irq.mchk, 0, sizeof(mchk));
+		deliver = 1;
+	}
 	/*
-	 * If there was an exigent machine check pending, then any repressible
-	 * machine checks that might have been pending are indicated along
-	 * with it, so always clear both bits
+	 * We indicate floating repressible conditions along with
+	 * other pending conditions. Channel Report Pending and Channel
+	 * Subsystem damage are the only two and are indicated by
+	 * bits in mcic and masked in cr14.
 	 */
-	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
-	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
-	memset(&li->irq.mchk, 0, sizeof(mchk));
+	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+		mchk.mcic |= fi->mchk.mcic;
+		mchk.cr14 |= fi->mchk.cr14;
+		memset(&fi->mchk, 0, sizeof(mchk));
+		deliver = 1;
+	}
 	spin_unlock(&li->lock);
+	spin_unlock(&fi->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
-		   mchk.mcic);
-	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
-					 mchk.cr14, mchk.mcic);
-
-	rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
-	rc |= put_guest_lc(vcpu, mchk.mcic,
-			   (u64 __user *) __LC_MCCK_CODE);
-	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
-			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
-	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
-			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
-	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
-			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
-	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
-			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+	if (deliver) {
+		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+			   mchk.mcic);
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+						 KVM_S390_MCHK,
+						 mchk.cr14, mchk.mcic);
+
+		rc = kvm_s390_vcpu_store_status(vcpu,
+						KVM_S390_STORE_STATUS_PREFIXED);
+		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+				    &adtl_status_addr,
+				    sizeof(unsigned long));
+		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
+						      adtl_status_addr);
+		rc |= put_guest_lc(vcpu, mchk.mcic,
+				   (u64 __user *) __LC_MCCK_CODE);
+		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+				     &mchk.fixed_logout,
+				     sizeof(mchk.fixed_logout));
+		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+				     &vcpu->arch.sie_block->gpsw,
+				     sizeof(psw_t));
+		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+				    &vcpu->arch.sie_block->gpsw,
+				    sizeof(psw_t));
+	}
 	return rc ? -EFAULT : 0;
 }
@@ -484,7 +495,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
484 | { | 495 | { |
485 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 496 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
486 | struct kvm_s390_pgm_info pgm_info; | 497 | struct kvm_s390_pgm_info pgm_info; |
487 | int rc = 0; | 498 | int rc = 0, nullifying = false; |
488 | u16 ilc = get_ilc(vcpu); | 499 | u16 ilc = get_ilc(vcpu); |
489 | 500 | ||
490 | spin_lock(&li->lock); | 501 | spin_lock(&li->lock); |
@@ -509,6 +520,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
509 | case PGM_LX_TRANSLATION: | 520 | case PGM_LX_TRANSLATION: |
510 | case PGM_PRIMARY_AUTHORITY: | 521 | case PGM_PRIMARY_AUTHORITY: |
511 | case PGM_SECONDARY_AUTHORITY: | 522 | case PGM_SECONDARY_AUTHORITY: |
523 | nullifying = true; | ||
524 | /* fall through */ | ||
512 | case PGM_SPACE_SWITCH: | 525 | case PGM_SPACE_SWITCH: |
513 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, | 526 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
514 | (u64 *)__LC_TRANS_EXC_CODE); | 527 | (u64 *)__LC_TRANS_EXC_CODE); |
@@ -521,6 +534,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
521 | case PGM_EXTENDED_AUTHORITY: | 534 | case PGM_EXTENDED_AUTHORITY: |
522 | rc = put_guest_lc(vcpu, pgm_info.exc_access_id, | 535 | rc = put_guest_lc(vcpu, pgm_info.exc_access_id, |
523 | (u8 *)__LC_EXC_ACCESS_ID); | 536 | (u8 *)__LC_EXC_ACCESS_ID); |
537 | nullifying = true; | ||
524 | break; | 538 | break; |
525 | case PGM_ASCE_TYPE: | 539 | case PGM_ASCE_TYPE: |
526 | case PGM_PAGE_TRANSLATION: | 540 | case PGM_PAGE_TRANSLATION: |
@@ -534,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
534 | (u8 *)__LC_EXC_ACCESS_ID); | 548 | (u8 *)__LC_EXC_ACCESS_ID); |
535 | rc |= put_guest_lc(vcpu, pgm_info.op_access_id, | 549 | rc |= put_guest_lc(vcpu, pgm_info.op_access_id, |
536 | (u8 *)__LC_OP_ACCESS_ID); | 550 | (u8 *)__LC_OP_ACCESS_ID); |
551 | nullifying = true; | ||
537 | break; | 552 | break; |
538 | case PGM_MONITOR: | 553 | case PGM_MONITOR: |
539 | rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, | 554 | rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, |
@@ -541,6 +556,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
541 | rc |= put_guest_lc(vcpu, pgm_info.mon_code, | 556 | rc |= put_guest_lc(vcpu, pgm_info.mon_code, |
542 | (u64 *)__LC_MON_CODE); | 557 | (u64 *)__LC_MON_CODE); |
543 | break; | 558 | break; |
559 | case PGM_VECTOR_PROCESSING: | ||
544 | case PGM_DATA: | 560 | case PGM_DATA: |
545 | rc = put_guest_lc(vcpu, pgm_info.data_exc_code, | 561 | rc = put_guest_lc(vcpu, pgm_info.data_exc_code, |
546 | (u32 *)__LC_DATA_EXC_CODE); | 562 | (u32 *)__LC_DATA_EXC_CODE); |
@@ -551,6 +567,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
551 | rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, | 567 | rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, |
552 | (u8 *)__LC_EXC_ACCESS_ID); | 568 | (u8 *)__LC_EXC_ACCESS_ID); |
553 | break; | 569 | break; |
570 | case PGM_STACK_FULL: | ||
571 | case PGM_STACK_EMPTY: | ||
572 | case PGM_STACK_SPECIFICATION: | ||
573 | case PGM_STACK_TYPE: | ||
574 | case PGM_STACK_OPERATION: | ||
575 | case PGM_TRACE_TABEL: | ||
576 | case PGM_CRYPTO_OPERATION: | ||
577 | nullifying = true; | ||
578 | break; | ||
554 | } | 579 | } |
555 | 580 | ||
556 | if (pgm_info.code & PGM_PER) { | 581 | if (pgm_info.code & PGM_PER) { |
@@ -564,7 +589,12 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
564 | (u8 *) __LC_PER_ACCESS_ID); | 589 | (u8 *) __LC_PER_ACCESS_ID); |
565 | } | 590 | } |
566 | 591 | ||
592 | if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) | ||
593 | kvm_s390_rewind_psw(vcpu, ilc); | ||
594 | |||
567 | rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); | 595 | rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); |
596 | rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, | ||
597 | (u64 *) __LC_LAST_BREAK); | ||
568 | rc |= put_guest_lc(vcpu, pgm_info.code, | 598 | rc |= put_guest_lc(vcpu, pgm_info.code, |
569 | (u16 *)__LC_PGM_INT_CODE); | 599 | (u16 *)__LC_PGM_INT_CODE); |
570 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, | 600 | rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, |
@@ -574,16 +604,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) | |||
574 | return rc ? -EFAULT : 0; | 604 | return rc ? -EFAULT : 0; |
575 | } | 605 | } |
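The nullifying cases collected in the hunk above all share one requirement: when such a program interruption is recognized on an instruction intercept, the PSW must be moved back so the guest re-executes the instruction after handling the exception. A minimal standalone sketch of the rewind, assuming a flat 64-bit address space (the kernel's kvm_s390_rewind_psw() additionally handles addressing-mode wraparound):

#include <stdio.h>
#include <stdint.h>

/* Move the PSW address back by the instruction-length code
 * (2, 4 or 6 bytes on s390) so the instruction is re-executed. */
static uint64_t rewind_psw(uint64_t psw_addr, int ilc)
{
        return psw_addr - ilc;
}

int main(void)
{
        /* a 4-byte instruction at 0x10002 took a nullifying exception */
        printf("rewound PSW address: %#llx\n",
               (unsigned long long)rewind_psw(0x10006, 4));
        return 0;
}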
576 | 606 | ||
577 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu, | 607 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu) |
578 | struct kvm_s390_interrupt_info *inti) | ||
579 | { | 608 | { |
580 | int rc; | 609 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
610 | struct kvm_s390_ext_info ext; | ||
611 | int rc = 0; | ||
612 | |||
613 | spin_lock(&fi->lock); | ||
614 | if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { | ||
615 | spin_unlock(&fi->lock); | ||
616 | return 0; | ||
617 | } | ||
618 | ext = fi->srv_signal; | ||
619 | memset(&fi->srv_signal, 0, sizeof(ext)); | ||
620 | clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); | ||
621 | spin_unlock(&fi->lock); | ||
581 | 622 | ||
582 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", | 623 | VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", |
583 | inti->ext.ext_params); | 624 | ext.ext_params); |
584 | vcpu->stat.deliver_service_signal++; | 625 | vcpu->stat.deliver_service_signal++; |
585 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 626 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
586 | inti->ext.ext_params, 0); | 627 | ext.ext_params, 0); |
587 | 628 | ||
588 | rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); | 629 | rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
589 | rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); | 630 | rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
@@ -591,106 +632,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu, | |||
591 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 632 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
592 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 633 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
593 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 634 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
594 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | 635 | rc |= put_guest_lc(vcpu, ext.ext_params, |
595 | (u32 *)__LC_EXT_PARAMS); | 636 | (u32 *)__LC_EXT_PARAMS); |
637 | |||
596 | return rc ? -EFAULT : 0; | 638 | return rc ? -EFAULT : 0; |
597 | } | 639 | } |
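The reworked __deliver_service() above follows a claim-then-deliver pattern: the payload is snapshotted and the pending bit cleared while fi->lock is held, and the guest lowcore accesses (which may fault) run afterwards without the lock. A small pthread sketch of the same pattern; the types, names, and bit number are illustrative:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct ext_info { uint32_t ext_params; };

struct float_int {
        pthread_mutex_t lock;
        unsigned long pending_irqs;
        struct ext_info srv_signal;
};

#define IRQ_PEND_EXT_SERVICE 0   /* illustrative bit number */

static int deliver_service(struct float_int *fi)
{
        struct ext_info ext;

        pthread_mutex_lock(&fi->lock);
        if (!(fi->pending_irqs & (1UL << IRQ_PEND_EXT_SERVICE))) {
                pthread_mutex_unlock(&fi->lock);
                return 0;                 /* nothing to deliver */
        }
        ext = fi->srv_signal;             /* snapshot the payload */
        memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
        fi->pending_irqs &= ~(1UL << IRQ_PEND_EXT_SERVICE);
        pthread_mutex_unlock(&fi->lock);

        /* guest lowcore writes would happen here, outside the lock,
         * using only the local snapshot in 'ext' */
        (void)ext;
        return 0;
}

int main(void)
{
        struct float_int fi = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .pending_irqs = 1UL << IRQ_PEND_EXT_SERVICE,
                .srv_signal = { 0x40 },
        };

        return deliver_service(&fi);
}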
598 | 640 | ||
599 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu, | 641 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) |
600 | struct kvm_s390_interrupt_info *inti) | ||
601 | { | 642 | { |
602 | int rc; | 643 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
644 | struct kvm_s390_interrupt_info *inti; | ||
645 | int rc = 0; | ||
603 | 646 | ||
604 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | 647 | spin_lock(&fi->lock); |
605 | KVM_S390_INT_PFAULT_DONE, 0, | 648 | inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], |
606 | inti->ext.ext_params2); | 649 | struct kvm_s390_interrupt_info, |
650 | list); | ||
651 | if (inti) { | ||
652 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
653 | KVM_S390_INT_PFAULT_DONE, 0, | ||
654 | inti->ext.ext_params2); | ||
655 | list_del(&inti->list); | ||
656 | fi->counters[FIRQ_CNTR_PFAULT] -= 1; | ||
657 | } | ||
658 | if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) | ||
659 | clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); | ||
660 | spin_unlock(&fi->lock); | ||
607 | 661 | ||
608 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); | 662 | if (inti) { |
609 | rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR); | 663 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, |
610 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 664 | (u16 *)__LC_EXT_INT_CODE); |
611 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 665 | rc |= put_guest_lc(vcpu, PFAULT_DONE, |
612 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 666 | (u16 *)__LC_EXT_CPU_ADDR); |
613 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 667 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
614 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | 668 | &vcpu->arch.sie_block->gpsw, |
615 | (u64 *)__LC_EXT_PARAMS2); | 669 | sizeof(psw_t)); |
670 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | ||
671 | &vcpu->arch.sie_block->gpsw, | ||
672 | sizeof(psw_t)); | ||
673 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | ||
674 | (u64 *)__LC_EXT_PARAMS2); | ||
675 | kfree(inti); | ||
676 | } | ||
616 | return rc ? -EFAULT : 0; | 677 | return rc ? -EFAULT : 0; |
617 | } | 678 | } |
618 | 679 | ||
619 | static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu, | 680 | static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) |
620 | struct kvm_s390_interrupt_info *inti) | ||
621 | { | 681 | { |
622 | int rc; | 682 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
683 | struct kvm_s390_interrupt_info *inti; | ||
684 | int rc = 0; | ||
623 | 685 | ||
624 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", | 686 | spin_lock(&fi->lock); |
625 | inti->ext.ext_params, inti->ext.ext_params2); | 687 | inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], |
626 | vcpu->stat.deliver_virtio_interrupt++; | 688 | struct kvm_s390_interrupt_info, |
627 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | 689 | list); |
628 | inti->ext.ext_params, | 690 | if (inti) { |
629 | inti->ext.ext_params2); | 691 | VCPU_EVENT(vcpu, 4, |
692 | "interrupt: virtio parm:%x,parm64:%llx", | ||
693 | inti->ext.ext_params, inti->ext.ext_params2); | ||
694 | vcpu->stat.deliver_virtio_interrupt++; | ||
695 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
696 | inti->type, | ||
697 | inti->ext.ext_params, | ||
698 | inti->ext.ext_params2); | ||
699 | list_del(&inti->list); | ||
700 | fi->counters[FIRQ_CNTR_VIRTIO] -= 1; | ||
701 | } | ||
702 | if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) | ||
703 | clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); | ||
704 | spin_unlock(&fi->lock); | ||
630 | 705 | ||
631 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE); | 706 | if (inti) { |
632 | rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR); | 707 | rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, |
633 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, | 708 | (u16 *)__LC_EXT_INT_CODE); |
634 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 709 | rc |= put_guest_lc(vcpu, VIRTIO_PARAM, |
635 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, | 710 | (u16 *)__LC_EXT_CPU_ADDR); |
636 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | 711 | rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
637 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | 712 | &vcpu->arch.sie_block->gpsw, |
638 | (u32 *)__LC_EXT_PARAMS); | 713 | sizeof(psw_t)); |
639 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | 714 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
640 | (u64 *)__LC_EXT_PARAMS2); | 715 | &vcpu->arch.sie_block->gpsw, |
716 | sizeof(psw_t)); | ||
717 | rc |= put_guest_lc(vcpu, inti->ext.ext_params, | ||
718 | (u32 *)__LC_EXT_PARAMS); | ||
719 | rc |= put_guest_lc(vcpu, inti->ext.ext_params2, | ||
720 | (u64 *)__LC_EXT_PARAMS2); | ||
721 | kfree(inti); | ||
722 | } | ||
641 | return rc ? -EFAULT : 0; | 723 | return rc ? -EFAULT : 0; |
642 | } | 724 | } |
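For the list-backed floating interrupts (pfault-done, virtio, and I/O below), the hunks add a counted dequeue instead: pop the head entry under the lock, decrement the per-type counter, and clear the pending bit only once the list has drained. A toy single-threaded sketch with the locking elided:

struct irq { struct irq *next; };

struct firq_list {
        struct irq *head;   /* stands in for fi->lists[...] */
        int counter;        /* stands in for fi->counters[...] */
        int pending;        /* stands in for the IRQ_PEND_* bit */
};

/* list_first_entry_or_null() analogue: pop the head, drop the
 * counter, clear the pending flag when the list becomes empty. */
static struct irq *dequeue(struct firq_list *l)
{
        struct irq *irq = l->head;

        if (irq) {
                l->head = irq->next;
                l->counter -= 1;
        }
        if (!l->head)
                l->pending = 0;
        return irq;         /* delivered to the guest, then freed */
}

int main(void)
{
        struct irq a = { 0 };
        struct firq_list l = { &a, 1, 1 };

        while (dequeue(&l))
                ;
        return l.pending;   /* 0: list drained, bit cleared */
}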
643 | 725 | ||
644 | static int __must_check __deliver_io(struct kvm_vcpu *vcpu, | 726 | static int __must_check __deliver_io(struct kvm_vcpu *vcpu, |
645 | struct kvm_s390_interrupt_info *inti) | 727 | unsigned long irq_type) |
646 | { | 728 | { |
647 | int rc; | 729 | struct list_head *isc_list; |
730 | struct kvm_s390_float_interrupt *fi; | ||
731 | struct kvm_s390_interrupt_info *inti = NULL; | ||
732 | int rc = 0; | ||
648 | 733 | ||
649 | VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); | 734 | fi = &vcpu->kvm->arch.float_int; |
650 | vcpu->stat.deliver_io_int++; | ||
651 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, | ||
652 | ((__u32)inti->io.subchannel_id << 16) | | ||
653 | inti->io.subchannel_nr, | ||
654 | ((__u64)inti->io.io_int_parm << 32) | | ||
655 | inti->io.io_int_word); | ||
656 | |||
657 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, | ||
658 | (u16 *)__LC_SUBCHANNEL_ID); | ||
659 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, | ||
660 | (u16 *)__LC_SUBCHANNEL_NR); | ||
661 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, | ||
662 | (u32 *)__LC_IO_INT_PARM); | ||
663 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, | ||
664 | (u32 *)__LC_IO_INT_WORD); | ||
665 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, | ||
666 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
667 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, | ||
668 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
669 | return rc ? -EFAULT : 0; | ||
670 | } | ||
671 | 735 | ||
672 | static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu, | 736 | spin_lock(&fi->lock); |
673 | struct kvm_s390_interrupt_info *inti) | 737 | isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0]; |
674 | { | 738 | inti = list_first_entry_or_null(isc_list, |
675 | struct kvm_s390_mchk_info *mchk = &inti->mchk; | 739 | struct kvm_s390_interrupt_info, |
676 | int rc; | 740 | list); |
741 | if (inti) { | ||
742 | VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); | ||
743 | vcpu->stat.deliver_io_int++; | ||
744 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, | ||
745 | inti->type, | ||
746 | ((__u32)inti->io.subchannel_id << 16) | | ||
747 | inti->io.subchannel_nr, | ||
748 | ((__u64)inti->io.io_int_parm << 32) | | ||
749 | inti->io.io_int_word); | ||
750 | list_del(&inti->list); | ||
751 | fi->counters[FIRQ_CNTR_IO] -= 1; | ||
752 | } | ||
753 | if (list_empty(isc_list)) | ||
754 | clear_bit(irq_type, &fi->pending_irqs); | ||
755 | spin_unlock(&fi->lock); | ||
756 | |||
757 | if (inti) { | ||
758 | rc = put_guest_lc(vcpu, inti->io.subchannel_id, | ||
759 | (u16 *)__LC_SUBCHANNEL_ID); | ||
760 | rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, | ||
761 | (u16 *)__LC_SUBCHANNEL_NR); | ||
762 | rc |= put_guest_lc(vcpu, inti->io.io_int_parm, | ||
763 | (u32 *)__LC_IO_INT_PARM); | ||
764 | rc |= put_guest_lc(vcpu, inti->io.io_int_word, | ||
765 | (u32 *)__LC_IO_INT_WORD); | ||
766 | rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, | ||
767 | &vcpu->arch.sie_block->gpsw, | ||
768 | sizeof(psw_t)); | ||
769 | rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, | ||
770 | &vcpu->arch.sie_block->gpsw, | ||
771 | sizeof(psw_t)); | ||
772 | kfree(inti); | ||
773 | } | ||
677 | 774 | ||
678 | VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", | ||
679 | mchk->mcic); | ||
680 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK, | ||
681 | mchk->cr14, mchk->mcic); | ||
682 | |||
683 | rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); | ||
684 | rc |= put_guest_lc(vcpu, mchk->mcic, | ||
685 | (u64 __user *) __LC_MCCK_CODE); | ||
686 | rc |= put_guest_lc(vcpu, mchk->failing_storage_address, | ||
687 | (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); | ||
688 | rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, | ||
689 | &mchk->fixed_logout, sizeof(mchk->fixed_logout)); | ||
690 | rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, | ||
691 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
692 | rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, | ||
693 | &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); | ||
694 | return rc ? -EFAULT : 0; | 775 | return rc ? -EFAULT : 0; |
695 | } | 776 | } |
696 | 777 | ||
@@ -698,6 +779,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); | |||
698 | 779 | ||
699 | static const deliver_irq_t deliver_irq_funcs[] = { | 780 | static const deliver_irq_t deliver_irq_funcs[] = { |
700 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, | 781 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, |
782 | [IRQ_PEND_MCHK_REP] = __deliver_machine_check, | ||
701 | [IRQ_PEND_PROG] = __deliver_prog, | 783 | [IRQ_PEND_PROG] = __deliver_prog, |
702 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, | 784 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, |
703 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, | 785 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, |
@@ -706,36 +788,11 @@ static const deliver_irq_t deliver_irq_funcs[] = { | |||
706 | [IRQ_PEND_RESTART] = __deliver_restart, | 788 | [IRQ_PEND_RESTART] = __deliver_restart, |
707 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, | 789 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, |
708 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, | 790 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, |
791 | [IRQ_PEND_EXT_SERVICE] = __deliver_service, | ||
792 | [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, | ||
793 | [IRQ_PEND_VIRTIO] = __deliver_virtio, | ||
709 | }; | 794 | }; |
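The per-type delivery functions are reached through a designated-initializer jump table indexed by pending-bit number; note how two bits (MCHK_EX and MCHK_REP) can share one handler. A self-contained sketch of the dispatch idiom, with made-up IRQ indices:

#include <stdio.h>

enum { IRQ_TOY_MCHK, IRQ_TOY_PROG, IRQ_TOY_COUNT };  /* illustrative */

typedef int (*deliver_fn)(void);

static int deliver_mchk(void) { return puts("machine check"), 0; }
static int deliver_prog(void) { return puts("program irq"), 0; }

static const deliver_fn deliver_funcs[IRQ_TOY_COUNT] = {
        [IRQ_TOY_MCHK] = deliver_mchk,
        [IRQ_TOY_PROG] = deliver_prog,
};

int main(void)
{
        for (int i = 0; i < IRQ_TOY_COUNT; i++)
                if (deliver_funcs[i])    /* unassigned slots stay NULL */
                        deliver_funcs[i]();
        return 0;
}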
710 | 795 | ||
711 | static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, | ||
712 | struct kvm_s390_interrupt_info *inti) | ||
713 | { | ||
714 | int rc; | ||
715 | |||
716 | switch (inti->type) { | ||
717 | case KVM_S390_INT_SERVICE: | ||
718 | rc = __deliver_service(vcpu, inti); | ||
719 | break; | ||
720 | case KVM_S390_INT_PFAULT_DONE: | ||
721 | rc = __deliver_pfault_done(vcpu, inti); | ||
722 | break; | ||
723 | case KVM_S390_INT_VIRTIO: | ||
724 | rc = __deliver_virtio(vcpu, inti); | ||
725 | break; | ||
726 | case KVM_S390_MCHK: | ||
727 | rc = __deliver_mchk_floating(vcpu, inti); | ||
728 | break; | ||
729 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
730 | rc = __deliver_io(vcpu, inti); | ||
731 | break; | ||
732 | default: | ||
733 | BUG(); | ||
734 | } | ||
735 | |||
736 | return rc; | ||
737 | } | ||
738 | |||
739 | /* Check whether an external call is pending (deliverable or not) */ | 796 | /* Check whether an external call is pending (deliverable or not) */ |
740 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | 797 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) |
741 | { | 798 | { |
@@ -751,21 +808,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | |||
751 | 808 | ||
752 | int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) | 809 | int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) |
753 | { | 810 | { |
754 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
755 | struct kvm_s390_interrupt_info *inti; | ||
756 | int rc; | 811 | int rc; |
757 | 812 | ||
758 | rc = !!deliverable_local_irqs(vcpu); | 813 | rc = !!deliverable_irqs(vcpu); |
759 | |||
760 | if ((!rc) && atomic_read(&fi->active)) { | ||
761 | spin_lock(&fi->lock); | ||
762 | list_for_each_entry(inti, &fi->list, list) | ||
763 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
764 | rc = 1; | ||
765 | break; | ||
766 | } | ||
767 | spin_unlock(&fi->lock); | ||
768 | } | ||
769 | 814 | ||
770 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) | 815 | if (!rc && kvm_cpu_has_pending_timer(vcpu)) |
771 | rc = 1; | 816 | rc = 1; |
@@ -784,12 +829,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) | |||
784 | 829 | ||
785 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 830 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
786 | { | 831 | { |
787 | if (!(vcpu->arch.sie_block->ckc < | 832 | return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); |
788 | get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) | ||
789 | return 0; | ||
790 | if (!ckc_interrupts_enabled(vcpu)) | ||
791 | return 0; | ||
792 | return 1; | ||
793 | } | 833 | } |
794 | 834 | ||
795 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | 835 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
@@ -884,60 +924,45 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
884 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | 924 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) |
885 | { | 925 | { |
886 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 926 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
887 | struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; | ||
888 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
889 | deliver_irq_t func; | 927 | deliver_irq_t func; |
890 | int deliver; | ||
891 | int rc = 0; | 928 | int rc = 0; |
892 | unsigned long irq_type; | 929 | unsigned long irq_type; |
893 | unsigned long deliverable_irqs; | 930 | unsigned long irqs; |
894 | 931 | ||
895 | __reset_intercept_indicators(vcpu); | 932 | __reset_intercept_indicators(vcpu); |
896 | 933 | ||
897 | /* pending ckc conditions might have been invalidated */ | 934 | /* pending ckc conditions might have been invalidated */ |
898 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | 935 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
899 | if (kvm_cpu_has_pending_timer(vcpu)) | 936 | if (ckc_irq_pending(vcpu)) |
900 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); | 937 | set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
901 | 938 | ||
939 | /* pending cpu timer conditions might have been invalidated */ | ||
940 | clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | ||
941 | if (cpu_timer_irq_pending(vcpu)) | ||
942 | set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); | ||
943 | |||
902 | do { | 944 | do { |
903 | deliverable_irqs = deliverable_local_irqs(vcpu); | 945 | irqs = deliverable_irqs(vcpu); |
904 | /* bits are in the order of interrupt priority */ | 946 | /* bits are in the order of interrupt priority */ |
905 | irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT); | 947 | irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT); |
906 | if (irq_type == IRQ_PEND_COUNT) | 948 | if (irq_type == IRQ_PEND_COUNT) |
907 | break; | 949 | break; |
908 | func = deliver_irq_funcs[irq_type]; | 950 | if (is_ioirq(irq_type)) { |
909 | if (!func) { | 951 | rc = __deliver_io(vcpu, irq_type); |
910 | WARN_ON_ONCE(func == NULL); | 952 | } else { |
911 | clear_bit(irq_type, &li->pending_irqs); | 953 | func = deliver_irq_funcs[irq_type]; |
912 | continue; | 954 | if (!func) { |
955 | WARN_ON_ONCE(func == NULL); | ||
956 | clear_bit(irq_type, &li->pending_irqs); | ||
957 | continue; | ||
958 | } | ||
959 | rc = func(vcpu); | ||
913 | } | 960 | } |
914 | rc = func(vcpu); | 961 | if (rc) |
915 | } while (!rc && irq_type != IRQ_PEND_COUNT); | 962 | break; |
963 | } while (!rc); | ||
916 | 964 | ||
917 | set_intercept_indicators_local(vcpu); | 965 | set_intercept_indicators(vcpu); |
918 | |||
919 | if (!rc && atomic_read(&fi->active)) { | ||
920 | do { | ||
921 | deliver = 0; | ||
922 | spin_lock(&fi->lock); | ||
923 | list_for_each_entry_safe(inti, n, &fi->list, list) { | ||
924 | if (__interrupt_is_deliverable(vcpu, inti)) { | ||
925 | list_del(&inti->list); | ||
926 | fi->irq_count--; | ||
927 | deliver = 1; | ||
928 | break; | ||
929 | } | ||
930 | __set_intercept_indicator(vcpu, inti); | ||
931 | } | ||
932 | if (list_empty(&fi->list)) | ||
933 | atomic_set(&fi->active, 0); | ||
934 | spin_unlock(&fi->lock); | ||
935 | if (deliver) { | ||
936 | rc = __deliver_floating_interrupt(vcpu, inti); | ||
937 | kfree(inti); | ||
938 | } | ||
939 | } while (!rc && deliver); | ||
940 | } | ||
941 | 966 | ||
942 | return rc; | 967 | return rc; |
943 | } | 968 | } |
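Delivery order in the rework above falls out of the bit layout: deliverable_irqs() yields a mask whose bits are ordered by interrupt priority, so find_first_bit() always selects the most urgent pending type. A toy loop showing the idea; __builtin_ctzl() stands in for find_first_bit(), and the static mask stands in for the mask the kernel recomputes on every pass:

#include <stdio.h>

int main(void)
{
        unsigned long pending = 0x14;            /* bits 2 and 4 set */

        while (pending) {
                int irq_type = __builtin_ctzl(pending); /* lowest bit */

                printf("delivering irq type %d\n", irq_type);
                pending &= ~(1UL << irq_type);   /* delivered */
        }
        return 0;
}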
@@ -1172,80 +1197,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) | |||
1172 | return 0; | 1197 | return 0; |
1173 | } | 1198 | } |
1174 | 1199 | ||
1200 | static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, | ||
1201 | int isc, u32 schid) | ||
1202 | { | ||
1203 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1204 | struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; | ||
1205 | struct kvm_s390_interrupt_info *iter; | ||
1206 | u16 id = (schid & 0xffff0000U) >> 16; | ||
1207 | u16 nr = schid & 0x0000ffffU; | ||
1175 | 1208 | ||
1209 | spin_lock(&fi->lock); | ||
1210 | list_for_each_entry(iter, isc_list, list) { | ||
1211 | if (schid && (id != iter->io.subchannel_id || | ||
1212 | nr != iter->io.subchannel_nr)) | ||
1213 | continue; | ||
1214 | /* found an appropriate entry */ | ||
1215 | list_del_init(&iter->list); | ||
1216 | fi->counters[FIRQ_CNTR_IO] -= 1; | ||
1217 | if (list_empty(isc_list)) | ||
1218 | clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); | ||
1219 | spin_unlock(&fi->lock); | ||
1220 | return iter; | ||
1221 | } | ||
1222 | spin_unlock(&fi->lock); | ||
1223 | return NULL; | ||
1224 | } | ||
1225 | |||
1226 | /* | ||
1227 | * Dequeue and return an I/O interrupt matching any of the interruption | ||
1228 | * subclasses as designated by the isc mask in cr6 and the schid (if != 0). | ||
1229 | */ | ||
1176 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 1230 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
1177 | u64 cr6, u64 schid) | 1231 | u64 isc_mask, u32 schid) |
1232 | { | ||
1233 | struct kvm_s390_interrupt_info *inti = NULL; | ||
1234 | int isc; | ||
1235 | |||
1236 | for (isc = 0; isc <= MAX_ISC && !inti; isc++) { | ||
1237 | if (isc_mask & isc_to_isc_bits(isc)) | ||
1238 | inti = get_io_int(kvm, isc, schid); | ||
1239 | } | ||
1240 | return inti; | ||
1241 | } | ||
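kvm_s390_get_io_int() walks the interruption subclasses most-important-first and only dequeues from lists whose ISC bit is enabled in the mask. A standalone sketch, assuming the usual convention that subclass 0 maps to the most significant of the eight ISC bits (0x80 >> isc):

#include <stdio.h>

#define MAX_ISC 7

static unsigned long isc_to_isc_bits(int isc)   /* assumed layout */
{
        return 0x80UL >> isc;
}

int main(void)
{
        unsigned long isc_mask = 0xa0;   /* subclasses 0 and 2 enabled */

        for (int isc = 0; isc <= MAX_ISC; isc++)
                if (isc_mask & isc_to_isc_bits(isc))
                        printf("would dequeue from ISC list %d\n", isc);
        return 0;
}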
1242 | |||
1243 | #define SCCB_MASK 0xFFFFFFF8 | ||
1244 | #define SCCB_EVENT_PENDING 0x3 | ||
1245 | |||
1246 | static int __inject_service(struct kvm *kvm, | ||
1247 | struct kvm_s390_interrupt_info *inti) | ||
1248 | { | ||
1249 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1250 | |||
1251 | spin_lock(&fi->lock); | ||
1252 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; | ||
1253 | /* | ||
1254 | * Early versions of the QEMU s390 bios will inject several | ||
1255 | * service interrupts after another without handling a | ||
1256 | * condition code indicating busy. | ||
1257 | * We will silently ignore those superfluous sccb values. | ||
1258 | * A future version of QEMU will take care of serialization | ||
1259 | * of servc requests | ||
1260 | */ | ||
1261 | if (fi->srv_signal.ext_params & SCCB_MASK) | ||
1262 | goto out; | ||
1263 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; | ||
1264 | set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); | ||
1265 | out: | ||
1266 | spin_unlock(&fi->lock); | ||
1267 | kfree(inti); | ||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | static int __inject_virtio(struct kvm *kvm, | ||
1272 | struct kvm_s390_interrupt_info *inti) | ||
1273 | { | ||
1274 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1275 | |||
1276 | spin_lock(&fi->lock); | ||
1277 | if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { | ||
1278 | spin_unlock(&fi->lock); | ||
1279 | return -EBUSY; | ||
1280 | } | ||
1281 | fi->counters[FIRQ_CNTR_VIRTIO] += 1; | ||
1282 | list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); | ||
1283 | set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); | ||
1284 | spin_unlock(&fi->lock); | ||
1285 | return 0; | ||
1286 | } | ||
1287 | |||
1288 | static int __inject_pfault_done(struct kvm *kvm, | ||
1289 | struct kvm_s390_interrupt_info *inti) | ||
1290 | { | ||
1291 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1292 | |||
1293 | spin_lock(&fi->lock); | ||
1294 | if (fi->counters[FIRQ_CNTR_PFAULT] >= | ||
1295 | (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { | ||
1296 | spin_unlock(&fi->lock); | ||
1297 | return -EBUSY; | ||
1298 | } | ||
1299 | fi->counters[FIRQ_CNTR_PFAULT] += 1; | ||
1300 | list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); | ||
1301 | set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); | ||
1302 | spin_unlock(&fi->lock); | ||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | #define CR_PENDING_SUBCLASS 28 | ||
1307 | static int __inject_float_mchk(struct kvm *kvm, | ||
1308 | struct kvm_s390_interrupt_info *inti) | ||
1309 | { | ||
1310 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1311 | |||
1312 | spin_lock(&fi->lock); | ||
1313 | fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); | ||
1314 | fi->mchk.mcic |= inti->mchk.mcic; | ||
1315 | set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); | ||
1316 | spin_unlock(&fi->lock); | ||
1317 | kfree(inti); | ||
1318 | return 0; | ||
1319 | } | ||
1320 | |||
1321 | static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | ||
1178 | { | 1322 | { |
1179 | struct kvm_s390_float_interrupt *fi; | 1323 | struct kvm_s390_float_interrupt *fi; |
1180 | struct kvm_s390_interrupt_info *inti, *iter; | 1324 | struct list_head *list; |
1325 | int isc; | ||
1181 | 1326 | ||
1182 | if ((!schid && !cr6) || (schid && cr6)) | ||
1183 | return NULL; | ||
1184 | fi = &kvm->arch.float_int; | 1327 | fi = &kvm->arch.float_int; |
1185 | spin_lock(&fi->lock); | 1328 | spin_lock(&fi->lock); |
1186 | inti = NULL; | 1329 | if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { |
1187 | list_for_each_entry(iter, &fi->list, list) { | 1330 | spin_unlock(&fi->lock); |
1188 | if (!is_ioint(iter->type)) | 1331 | return -EBUSY; |
1189 | continue; | ||
1190 | if (cr6 && | ||
1191 | ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) | ||
1192 | continue; | ||
1193 | if (schid) { | ||
1194 | if (((schid & 0x00000000ffff0000) >> 16) != | ||
1195 | iter->io.subchannel_id) | ||
1196 | continue; | ||
1197 | if ((schid & 0x000000000000ffff) != | ||
1198 | iter->io.subchannel_nr) | ||
1199 | continue; | ||
1200 | } | ||
1201 | inti = iter; | ||
1202 | break; | ||
1203 | } | ||
1204 | if (inti) { | ||
1205 | list_del_init(&inti->list); | ||
1206 | fi->irq_count--; | ||
1207 | } | 1332 | } |
1208 | if (list_empty(&fi->list)) | 1333 | fi->counters[FIRQ_CNTR_IO] += 1; |
1209 | atomic_set(&fi->active, 0); | 1334 | |
1335 | isc = int_word_to_isc(inti->io.io_int_word); | ||
1336 | list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; | ||
1337 | list_add_tail(&inti->list, list); | ||
1338 | set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs); | ||
1210 | spin_unlock(&fi->lock); | 1339 | spin_unlock(&fi->lock); |
1211 | return inti; | 1340 | return 0; |
1212 | } | 1341 | } |
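__inject_io() caps the total number of queued floating I/O interrupts with one shared counter, then files the interrupt on the list for its subclass and sets the matching IRQ_PEND_IO_ISC_<n> bit. A toy sketch of that bookkeeping, with the lock and constants illustrative:

#include <stdio.h>

#define NR_ISC         8
#define MAX_FLOAT_IRQS 4   /* illustrative cap */

struct io_irq { struct io_irq *next; int isc; };

static struct io_irq *lists[NR_ISC];
static int io_count;
static unsigned long pending_irqs;

static int inject_io(struct io_irq *irq)
{
        struct io_irq **pp = &lists[irq->isc];

        if (io_count >= MAX_FLOAT_IRQS)
                return -1;               /* -EBUSY in the kernel */
        io_count++;
        while (*pp)
                pp = &(*pp)->next;       /* append at the tail */
        irq->next = 0;
        *pp = irq;
        pending_irqs |= 1UL << irq->isc; /* per-subclass pending bit */
        return 0;
}

int main(void)
{
        struct io_irq a = { 0, 3 };

        printf("inject: %d, pending mask: %#lx\n",
               inject_io(&a), pending_irqs);
        return 0;
}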
1213 | 1342 | ||
1214 | static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | 1343 | static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) |
1215 | { | 1344 | { |
1216 | struct kvm_s390_local_interrupt *li; | 1345 | struct kvm_s390_local_interrupt *li; |
1217 | struct kvm_s390_float_interrupt *fi; | 1346 | struct kvm_s390_float_interrupt *fi; |
1218 | struct kvm_s390_interrupt_info *iter; | ||
1219 | struct kvm_vcpu *dst_vcpu = NULL; | 1347 | struct kvm_vcpu *dst_vcpu = NULL; |
1220 | int sigcpu; | 1348 | int sigcpu; |
1221 | int rc = 0; | 1349 | u64 type = READ_ONCE(inti->type); |
1350 | int rc; | ||
1222 | 1351 | ||
1223 | fi = &kvm->arch.float_int; | 1352 | fi = &kvm->arch.float_int; |
1224 | spin_lock(&fi->lock); | 1353 | |
1225 | if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { | 1354 | switch (type) { |
1355 | case KVM_S390_MCHK: | ||
1356 | rc = __inject_float_mchk(kvm, inti); | ||
1357 | break; | ||
1358 | case KVM_S390_INT_VIRTIO: | ||
1359 | rc = __inject_virtio(kvm, inti); | ||
1360 | break; | ||
1361 | case KVM_S390_INT_SERVICE: | ||
1362 | rc = __inject_service(kvm, inti); | ||
1363 | break; | ||
1364 | case KVM_S390_INT_PFAULT_DONE: | ||
1365 | rc = __inject_pfault_done(kvm, inti); | ||
1366 | break; | ||
1367 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | ||
1368 | rc = __inject_io(kvm, inti); | ||
1369 | break; | ||
1370 | default: | ||
1226 | rc = -EINVAL; | 1371 | rc = -EINVAL; |
1227 | goto unlock_fi; | ||
1228 | } | 1372 | } |
1229 | fi->irq_count++; | 1373 | if (rc) |
1230 | if (!is_ioint(inti->type)) { | 1374 | return rc; |
1231 | list_add_tail(&inti->list, &fi->list); | ||
1232 | } else { | ||
1233 | u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); | ||
1234 | 1375 | ||
1235 | /* Keep I/O interrupts sorted in isc order. */ | ||
1236 | list_for_each_entry(iter, &fi->list, list) { | ||
1237 | if (!is_ioint(iter->type)) | ||
1238 | continue; | ||
1239 | if (int_word_to_isc_bits(iter->io.io_int_word) | ||
1240 | <= isc_bits) | ||
1241 | continue; | ||
1242 | break; | ||
1243 | } | ||
1244 | list_add_tail(&inti->list, &iter->list); | ||
1245 | } | ||
1246 | atomic_set(&fi->active, 1); | ||
1247 | if (atomic_read(&kvm->online_vcpus) == 0) | ||
1248 | goto unlock_fi; | ||
1249 | sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); | 1376 | sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); |
1250 | if (sigcpu == KVM_MAX_VCPUS) { | 1377 | if (sigcpu == KVM_MAX_VCPUS) { |
1251 | do { | 1378 | do { |
@@ -1257,7 +1384,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1257 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); | 1384 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); |
1258 | li = &dst_vcpu->arch.local_int; | 1385 | li = &dst_vcpu->arch.local_int; |
1259 | spin_lock(&li->lock); | 1386 | spin_lock(&li->lock); |
1260 | switch (inti->type) { | 1387 | switch (type) { |
1261 | case KVM_S390_MCHK: | 1388 | case KVM_S390_MCHK: |
1262 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | 1389 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
1263 | break; | 1390 | break; |
@@ -1270,9 +1397,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
1270 | } | 1397 | } |
1271 | spin_unlock(&li->lock); | 1398 | spin_unlock(&li->lock); |
1272 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); | 1399 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); |
1273 | unlock_fi: | 1400 | return 0; |
1274 | spin_unlock(&fi->lock); | 1401 | |
1275 | return rc; | ||
1276 | } | 1402 | } |
1277 | 1403 | ||
1278 | int kvm_s390_inject_vm(struct kvm *kvm, | 1404 | int kvm_s390_inject_vm(struct kvm *kvm, |
@@ -1332,10 +1458,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, | |||
1332 | return rc; | 1458 | return rc; |
1333 | } | 1459 | } |
1334 | 1460 | ||
1335 | void kvm_s390_reinject_io_int(struct kvm *kvm, | 1461 | int kvm_s390_reinject_io_int(struct kvm *kvm, |
1336 | struct kvm_s390_interrupt_info *inti) | 1462 | struct kvm_s390_interrupt_info *inti) |
1337 | { | 1463 | { |
1338 | __inject_vm(kvm, inti); | 1464 | return __inject_vm(kvm, inti); |
1339 | } | 1465 | } |
1340 | 1466 | ||
1341 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, | 1467 | int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, |
@@ -1388,12 +1514,10 @@ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) | |||
1388 | spin_unlock(&li->lock); | 1514 | spin_unlock(&li->lock); |
1389 | } | 1515 | } |
1390 | 1516 | ||
1391 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | 1517 | static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) |
1392 | { | 1518 | { |
1393 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
1394 | int rc; | 1519 | int rc; |
1395 | 1520 | ||
1396 | spin_lock(&li->lock); | ||
1397 | switch (irq->type) { | 1521 | switch (irq->type) { |
1398 | case KVM_S390_PROGRAM_INT: | 1522 | case KVM_S390_PROGRAM_INT: |
1399 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", | 1523 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", |
@@ -1433,83 +1557,130 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
1433 | default: | 1557 | default: |
1434 | rc = -EINVAL; | 1558 | rc = -EINVAL; |
1435 | } | 1559 | } |
1560 | |||
1561 | return rc; | ||
1562 | } | ||
1563 | |||
1564 | int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | ||
1565 | { | ||
1566 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
1567 | int rc; | ||
1568 | |||
1569 | spin_lock(&li->lock); | ||
1570 | rc = do_inject_vcpu(vcpu, irq); | ||
1436 | spin_unlock(&li->lock); | 1571 | spin_unlock(&li->lock); |
1437 | if (!rc) | 1572 | if (!rc) |
1438 | kvm_s390_vcpu_wakeup(vcpu); | 1573 | kvm_s390_vcpu_wakeup(vcpu); |
1439 | return rc; | 1574 | return rc; |
1440 | } | 1575 | } |
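Splitting kvm_s390_inject_vcpu() into a lock-taking wrapper around do_inject_vcpu() lets bulk callers that already hold li->lock (such as the irq-state restore further down) reuse the same injection code without re-locking per interrupt. The idiom, sketched with pthreads:

#include <pthread.h>

static pthread_mutex_t li_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold li_lock -- the analogue of do_inject_vcpu(). */
static int do_inject(int irq_type)
{
        (void)irq_type;          /* queue the irq here */
        return 0;
}

/* Public entry point takes the lock itself. */
static int inject(int irq_type)
{
        int rc;

        pthread_mutex_lock(&li_lock);
        rc = do_inject(irq_type);
        pthread_mutex_unlock(&li_lock);
        return rc;
}

int main(void)
{
        return inject(1);
}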
1441 | 1576 | ||
1442 | void kvm_s390_clear_float_irqs(struct kvm *kvm) | 1577 | static inline void clear_irq_list(struct list_head *_list) |
1443 | { | 1578 | { |
1444 | struct kvm_s390_float_interrupt *fi; | 1579 | struct kvm_s390_interrupt_info *inti, *n; |
1445 | struct kvm_s390_interrupt_info *n, *inti = NULL; | ||
1446 | 1580 | ||
1447 | fi = &kvm->arch.float_int; | 1581 | list_for_each_entry_safe(inti, n, _list, list) { |
1448 | spin_lock(&fi->lock); | ||
1449 | list_for_each_entry_safe(inti, n, &fi->list, list) { | ||
1450 | list_del(&inti->list); | 1582 | list_del(&inti->list); |
1451 | kfree(inti); | 1583 | kfree(inti); |
1452 | } | 1584 | } |
1453 | fi->irq_count = 0; | ||
1454 | atomic_set(&fi->active, 0); | ||
1455 | spin_unlock(&fi->lock); | ||
1456 | } | 1585 | } |
1457 | 1586 | ||
1458 | static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, | 1587 | static void inti_to_irq(struct kvm_s390_interrupt_info *inti, |
1459 | u8 *addr) | 1588 | struct kvm_s390_irq *irq) |
1460 | { | 1589 | { |
1461 | struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; | 1590 | irq->type = inti->type; |
1462 | struct kvm_s390_irq irq = {0}; | ||
1463 | |||
1464 | irq.type = inti->type; | ||
1465 | switch (inti->type) { | 1591 | switch (inti->type) { |
1466 | case KVM_S390_INT_PFAULT_INIT: | 1592 | case KVM_S390_INT_PFAULT_INIT: |
1467 | case KVM_S390_INT_PFAULT_DONE: | 1593 | case KVM_S390_INT_PFAULT_DONE: |
1468 | case KVM_S390_INT_VIRTIO: | 1594 | case KVM_S390_INT_VIRTIO: |
1469 | case KVM_S390_INT_SERVICE: | 1595 | irq->u.ext = inti->ext; |
1470 | irq.u.ext = inti->ext; | ||
1471 | break; | 1596 | break; |
1472 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 1597 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1473 | irq.u.io = inti->io; | 1598 | irq->u.io = inti->io; |
1474 | break; | 1599 | break; |
1475 | case KVM_S390_MCHK: | ||
1476 | irq.u.mchk = inti->mchk; | ||
1477 | break; | ||
1478 | default: | ||
1479 | return -EINVAL; | ||
1480 | } | 1600 | } |
1601 | } | ||
1481 | 1602 | ||
1482 | if (copy_to_user(uptr, &irq, sizeof(irq))) | 1603 | void kvm_s390_clear_float_irqs(struct kvm *kvm) |
1483 | return -EFAULT; | 1604 | { |
1605 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; | ||
1606 | int i; | ||
1484 | 1607 | ||
1485 | return 0; | 1608 | spin_lock(&fi->lock); |
1486 | } | 1609 | for (i = 0; i < FIRQ_LIST_COUNT; i++) |
1610 | clear_irq_list(&fi->lists[i]); | ||
1611 | for (i = 0; i < FIRQ_MAX_COUNT; i++) | ||
1612 | fi->counters[i] = 0; | ||
1613 | spin_unlock(&fi->lock); | ||
1614 | }; | ||
1487 | 1615 | ||
1488 | static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) | 1616 | static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) |
1489 | { | 1617 | { |
1490 | struct kvm_s390_interrupt_info *inti; | 1618 | struct kvm_s390_interrupt_info *inti; |
1491 | struct kvm_s390_float_interrupt *fi; | 1619 | struct kvm_s390_float_interrupt *fi; |
1620 | struct kvm_s390_irq *buf; | ||
1621 | struct kvm_s390_irq *irq; | ||
1622 | int max_irqs; | ||
1492 | int ret = 0; | 1623 | int ret = 0; |
1493 | int n = 0; | 1624 | int n = 0; |
1625 | int i; | ||
1626 | |||
1627 | if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) | ||
1628 | return -EINVAL; | ||
1629 | |||
1630 | /* | ||
1631 | * We are already using -ENOMEM to signal | ||
1632 | * userspace that it may retry with a bigger buffer, | ||
1633 | * so we need a different error code for this case. | ||
1634 | */ | ||
1635 | buf = vzalloc(len); | ||
1636 | if (!buf) | ||
1637 | return -ENOBUFS; | ||
1638 | |||
1639 | max_irqs = len / sizeof(struct kvm_s390_irq); | ||
1494 | 1640 | ||
1495 | fi = &kvm->arch.float_int; | 1641 | fi = &kvm->arch.float_int; |
1496 | spin_lock(&fi->lock); | 1642 | spin_lock(&fi->lock); |
1497 | 1643 | for (i = 0; i < FIRQ_LIST_COUNT; i++) { | |
1498 | list_for_each_entry(inti, &fi->list, list) { | 1644 | list_for_each_entry(inti, &fi->lists[i], list) { |
1499 | if (len < sizeof(struct kvm_s390_irq)) { | 1645 | if (n == max_irqs) { |
1646 | /* signal userspace to try again */ | ||
1647 | ret = -ENOMEM; | ||
1648 | goto out; | ||
1649 | } | ||
1650 | inti_to_irq(inti, &buf[n]); | ||
1651 | n++; | ||
1652 | } | ||
1653 | } | ||
1654 | if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { | ||
1655 | if (n == max_irqs) { | ||
1500 | /* signal userspace to try again */ | 1656 | /* signal userspace to try again */ |
1501 | ret = -ENOMEM; | 1657 | ret = -ENOMEM; |
1502 | break; | 1658 | goto out; |
1503 | } | 1659 | } |
1504 | ret = copy_irq_to_user(inti, buf); | 1660 | irq = (struct kvm_s390_irq *) &buf[n]; |
1505 | if (ret) | 1661 | irq->type = KVM_S390_INT_SERVICE; |
1506 | break; | 1662 | irq->u.ext = fi->srv_signal; |
1507 | buf += sizeof(struct kvm_s390_irq); | ||
1508 | len -= sizeof(struct kvm_s390_irq); | ||
1509 | n++; | 1663 | n++; |
1510 | } | 1664 | } |
1665 | if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { | ||
1666 | if (n == max_irqs) { | ||
1667 | /* signal userspace to try again */ | ||
1668 | ret = -ENOMEM; | ||
1669 | goto out; | ||
1670 | } | ||
1671 | irq = (struct kvm_s390_irq *) &buf[n]; | ||
1672 | irq->type = KVM_S390_MCHK; | ||
1673 | irq->u.mchk = fi->mchk; | ||
1674 | n++; | ||
1675 | } | ||
1511 | 1676 | ||
1677 | out: | ||
1512 | spin_unlock(&fi->lock); | 1678 | spin_unlock(&fi->lock); |
1679 | if (!ret && n > 0) { | ||
1680 | if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) | ||
1681 | ret = -EFAULT; | ||
1682 | } | ||
1683 | vfree(buf); | ||
1513 | 1684 | ||
1514 | return ret < 0 ? ret : n; | 1685 | return ret < 0 ? ret : n; |
1515 | } | 1686 | } |
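get_all_floating_irqs() now fills a vzalloc'd kernel buffer under the lock and copies it to userspace afterwards; -ENOMEM keeps its meaning of "buffer too small, retry bigger", while -ENOBUFS reports the kernel-side allocation failure, and a non-negative return is the count of interrupts copied (per the return statement above). A hedged userspace sketch of the retry loop; flic_fd is an assumed open FLIC device fd:

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_all_irqs(int flic_fd, void **bufp, size_t len)
{
        for (;;) {
                void *buf = realloc(*bufp, len);

                if (!buf)
                        return -1;
                *bufp = buf;

                struct kvm_device_attr attr = {
                        .group = KVM_DEV_FLIC_GET_ALL_IRQS,
                        .attr  = len,
                        .addr  = (__u64)(unsigned long)buf,
                };
                int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);

                if (n >= 0 || errno != ENOMEM)
                        return n;       /* >= 0: irqs copied */
                len *= 2;               /* too small, retry bigger */
        }
}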
@@ -1520,7 +1691,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1520 | 1691 | ||
1521 | switch (attr->group) { | 1692 | switch (attr->group) { |
1522 | case KVM_DEV_FLIC_GET_ALL_IRQS: | 1693 | case KVM_DEV_FLIC_GET_ALL_IRQS: |
1523 | r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, | 1694 | r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, |
1524 | attr->attr); | 1695 | attr->attr); |
1525 | break; | 1696 | break; |
1526 | default: | 1697 | default: |
@@ -1952,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, | |||
1952 | { | 2123 | { |
1953 | return -EINVAL; | 2124 | return -EINVAL; |
1954 | } | 2125 | } |
2126 | |||
2127 | int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len) | ||
2128 | { | ||
2129 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
2130 | struct kvm_s390_irq *buf; | ||
2131 | int r = 0; | ||
2132 | int n; | ||
2133 | |||
2134 | buf = vmalloc(len); | ||
2135 | if (!buf) | ||
2136 | return -ENOMEM; | ||
2137 | |||
2138 | if (copy_from_user((void *) buf, irqstate, len)) { | ||
2139 | r = -EFAULT; | ||
2140 | goto out_free; | ||
2141 | } | ||
2142 | |||
2143 | /* | ||
2144 | * Don't allow setting the interrupt state | ||
2145 | * when there are already interrupts pending | ||
2146 | */ | ||
2147 | spin_lock(&li->lock); | ||
2148 | if (li->pending_irqs) { | ||
2149 | r = -EBUSY; | ||
2150 | goto out_unlock; | ||
2151 | } | ||
2152 | |||
2153 | for (n = 0; n < len / sizeof(*buf); n++) { | ||
2154 | r = do_inject_vcpu(vcpu, &buf[n]); | ||
2155 | if (r) | ||
2156 | break; | ||
2157 | } | ||
2158 | |||
2159 | out_unlock: | ||
2160 | spin_unlock(&li->lock); | ||
2161 | out_free: | ||
2162 | vfree(buf); | ||
2163 | |||
2164 | return r; | ||
2165 | } | ||
2166 | |||
2167 | static void store_local_irq(struct kvm_s390_local_interrupt *li, | ||
2168 | struct kvm_s390_irq *irq, | ||
2169 | unsigned long irq_type) | ||
2170 | { | ||
2171 | switch (irq_type) { | ||
2172 | case IRQ_PEND_MCHK_EX: | ||
2173 | case IRQ_PEND_MCHK_REP: | ||
2174 | irq->type = KVM_S390_MCHK; | ||
2175 | irq->u.mchk = li->irq.mchk; | ||
2176 | break; | ||
2177 | case IRQ_PEND_PROG: | ||
2178 | irq->type = KVM_S390_PROGRAM_INT; | ||
2179 | irq->u.pgm = li->irq.pgm; | ||
2180 | break; | ||
2181 | case IRQ_PEND_PFAULT_INIT: | ||
2182 | irq->type = KVM_S390_INT_PFAULT_INIT; | ||
2183 | irq->u.ext = li->irq.ext; | ||
2184 | break; | ||
2185 | case IRQ_PEND_EXT_EXTERNAL: | ||
2186 | irq->type = KVM_S390_INT_EXTERNAL_CALL; | ||
2187 | irq->u.extcall = li->irq.extcall; | ||
2188 | break; | ||
2189 | case IRQ_PEND_EXT_CLOCK_COMP: | ||
2190 | irq->type = KVM_S390_INT_CLOCK_COMP; | ||
2191 | break; | ||
2192 | case IRQ_PEND_EXT_CPU_TIMER: | ||
2193 | irq->type = KVM_S390_INT_CPU_TIMER; | ||
2194 | break; | ||
2195 | case IRQ_PEND_SIGP_STOP: | ||
2196 | irq->type = KVM_S390_SIGP_STOP; | ||
2197 | irq->u.stop = li->irq.stop; | ||
2198 | break; | ||
2199 | case IRQ_PEND_RESTART: | ||
2200 | irq->type = KVM_S390_RESTART; | ||
2201 | break; | ||
2202 | case IRQ_PEND_SET_PREFIX: | ||
2203 | irq->type = KVM_S390_SIGP_SET_PREFIX; | ||
2204 | irq->u.prefix = li->irq.prefix; | ||
2205 | break; | ||
2206 | } | ||
2207 | } | ||
2208 | |||
2209 | int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) | ||
2210 | { | ||
2211 | uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; | ||
2212 | unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)]; | ||
2213 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | ||
2214 | unsigned long pending_irqs; | ||
2215 | struct kvm_s390_irq irq; | ||
2216 | unsigned long irq_type; | ||
2217 | int cpuaddr; | ||
2218 | int n = 0; | ||
2219 | |||
2220 | spin_lock(&li->lock); | ||
2221 | pending_irqs = li->pending_irqs; | ||
2222 | memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending, | ||
2223 | sizeof(sigp_emerg_pending)); | ||
2224 | spin_unlock(&li->lock); | ||
2225 | |||
2226 | for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) { | ||
2227 | memset(&irq, 0, sizeof(irq)); | ||
2228 | if (irq_type == IRQ_PEND_EXT_EMERGENCY) | ||
2229 | continue; | ||
2230 | if (n + sizeof(irq) > len) | ||
2231 | return -ENOBUFS; | ||
2232 | store_local_irq(&vcpu->arch.local_int, &irq, irq_type); | ||
2233 | if (copy_to_user(&buf[n], &irq, sizeof(irq))) | ||
2234 | return -EFAULT; | ||
2235 | n += sizeof(irq); | ||
2236 | } | ||
2237 | |||
2238 | if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) { | ||
2239 | for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) { | ||
2240 | memset(&irq, 0, sizeof(irq)); | ||
2241 | if (n + sizeof(irq) > len) | ||
2242 | return -ENOBUFS; | ||
2243 | irq.type = KVM_S390_INT_EMERGENCY; | ||
2244 | irq.u.emerg.code = cpuaddr; | ||
2245 | if (copy_to_user(&buf[n], &irq, sizeof(irq))) | ||
2246 | return -EFAULT; | ||
2247 | n += sizeof(irq); | ||
2248 | } | ||
2249 | } | ||
2250 | |||
2251 | if ((sigp_ctrl & SIGP_CTRL_C) && | ||
2252 | (atomic_read(&vcpu->arch.sie_block->cpuflags) & | ||
2253 | CPUSTAT_ECALL_PEND)) { | ||
2254 | if (n + sizeof(irq) > len) | ||
2255 | return -ENOBUFS; | ||
2256 | memset(&irq, 0, sizeof(irq)); | ||
2257 | irq.type = KVM_S390_INT_EXTERNAL_CALL; | ||
2258 | irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK; | ||
2259 | if (copy_to_user(&buf[n], &irq, sizeof(irq))) | ||
2260 | return -EFAULT; | ||
2261 | n += sizeof(irq); | ||
2262 | } | ||
2263 | |||
2264 | return n; | ||
2265 | } | ||
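Together, the two new ioctls let userspace save a vcpu's complete local interrupt state and re-inject it on the migration target. A sketch of the userspace side, where the vcpu fds and buffer are assumptions of the example; per the code above, KVM_S390_GET_IRQ_STATE reports the number of bytes stored, which becomes the length for the restore:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int migrate_irq_state(int src_vcpu_fd, int dst_vcpu_fd,
                             void *buf, uint32_t buflen)
{
        struct kvm_s390_irq_state irq_state = {
                .buf = (uint64_t)(unsigned long)buf,
                .len = buflen,
        };
        int n;

        n = ioctl(src_vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
        if (n < 0)
                return n;       /* -1 with errno set, e.g. ENOBUFS */

        irq_state.len = n;      /* bytes actually stored */
        return ioctl(dst_vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state);
}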
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 19e17bd7aec0..afa2bd750ffc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -25,11 +25,13 @@ | |||
25 | #include <linux/random.h> | 25 | #include <linux/random.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/vmalloc.h> | ||
28 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
29 | #include <asm/lowcore.h> | 30 | #include <asm/lowcore.h> |
30 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
31 | #include <asm/nmi.h> | 32 | #include <asm/nmi.h> |
32 | #include <asm/switch_to.h> | 33 | #include <asm/switch_to.h> |
34 | #include <asm/isc.h> | ||
33 | #include <asm/sclp.h> | 35 | #include <asm/sclp.h> |
34 | #include "kvm-s390.h" | 36 | #include "kvm-s390.h" |
35 | #include "gaccess.h" | 37 | #include "gaccess.h" |
@@ -38,6 +40,11 @@ | |||
38 | #include "trace.h" | 40 | #include "trace.h" |
39 | #include "trace-s390.h" | 41 | #include "trace-s390.h" |
40 | 42 | ||
43 | #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */ | ||
44 | #define LOCAL_IRQS 32 | ||
45 | #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \ | ||
46 | (KVM_MAX_VCPUS + LOCAL_IRQS)) | ||
47 | |||
41 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 48 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
42 | 49 | ||
43 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 50 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
@@ -87,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
87 | { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, | 94 | { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, |
88 | { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, | 95 | { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, |
89 | { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, | 96 | { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, |
97 | { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) }, | ||
90 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, | 98 | { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, |
91 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, | 99 | { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, |
92 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, | 100 | { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, |
@@ -101,8 +109,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
101 | 109 | ||
102 | /* upper facilities limit for kvm */ | 110 | /* upper facilities limit for kvm */ |
103 | unsigned long kvm_s390_fac_list_mask[] = { | 111 | unsigned long kvm_s390_fac_list_mask[] = { |
104 | 0xff82fffbf4fc2000UL, | 112 | 0xffe6fffbfcfdfc40UL, |
105 | 0x005c000000000000UL, | 113 | 0x205c800000000000UL, |
106 | }; | 114 | }; |
107 | 115 | ||
108 | unsigned long kvm_s390_fac_list_mask_size(void) | 116 | unsigned long kvm_s390_fac_list_mask_size(void) |
@@ -171,9 +179,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
171 | case KVM_CAP_S390_IRQCHIP: | 179 | case KVM_CAP_S390_IRQCHIP: |
172 | case KVM_CAP_VM_ATTRIBUTES: | 180 | case KVM_CAP_VM_ATTRIBUTES: |
173 | case KVM_CAP_MP_STATE: | 181 | case KVM_CAP_MP_STATE: |
182 | case KVM_CAP_S390_INJECT_IRQ: | ||
174 | case KVM_CAP_S390_USER_SIGP: | 183 | case KVM_CAP_S390_USER_SIGP: |
184 | case KVM_CAP_S390_USER_STSI: | ||
185 | case KVM_CAP_S390_SKEYS: | ||
186 | case KVM_CAP_S390_IRQ_STATE: | ||
175 | r = 1; | 187 | r = 1; |
176 | break; | 188 | break; |
189 | case KVM_CAP_S390_MEM_OP: | ||
190 | r = MEM_OP_MAX_SIZE; | ||
191 | break; | ||
177 | case KVM_CAP_NR_VCPUS: | 192 | case KVM_CAP_NR_VCPUS: |
178 | case KVM_CAP_MAX_VCPUS: | 193 | case KVM_CAP_MAX_VCPUS: |
179 | r = KVM_MAX_VCPUS; | 194 | r = KVM_MAX_VCPUS; |
@@ -184,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
184 | case KVM_CAP_S390_COW: | 199 | case KVM_CAP_S390_COW: |
185 | r = MACHINE_HAS_ESOP; | 200 | r = MACHINE_HAS_ESOP; |
186 | break; | 201 | break; |
202 | case KVM_CAP_S390_VECTOR_REGISTERS: | ||
203 | r = MACHINE_HAS_VX; | ||
204 | break; | ||
187 | default: | 205 | default: |
188 | r = 0; | 206 | r = 0; |
189 | } | 207 | } |
@@ -264,6 +282,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
264 | kvm->arch.user_sigp = 1; | 282 | kvm->arch.user_sigp = 1; |
265 | r = 0; | 283 | r = 0; |
266 | break; | 284 | break; |
285 | case KVM_CAP_S390_VECTOR_REGISTERS: | ||
286 | if (MACHINE_HAS_VX) { | ||
287 | set_kvm_facility(kvm->arch.model.fac->mask, 129); | ||
288 | set_kvm_facility(kvm->arch.model.fac->list, 129); | ||
289 | r = 0; | ||
290 | } else | ||
291 | r = -EINVAL; | ||
292 | break; | ||
293 | case KVM_CAP_S390_USER_STSI: | ||
294 | kvm->arch.user_stsi = 1; | ||
295 | r = 0; | ||
296 | break; | ||
267 | default: | 297 | default: |
268 | r = -EINVAL; | 298 | r = -EINVAL; |
269 | break; | 299 | break; |
@@ -708,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) | |||
708 | return ret; | 738 | return ret; |
709 | } | 739 | } |
710 | 740 | ||
741 | static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) | ||
742 | { | ||
743 | uint8_t *keys; | ||
744 | uint64_t hva; | ||
745 | unsigned long curkey; | ||
746 | int i, r = 0; | ||
747 | |||
748 | if (args->flags != 0) | ||
749 | return -EINVAL; | ||
750 | |||
751 | /* Is this guest using storage keys? */ | ||
752 | if (!mm_use_skey(current->mm)) | ||
753 | return KVM_S390_GET_SKEYS_NONE; | ||
754 | |||
755 | /* Enforce sane limit on memory allocation */ | ||
756 | if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) | ||
757 | return -EINVAL; | ||
758 | |||
759 | keys = kmalloc_array(args->count, sizeof(uint8_t), | ||
760 | GFP_KERNEL | __GFP_NOWARN); | ||
761 | if (!keys) | ||
762 | keys = vmalloc(sizeof(uint8_t) * args->count); | ||
763 | if (!keys) | ||
764 | return -ENOMEM; | ||
765 | |||
766 | for (i = 0; i < args->count; i++) { | ||
767 | hva = gfn_to_hva(kvm, args->start_gfn + i); | ||
768 | if (kvm_is_error_hva(hva)) { | ||
769 | r = -EFAULT; | ||
770 | goto out; | ||
771 | } | ||
772 | |||
773 | curkey = get_guest_storage_key(current->mm, hva); | ||
774 | if (IS_ERR_VALUE(curkey)) { | ||
775 | r = curkey; | ||
776 | goto out; | ||
777 | } | ||
778 | keys[i] = curkey; | ||
779 | } | ||
780 | |||
781 | r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, | ||
782 | sizeof(uint8_t) * args->count); | ||
783 | if (r) | ||
784 | r = -EFAULT; | ||
785 | out: | ||
786 | kvfree(keys); | ||
787 | return r; | ||
788 | } | ||
789 | |||
790 | static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) | ||
791 | { | ||
792 | uint8_t *keys; | ||
793 | uint64_t hva; | ||
794 | int i, r = 0; | ||
795 | |||
796 | if (args->flags != 0) | ||
797 | return -EINVAL; | ||
798 | |||
799 | /* Enforce sane limit on memory allocation */ | ||
800 | if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) | ||
801 | return -EINVAL; | ||
802 | |||
803 | keys = kmalloc_array(args->count, sizeof(uint8_t), | ||
804 | GFP_KERNEL | __GFP_NOWARN); | ||
805 | if (!keys) | ||
806 | keys = vmalloc(sizeof(uint8_t) * args->count); | ||
807 | if (!keys) | ||
808 | return -ENOMEM; | ||
809 | |||
810 | r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, | ||
811 | sizeof(uint8_t) * args->count); | ||
812 | if (r) { | ||
813 | r = -EFAULT; | ||
814 | goto out; | ||
815 | } | ||
816 | |||
817 | /* Enable storage key handling for the guest */ | ||
818 | s390_enable_skey(); | ||
819 | |||
820 | for (i = 0; i < args->count; i++) { | ||
821 | hva = gfn_to_hva(kvm, args->start_gfn + i); | ||
822 | if (kvm_is_error_hva(hva)) { | ||
823 | r = -EFAULT; | ||
824 | goto out; | ||
825 | } | ||
826 | |||
827 | /* Lowest order bit is reserved */ | ||
828 | if (keys[i] & 0x01) { | ||
829 | r = -EINVAL; | ||
830 | goto out; | ||
831 | } | ||
832 | |||
833 | r = set_guest_storage_key(current->mm, hva, | ||
834 | (unsigned long)keys[i], 0); | ||
835 | if (r) | ||
836 | goto out; | ||
837 | } | ||
838 | out: | ||
839 | kvfree(keys); | ||
840 | return r; | ||
841 | } | ||
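The storage-key ioctls use a simple descriptor: a guest frame range plus a user buffer holding one key byte per frame, with flags required to be zero. A userspace sketch of the read side, assuming 4.1-era linux/kvm.h headers; vm_fd is an assumed open KVM VM fd, and a return of KVM_S390_GET_SKEYS_NONE means the guest never enabled storage keys, so there is nothing to save:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_skeys(int vm_fd, uint64_t start_gfn, uint64_t count,
                      uint8_t *keys)
{
        struct kvm_s390_skeys args = {
                .start_gfn     = start_gfn,
                .count         = count,  /* 1..KVM_S390_SKEYS_MAX */
                .skeydata_addr = (uint64_t)(unsigned long)keys,
                /* .flags must remain 0 */
        };

        return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}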
842 | |||
711 | long kvm_arch_vm_ioctl(struct file *filp, | 843 | long kvm_arch_vm_ioctl(struct file *filp, |
712 | unsigned int ioctl, unsigned long arg) | 844 | unsigned int ioctl, unsigned long arg) |
713 | { | 845 | { |
@@ -767,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
767 | r = kvm_s390_vm_has_attr(kvm, &attr); | 899 | r = kvm_s390_vm_has_attr(kvm, &attr); |
768 | break; | 900 | break; |
769 | } | 901 | } |
902 | case KVM_S390_GET_SKEYS: { | ||
903 | struct kvm_s390_skeys args; | ||
904 | |||
905 | r = -EFAULT; | ||
906 | if (copy_from_user(&args, argp, | ||
907 | sizeof(struct kvm_s390_skeys))) | ||
908 | break; | ||
909 | r = kvm_s390_get_skeys(kvm, &args); | ||
910 | break; | ||
911 | } | ||
912 | case KVM_S390_SET_SKEYS: { | ||
913 | struct kvm_s390_skeys args; | ||
914 | |||
915 | r = -EFAULT; | ||
916 | if (copy_from_user(&args, argp, | ||
917 | sizeof(struct kvm_s390_skeys))) | ||
918 | break; | ||
919 | r = kvm_s390_set_skeys(kvm, &args); | ||
920 | break; | ||
921 | } | ||
770 | default: | 922 | default: |
771 | r = -ENOTTY; | 923 | r = -ENOTTY; |
772 | } | 924 | } |
@@ -887,7 +1039,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
887 | 1039 | ||
888 | kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); | 1040 | kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); |
889 | if (!kvm->arch.dbf) | 1041 | if (!kvm->arch.dbf) |
890 | goto out_nodbf; | 1042 | goto out_err; |
891 | 1043 | ||
892 | /* | 1044 | /* |
893 | * The architectural maximum amount of facilities is 16 kbit. To store | 1045 | * The architectural maximum amount of facilities is 16 kbit. To store |
@@ -899,7 +1051,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
899 | kvm->arch.model.fac = | 1051 | kvm->arch.model.fac = |
900 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1052 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
901 | if (!kvm->arch.model.fac) | 1053 | if (!kvm->arch.model.fac) |
902 | goto out_nofac; | 1054 | goto out_err; |
903 | 1055 | ||
904 | /* Populate the facility mask initially. */ | 1056 | /* Populate the facility mask initially. */ |
905 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, | 1057 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, |
@@ -919,10 +1071,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
919 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | 1071 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; |
920 | 1072 | ||
921 | if (kvm_s390_crypto_init(kvm) < 0) | 1073 | if (kvm_s390_crypto_init(kvm) < 0) |
922 | goto out_crypto; | 1074 | goto out_err; |
923 | 1075 | ||
924 | spin_lock_init(&kvm->arch.float_int.lock); | 1076 | spin_lock_init(&kvm->arch.float_int.lock); |
925 | INIT_LIST_HEAD(&kvm->arch.float_int.list); | 1077 | for (i = 0; i < FIRQ_LIST_COUNT; i++) |
1078 | INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); | ||
926 | init_waitqueue_head(&kvm->arch.ipte_wq); | 1079 | init_waitqueue_head(&kvm->arch.ipte_wq); |
927 | mutex_init(&kvm->arch.ipte_mutex); | 1080 | mutex_init(&kvm->arch.ipte_mutex); |
928 | 1081 | ||
@@ -934,7 +1087,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
934 | } else { | 1087 | } else { |
935 | kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); | 1088 | kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); |
936 | if (!kvm->arch.gmap) | 1089 | if (!kvm->arch.gmap) |
937 | goto out_nogmap; | 1090 | goto out_err; |
938 | kvm->arch.gmap->private = kvm; | 1091 | kvm->arch.gmap->private = kvm; |
939 | kvm->arch.gmap->pfault_enabled = 0; | 1092 | kvm->arch.gmap->pfault_enabled = 0; |
940 | } | 1093 | } |
@@ -946,15 +1099,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
946 | spin_lock_init(&kvm->arch.start_stop_lock); | 1099 | spin_lock_init(&kvm->arch.start_stop_lock); |
947 | 1100 | ||
948 | return 0; | 1101 | return 0; |
949 | out_nogmap: | 1102 | out_err: |
950 | kfree(kvm->arch.crypto.crycb); | 1103 | kfree(kvm->arch.crypto.crycb); |
951 | out_crypto: | ||
952 | free_page((unsigned long)kvm->arch.model.fac); | 1104 | free_page((unsigned long)kvm->arch.model.fac); |
953 | out_nofac: | ||
954 | debug_unregister(kvm->arch.dbf); | 1105 | debug_unregister(kvm->arch.dbf); |
955 | out_nodbf: | ||
956 | free_page((unsigned long)(kvm->arch.sca)); | 1106 | free_page((unsigned long)(kvm->arch.sca)); |
957 | out_err: | ||
958 | return rc; | 1107 | return rc; |
959 | } | 1108 | } |
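The error-path consolidation above works because the release functions involved tolerate NULL/zero arguments, so every failure can funnel through a single out_err label that unwinds whatever was already allocated. The idiom in miniature, using libc free() as the NULL-tolerant release:

#include <stdlib.h>

struct toy_vm { void *sca, *dbf, *fac; };

static int init_vm(struct toy_vm *vm)
{
        vm->sca = calloc(1, 64);
        if (!vm->sca)
                goto out_err;
        vm->dbf = calloc(1, 64);
        if (!vm->dbf)
                goto out_err;
        vm->fac = calloc(1, 64);
        if (!vm->fac)
                goto out_err;
        return 0;
out_err:
        /* free(NULL) is a no-op, so one label handles any prefix
         * of the allocations having succeeded. */
        free(vm->fac);
        free(vm->dbf);
        free(vm->sca);
        return -1;
}

int main(void)
{
        struct toy_vm vm = { 0 };
        int rc = init_vm(&vm);

        free(vm.fac);
        free(vm.dbf);
        free(vm.sca);
        return rc ? 1 : 0;
}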
960 | 1109 | ||
@@ -1034,6 +1183,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1034 | KVM_SYNC_CRS | | 1183 | KVM_SYNC_CRS | |
1035 | KVM_SYNC_ARCH0 | | 1184 | KVM_SYNC_ARCH0 | |
1036 | KVM_SYNC_PFAULT; | 1185 | KVM_SYNC_PFAULT; |
1186 | if (test_kvm_facility(vcpu->kvm, 129)) | ||
1187 | vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; | ||
1037 | 1188 | ||
1038 | if (kvm_is_ucontrol(vcpu->kvm)) | 1189 | if (kvm_is_ucontrol(vcpu->kvm)) |
1039 | return __kvm_ucontrol_vcpu_init(vcpu); | 1190 | return __kvm_ucontrol_vcpu_init(vcpu); |
@@ -1044,10 +1195,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1044 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1195 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1045 | { | 1196 | { |
1046 | save_fp_ctl(&vcpu->arch.host_fpregs.fpc); | 1197 | save_fp_ctl(&vcpu->arch.host_fpregs.fpc); |
1047 | save_fp_regs(vcpu->arch.host_fpregs.fprs); | 1198 | if (test_kvm_facility(vcpu->kvm, 129)) |
1199 | save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); | ||
1200 | else | ||
1201 | save_fp_regs(vcpu->arch.host_fpregs.fprs); | ||
1048 | save_access_regs(vcpu->arch.host_acrs); | 1202 | save_access_regs(vcpu->arch.host_acrs); |
1049 | restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | 1203 | if (test_kvm_facility(vcpu->kvm, 129)) { |
1050 | restore_fp_regs(vcpu->arch.guest_fpregs.fprs); | 1204 | restore_fp_ctl(&vcpu->run->s.regs.fpc); |
1205 | restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); | ||
1206 | } else { | ||
1207 | restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | ||
1208 | restore_fp_regs(vcpu->arch.guest_fpregs.fprs); | ||
1209 | } | ||
1051 | restore_access_regs(vcpu->run->s.regs.acrs); | 1210 | restore_access_regs(vcpu->run->s.regs.acrs); |
1052 | gmap_enable(vcpu->arch.gmap); | 1211 | gmap_enable(vcpu->arch.gmap); |
1053 | atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 1212 | atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
@@ -1057,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
1057 | { | 1216 | { |
1058 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 1217 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
1059 | gmap_disable(vcpu->arch.gmap); | 1218 | gmap_disable(vcpu->arch.gmap); |
1060 | save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | 1219 | if (test_kvm_facility(vcpu->kvm, 129)) { |
1061 | save_fp_regs(vcpu->arch.guest_fpregs.fprs); | 1220 | save_fp_ctl(&vcpu->run->s.regs.fpc); |
1221 | save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); | ||
1222 | } else { | ||
1223 | save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); | ||
1224 | save_fp_regs(vcpu->arch.guest_fpregs.fprs); | ||
1225 | } | ||
1062 | save_access_regs(vcpu->run->s.regs.acrs); | 1226 | save_access_regs(vcpu->run->s.regs.acrs); |
1063 | restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); | 1227 | restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); |
1064 | restore_fp_regs(vcpu->arch.host_fpregs.fprs); | 1228 | if (test_kvm_facility(vcpu->kvm, 129)) |
1229 | restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); | ||
1230 | else | ||
1231 | restore_fp_regs(vcpu->arch.host_fpregs.fprs); | ||
1065 | restore_access_regs(vcpu->arch.host_acrs); | 1232 | restore_access_regs(vcpu->arch.host_acrs); |
1066 | } | 1233 | } |
1067 | 1234 | ||
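Facility 129 is the vector facility. With it installed, floating-point register i is architecturally the leftmost 64 bits of vector register i, so saving and restoring the 32 vector registers also covers the 16 FPRs; that is why load/put above take exactly one of the two paths, and why the guest copy can live in the shared run area (the KVM_SYNC_VRS bit set earlier). An illustrative layout check; the struct is a stand-in, not the kernel's __vector128:

    #include <assert.h>
    #include <stdint.h>

    struct vreg { uint64_t hi, lo; };       /* VR i: bits 0-63 / 64-127 */

    int main(void)
    {
        struct vreg vr = { 0, 0 };
        uint64_t fpr = 0x4024000000000000ULL;   /* arbitrary FP payload */

        vr.hi = fpr;            /* FPR i overlays VR i bits 0-63 */
        assert(vr.hi == fpr);   /* a VR save captures the FPR too */
        return 0;
    }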
@@ -1129,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) | |||
1129 | return 0; | 1296 | return 0; |
1130 | } | 1297 | } |
1131 | 1298 | ||
1299 | static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) | ||
1300 | { | ||
1301 | struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; | ||
1302 | |||
1303 | vcpu->arch.cpu_id = model->cpu_id; | ||
1304 | vcpu->arch.sie_block->ibc = model->ibc; | ||
1305 | vcpu->arch.sie_block->fac = (int) (long) model->fac->list; | ||
1306 | } | ||
1307 | |||
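kvm_s390_vcpu_setup_model() gathers the per-vcpu model wiring (cpu id, IBC, facility list) that used to be split between vcpu creation and a kvm->lock critical section removed further down. One detail to note: the (int) (long) cast of model->fac->list only stays lossless because the facility page was allocated with GFP_KERNEL | GFP_DMA, which on s390 keeps it below 2 GB, small enough for the 32-bit sie_block->fac field. A toy model of that truncation invariant (the address is a made-up example):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t fac_page = 0x7ffff000ULL;      /* assumed DMA-zone address */
        uint32_t sie_fac  = (uint32_t)fac_page; /* what the narrowing cast keeps */

        assert((uint64_t)sie_fac == fac_page);  /* lossless for low addresses */
        return 0;
    }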
1132 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 1308 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
1133 | { | 1309 | { |
1134 | int rc = 0; | 1310 | int rc = 0; |
@@ -1137,6 +1313,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1137 | CPUSTAT_SM | | 1313 | CPUSTAT_SM | |
1138 | CPUSTAT_STOPPED | | 1314 | CPUSTAT_STOPPED | |
1139 | CPUSTAT_GED); | 1315 | CPUSTAT_GED); |
1316 | kvm_s390_vcpu_setup_model(vcpu); | ||
1317 | |||
1140 | vcpu->arch.sie_block->ecb = 6; | 1318 | vcpu->arch.sie_block->ecb = 6; |
1141 | if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) | 1319 | if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) |
1142 | vcpu->arch.sie_block->ecb |= 0x10; | 1320 | vcpu->arch.sie_block->ecb |= 0x10; |
@@ -1147,8 +1325,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1147 | vcpu->arch.sie_block->eca |= 1; | 1325 | vcpu->arch.sie_block->eca |= 1; |
1148 | if (sclp_has_sigpif()) | 1326 | if (sclp_has_sigpif()) |
1149 | vcpu->arch.sie_block->eca |= 0x10000000U; | 1327 | vcpu->arch.sie_block->eca |= 0x10000000U; |
1150 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | | 1328 | if (test_kvm_facility(vcpu->kvm, 129)) { |
1151 | ICTL_TPROT; | 1329 | vcpu->arch.sie_block->eca |= 0x00020000; |
1330 | vcpu->arch.sie_block->ecd |= 0x20000000; | ||
1331 | } | ||
1332 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; | ||
1152 | 1333 | ||
1153 | if (kvm_s390_cmma_enabled(vcpu->kvm)) { | 1334 | if (kvm_s390_cmma_enabled(vcpu->kvm)) { |
1154 | rc = kvm_s390_vcpu_setup_cmma(vcpu); | 1335 | rc = kvm_s390_vcpu_setup_cmma(vcpu); |
@@ -1158,11 +1339,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1158 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1339 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1159 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 1340 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
1160 | 1341 | ||
1161 | mutex_lock(&vcpu->kvm->lock); | ||
1162 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | ||
1163 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | ||
1164 | mutex_unlock(&vcpu->kvm->lock); | ||
1165 | |||
1166 | kvm_s390_vcpu_crypto_setup(vcpu); | 1342 | kvm_s390_vcpu_crypto_setup(vcpu); |
1167 | 1343 | ||
1168 | return rc; | 1344 | return rc; |
@@ -1190,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1190 | 1366 | ||
1191 | vcpu->arch.sie_block = &sie_page->sie_block; | 1367 | vcpu->arch.sie_block = &sie_page->sie_block; |
1192 | vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; | 1368 | vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; |
1369 | vcpu->arch.host_vregs = &sie_page->vregs; | ||
1193 | 1370 | ||
1194 | vcpu->arch.sie_block->icpua = id; | 1371 | vcpu->arch.sie_block->icpua = id; |
1195 | if (!kvm_is_ucontrol(kvm)) { | 1372 | if (!kvm_is_ucontrol(kvm)) { |
@@ -1205,7 +1382,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1205 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1382 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
1206 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1383 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
1207 | } | 1384 | } |
1208 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; | ||
1209 | 1385 | ||
1210 | spin_lock_init(&vcpu->arch.local_int.lock); | 1386 | spin_lock_init(&vcpu->arch.local_int.lock); |
1211 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1387 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
@@ -1725,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) | |||
1725 | return 0; | 1901 | return 0; |
1726 | } | 1902 | } |
1727 | 1903 | ||
1904 | static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) | ||
1905 | { | ||
1906 | psw_t *psw = &vcpu->arch.sie_block->gpsw; | ||
1907 | u8 opcode; | ||
1908 | int rc; | ||
1909 | |||
1910 | VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); | ||
1911 | trace_kvm_s390_sie_fault(vcpu); | ||
1912 | |||
1913 | /* | ||
1914 | * We want to inject an addressing exception, which is defined as a | ||
1915 | * suppressing or terminating exception. However, since we got here | ||
1916 | * via a DAT access exception, the PSW still points to the faulting | ||
1917 | * instruction, because DAT exceptions are nullifying. So we have | ||
1918 | * to look up the current opcode to get the length of the instruction | ||
1919 | * in order to forward the PSW. | ||
1920 | */ | ||
1921 | rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); | ||
1922 | if (rc) | ||
1923 | return kvm_s390_inject_prog_cond(vcpu, rc); | ||
1924 | psw->addr = __rewind_psw(*psw, -insn_length(opcode)); | ||
1925 | |||
1926 | return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
1927 | } | ||
1928 | |||
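The subtle part is the rewind: __rewind_psw() with a negative amount moves the PSW forward, past the instruction that faulted, which makes the injected addressing exception look suppressing as required. The length is encoded in the two most significant bits of the first opcode byte (00: 2 bytes, 01/10: 4 bytes, 11: 6 bytes); the sketch below mirrors what the kernel's insn_length() helper computes:

    #include <assert.h>

    static int insn_length(unsigned char code)
    {
        return ((((int) code + 64) >> 7) + 1) << 1;
    }

    int main(void)
    {
        assert(insn_length(0x04) == 2);  /* 00xxxxxx, e.g. SPM */
        assert(insn_length(0x58) == 4);  /* 01xxxxxx, e.g. L   */
        assert(insn_length(0xe3) == 6);  /* 11xxxxxx, e.g. LG  */
        return 0;
    }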
1728 | static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | 1929 | static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) |
1729 | { | 1930 | { |
1730 | int rc = -1; | 1931 | int rc = -1; |
@@ -1756,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) | |||
1756 | } | 1957 | } |
1757 | } | 1958 | } |
1758 | 1959 | ||
1759 | if (rc == -1) { | 1960 | if (rc == -1) |
1760 | VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); | 1961 | rc = vcpu_post_run_fault_in_sie(vcpu); |
1761 | trace_kvm_s390_sie_fault(vcpu); | ||
1762 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | ||
1763 | } | ||
1764 | 1962 | ||
1765 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); | 1963 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); |
1766 | 1964 | ||
@@ -1976,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
1976 | return kvm_s390_store_status_unloaded(vcpu, addr); | 2174 | return kvm_s390_store_status_unloaded(vcpu, addr); |
1977 | } | 2175 | } |
1978 | 2176 | ||
2177 | /* | ||
2178 | * store additional status at address | ||
2179 | */ | ||
2180 | int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, | ||
2181 | unsigned long gpa) | ||
2182 | { | ||
2183 | /* Only bits 0-53 are used for address formation */ | ||
2184 | if (!(gpa & ~0x3ff)) | ||
2185 | return 0; | ||
2186 | |||
2187 | return write_guest_abs(vcpu, gpa & ~0x3ff, | ||
2188 | (void *)&vcpu->run->s.regs.vrs, 512); | ||
2189 | } | ||
2190 | |||
2191 | int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) | ||
2192 | { | ||
2193 | if (!test_kvm_facility(vcpu->kvm, 129)) | ||
2194 | return 0; | ||
2195 | |||
2196 | /* | ||
2197 | * The guest VXRS are in the host VXRS due to the lazy | ||
2198 | * copying in vcpu load/put. Let's update our copies before we save | ||
2199 | * them into the save area. | ||
2200 | */ | ||
2201 | save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); | ||
2202 | |||
2203 | return kvm_s390_store_adtl_status_unloaded(vcpu, addr); | ||
2204 | } | ||
2205 | |||
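The additional-status area receives the 32 vector registers of 16 bytes each, which is the 512-byte write above. Because only bits 0-53 of the parameter take part in address formation, the low 10 bits are masked off (hence the 1024-byte alignment), and a parameter with no significant bits set means there is nothing to store. A small check of the masking arithmetic; the sample address is arbitrary:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t gpa  = 0x12345678ULL;
        uint64_t area = gpa & ~0x3ffULL;        /* same mask as above */

        assert(area % 1024 == 0);               /* 1 KB aligned target  */
        assert(32 * 16 == 512);                 /* 32 VRs x 16 bytes    */
        assert((0x200ULL & ~0x3ffULL) == 0);    /* low-bits-only: skip  */
        return 0;
    }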
1979 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) | 2206 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) |
1980 | { | 2207 | { |
1981 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); | 2208 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); |
@@ -2100,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | |||
2100 | return r; | 2327 | return r; |
2101 | } | 2328 | } |
2102 | 2329 | ||
2330 | static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, | ||
2331 | struct kvm_s390_mem_op *mop) | ||
2332 | { | ||
2333 | void __user *uaddr = (void __user *)mop->buf; | ||
2334 | void *tmpbuf = NULL; | ||
2335 | int r, srcu_idx; | ||
2336 | const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION | ||
2337 | | KVM_S390_MEMOP_F_CHECK_ONLY; | ||
2338 | |||
2339 | if (mop->flags & ~supported_flags) | ||
2340 | return -EINVAL; | ||
2341 | |||
2342 | if (mop->size > MEM_OP_MAX_SIZE) | ||
2343 | return -E2BIG; | ||
2344 | |||
2345 | if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { | ||
2346 | tmpbuf = vmalloc(mop->size); | ||
2347 | if (!tmpbuf) | ||
2348 | return -ENOMEM; | ||
2349 | } | ||
2350 | |||
2351 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
2352 | |||
2353 | switch (mop->op) { | ||
2354 | case KVM_S390_MEMOP_LOGICAL_READ: | ||
2355 | if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { | ||
2356 | r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); | ||
2357 | break; | ||
2358 | } | ||
2359 | r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); | ||
2360 | if (r == 0) { | ||
2361 | if (copy_to_user(uaddr, tmpbuf, mop->size)) | ||
2362 | r = -EFAULT; | ||
2363 | } | ||
2364 | break; | ||
2365 | case KVM_S390_MEMOP_LOGICAL_WRITE: | ||
2366 | if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { | ||
2367 | r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); | ||
2368 | break; | ||
2369 | } | ||
2370 | if (copy_from_user(tmpbuf, uaddr, mop->size)) { | ||
2371 | r = -EFAULT; | ||
2372 | break; | ||
2373 | } | ||
2374 | r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); | ||
2375 | break; | ||
2376 | default: | ||
2377 | r = -EINVAL; | ||
2378 | } | ||
2379 | |||
2380 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); | ||
2381 | |||
2382 | if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) | ||
2383 | kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | ||
2384 | |||
2385 | vfree(tmpbuf); | ||
2386 | return r; | ||
2387 | } | ||
2388 | |||
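A hypothetical userspace caller of the new KVM_S390_MEM_OP vcpu ioctl, assuming a uapi header that exports the structure and constants this series adds; the file descriptor, address and size are illustrative:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Read 256 bytes from guest logical address 0x1000 through access
     * register 0; on a translation fault, ask KVM to inject the program
     * exception into the guest rather than merely failing the ioctl. */
    static int read_guest_mem(int vcpu_fd, void *buf)
    {
        struct kvm_s390_mem_op mop;

        memset(&mop, 0, sizeof(mop));
        mop.gaddr = 0x1000;
        mop.size  = 256;
        mop.op    = KVM_S390_MEMOP_LOGICAL_READ;
        mop.buf   = (unsigned long)buf;
        mop.ar    = 0;
        mop.flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION;

        return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
    }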
2103 | long kvm_arch_vcpu_ioctl(struct file *filp, | 2389 | long kvm_arch_vcpu_ioctl(struct file *filp, |
2104 | unsigned int ioctl, unsigned long arg) | 2390 | unsigned int ioctl, unsigned long arg) |
2105 | { | 2391 | { |
@@ -2109,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
2109 | long r; | 2395 | long r; |
2110 | 2396 | ||
2111 | switch (ioctl) { | 2397 | switch (ioctl) { |
2398 | case KVM_S390_IRQ: { | ||
2399 | struct kvm_s390_irq s390irq; | ||
2400 | |||
2401 | r = -EFAULT; | ||
2402 | if (copy_from_user(&s390irq, argp, sizeof(s390irq))) | ||
2403 | break; | ||
2404 | r = kvm_s390_inject_vcpu(vcpu, &s390irq); | ||
2405 | break; | ||
2406 | } | ||
2112 | case KVM_S390_INTERRUPT: { | 2407 | case KVM_S390_INTERRUPT: { |
2113 | struct kvm_s390_interrupt s390int; | 2408 | struct kvm_s390_interrupt s390int; |
2114 | struct kvm_s390_irq s390irq; | 2409 | struct kvm_s390_irq s390irq; |
@@ -2199,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
2199 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | 2494 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
2200 | break; | 2495 | break; |
2201 | } | 2496 | } |
2497 | case KVM_S390_MEM_OP: { | ||
2498 | struct kvm_s390_mem_op mem_op; | ||
2499 | |||
2500 | if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0) | ||
2501 | r = kvm_s390_guest_mem_op(vcpu, &mem_op); | ||
2502 | else | ||
2503 | r = -EFAULT; | ||
2504 | break; | ||
2505 | } | ||
2506 | case KVM_S390_SET_IRQ_STATE: { | ||
2507 | struct kvm_s390_irq_state irq_state; | ||
2508 | |||
2509 | r = -EFAULT; | ||
2510 | if (copy_from_user(&irq_state, argp, sizeof(irq_state))) | ||
2511 | break; | ||
2512 | if (irq_state.len > VCPU_IRQS_MAX_BUF || | ||
2513 | irq_state.len == 0 || | ||
2514 | irq_state.len % sizeof(struct kvm_s390_irq) > 0) { | ||
2515 | r = -EINVAL; | ||
2516 | break; | ||
2517 | } | ||
2518 | r = kvm_s390_set_irq_state(vcpu, | ||
2519 | (void __user *) irq_state.buf, | ||
2520 | irq_state.len); | ||
2521 | break; | ||
2522 | } | ||
2523 | case KVM_S390_GET_IRQ_STATE: { | ||
2524 | struct kvm_s390_irq_state irq_state; | ||
2525 | |||
2526 | r = -EFAULT; | ||
2527 | if (copy_from_user(&irq_state, argp, sizeof(irq_state))) | ||
2528 | break; | ||
2529 | if (irq_state.len == 0) { | ||
2530 | r = -EINVAL; | ||
2531 | break; | ||
2532 | } | ||
2533 | r = kvm_s390_get_irq_state(vcpu, | ||
2534 | (__u8 __user *) irq_state.buf, | ||
2535 | irq_state.len); | ||
2536 | break; | ||
2537 | } | ||
2202 | default: | 2538 | default: |
2203 | r = -ENOTTY; | 2539 | r = -ENOTTY; |
2204 | } | 2540 | } |
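A hypothetical migration flow for the two new irq-state ioctls, again assuming the uapi definitions from this series; buffer management and error handling are kept minimal:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Source side: drain the vcpu's complete local interrupt state. */
    static int get_irq_state(int vcpu_fd, void *buf, int len)
    {
        struct kvm_s390_irq_state state = {
            .buf = (unsigned long)buf,
            .len = len,
        };

        return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
    }

    /* Target side: replay it; len must be a non-zero multiple of
     * sizeof(struct kvm_s390_irq), as the checks above require. */
    static int set_irq_state(int vcpu_fd, void *buf, int len)
    {
        struct kvm_s390_irq_state state = {
            .buf = (unsigned long)buf,
            .len = len,
        };

        return ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &state);
    }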
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c34109aa552d..ca108b90ae56 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) | |||
70 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); | 70 | kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) | 73 | typedef u8 __bitwise ar_t; |
74 | |||
75 | static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar) | ||
74 | { | 76 | { |
75 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; | 77 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
76 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | 78 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); |
77 | 79 | ||
80 | if (ar) | ||
81 | *ar = base2; | ||
82 | |||
78 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | 83 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; |
79 | } | 84 | } |
80 | 85 | ||
81 | static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, | 86 | static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, |
82 | u64 *address1, u64 *address2) | 87 | u64 *address1, u64 *address2, |
88 | ar_t *ar_b1, ar_t *ar_b2) | ||
83 | { | 89 | { |
84 | u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; | 90 | u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; |
85 | u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; | 91 | u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; |
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, | |||
88 | 94 | ||
89 | *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; | 95 | *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; |
90 | *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | 96 | *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; |
97 | |||
98 | if (ar_b1) | ||
99 | *ar_b1 = base1; | ||
100 | if (ar_b2) | ||
101 | *ar_b2 = base2; | ||
91 | } | 102 | } |
92 | 103 | ||
93 | static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) | 104 | static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) |
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2 | |||
98 | *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; | 109 | *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; |
99 | } | 110 | } |
100 | 111 | ||
101 | static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) | 112 | static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar) |
102 | { | 113 | { |
103 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; | 114 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
104 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + | 115 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + |
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) | |||
107 | if (disp2 & 0x80000) | 118 | if (disp2 & 0x80000) |
108 | disp2+=0xfff00000; | 119 | disp2+=0xfff00000; |
109 | 120 | ||
121 | if (ar) | ||
122 | *ar = base2; | ||
123 | |||
110 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; | 124 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; |
111 | } | 125 | } |
112 | 126 | ||
113 | static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) | 127 | static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar) |
114 | { | 128 | { |
115 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; | 129 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
116 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | 130 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); |
117 | 131 | ||
132 | if (ar) | ||
133 | *ar = base2; | ||
134 | |||
118 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | 135 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; |
119 | } | 136 | } |
120 | 137 | ||
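The common thread of these header changes: for base-displacement operands, the base register number doubles as the access register number when the guest runs in access-register mode, so each decoder now optionally hands base2 (or base1) back through an ar_t out-parameter, and callers that don't care, such as the SIGP handlers at the end of this diff, simply pass NULL. A self-contained restatement of the decode, with the same field layout:

    #include <assert.h>
    #include <stdint.h>

    typedef uint8_t ar_t;

    static uint64_t decode_bd(uint32_t ipb, const uint64_t gprs[16], ar_t *ar)
    {
        uint32_t base2 = ipb >> 28;
        uint32_t disp2 = (ipb & 0x0fff0000) >> 16;

        if (ar)
            *ar = base2;        /* base register number == AR number */
        return (base2 ? gprs[base2] : 0) + disp2;
    }

    int main(void)
    {
        uint64_t gprs[16] = { [5] = 0x2000 };
        ar_t ar;

        assert(decode_bd(0x51230000, gprs, &ar) == 0x2123);
        assert(ar == 5);
        return 0;
    }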
@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
125 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; | 142 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; |
126 | } | 143 | } |
127 | 144 | ||
128 | /* test availability of facility in a kvm intance */ | 145 | /* test availability of facility in a kvm instance */ |
129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) | 146 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) |
130 | { | 147 | { |
131 | return __test_facility(nr, kvm->arch.model.fac->mask) && | 148 | return __test_facility(nr, kvm->arch.model.fac->mask) && |
132 | __test_facility(nr, kvm->arch.model.fac->list); | 149 | __test_facility(nr, kvm->arch.model.fac->list); |
133 | } | 150 | } |
134 | 151 | ||
152 | static inline int set_kvm_facility(u64 *fac_list, unsigned long nr) | ||
153 | { | ||
154 | unsigned char *ptr; | ||
155 | |||
156 | if (nr >= MAX_FACILITY_BIT) | ||
157 | return -EINVAL; | ||
158 | ptr = (unsigned char *) fac_list + (nr >> 3); | ||
159 | *ptr |= (0x80UL >> (nr & 7)); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
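set_kvm_facility() follows the architecture's big-endian bit numbering: bit 0 is the most significant bit of byte 0 of the facility list. For facility 129, the vector facility gated throughout this series, the bit therefore lands in byte 16 under mask 0x40; a quick check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned long nr = 129;
        uint8_t fac[32] = { 0 };

        fac[nr >> 3] |= 0x80 >> (nr & 7);
        assert(fac[16] == 0x40);        /* byte 16, second-highest bit */
        return 0;
    }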
135 | /* are cpu states controlled by user space */ | 163 | /* are cpu states controlled by user space */ |
136 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) | 164 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) |
137 | { | 165 | { |
@@ -150,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
150 | struct kvm_s390_irq *irq); | 178 | struct kvm_s390_irq *irq); |
151 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); | 179 | int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); |
152 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, | 180 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
153 | u64 cr6, u64 schid); | 181 | u64 isc_mask, u32 schid); |
154 | void kvm_s390_reinject_io_int(struct kvm *kvm, | 182 | int kvm_s390_reinject_io_int(struct kvm *kvm, |
155 | struct kvm_s390_interrupt_info *inti); | 183 | struct kvm_s390_interrupt_info *inti); |
156 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); | 184 | int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); |
157 | 185 | ||
158 | /* implemented in intercept.c */ | 186 | /* implemented in intercept.c */ |
@@ -177,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); | |||
177 | /* implemented in kvm-s390.c */ | 205 | /* implemented in kvm-s390.c */ |
178 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); | 206 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); |
179 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); | 207 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); |
208 | int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, | ||
209 | unsigned long addr); | ||
180 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); | 210 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); |
211 | int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr); | ||
181 | void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); | 212 | void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); |
182 | void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); | 213 | void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); |
183 | void s390_vcpu_block(struct kvm_vcpu *vcpu); | 214 | void s390_vcpu_block(struct kvm_vcpu *vcpu); |
@@ -241,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); | |||
241 | extern struct kvm_device_ops kvm_flic_ops; | 272 | extern struct kvm_device_ops kvm_flic_ops; |
242 | int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); | 273 | int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); |
243 | void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); | 274 | void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); |
275 | int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, | ||
276 | void __user *buf, int len); | ||
277 | int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, | ||
278 | __u8 __user *buf, int len); | ||
244 | 279 | ||
245 | /* implemented in guestdbg.c */ | 280 | /* implemented in guestdbg.c */ |
246 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); | 281 | void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 351116939ea2..d22d8ee1ff9d 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
36 | struct kvm_vcpu *cpup; | 36 | struct kvm_vcpu *cpup; |
37 | s64 hostclk, val; | 37 | s64 hostclk, val; |
38 | int i, rc; | 38 | int i, rc; |
39 | ar_t ar; | ||
39 | u64 op2; | 40 | u64 op2; |
40 | 41 | ||
41 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 42 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
42 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 43 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
43 | 44 | ||
44 | op2 = kvm_s390_get_base_disp_s(vcpu); | 45 | op2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
45 | if (op2 & 7) /* Operand must be on a doubleword boundary */ | 46 | if (op2 & 7) /* Operand must be on a doubleword boundary */ |
46 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 47 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
47 | rc = read_guest(vcpu, op2, &val, sizeof(val)); | 48 | rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); |
48 | if (rc) | 49 | if (rc) |
49 | return kvm_s390_inject_prog_cond(vcpu, rc); | 50 | return kvm_s390_inject_prog_cond(vcpu, rc); |
50 | 51 | ||
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) | |||
68 | u64 operand2; | 69 | u64 operand2; |
69 | u32 address; | 70 | u32 address; |
70 | int rc; | 71 | int rc; |
72 | ar_t ar; | ||
71 | 73 | ||
72 | vcpu->stat.instruction_spx++; | 74 | vcpu->stat.instruction_spx++; |
73 | 75 | ||
74 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 76 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
75 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 77 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
76 | 78 | ||
77 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 79 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
78 | 80 | ||
79 | /* must be word boundary */ | 81 | /* must be word boundary */ |
80 | if (operand2 & 3) | 82 | if (operand2 & 3) |
81 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 83 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
82 | 84 | ||
83 | /* get the value */ | 85 | /* get the value */ |
84 | rc = read_guest(vcpu, operand2, &address, sizeof(address)); | 86 | rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); |
85 | if (rc) | 87 | if (rc) |
86 | return kvm_s390_inject_prog_cond(vcpu, rc); | 88 | return kvm_s390_inject_prog_cond(vcpu, rc); |
87 | 89 | ||
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
107 | u64 operand2; | 109 | u64 operand2; |
108 | u32 address; | 110 | u32 address; |
109 | int rc; | 111 | int rc; |
112 | ar_t ar; | ||
110 | 113 | ||
111 | vcpu->stat.instruction_stpx++; | 114 | vcpu->stat.instruction_stpx++; |
112 | 115 | ||
113 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 116 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
114 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 117 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
115 | 118 | ||
116 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 119 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
117 | 120 | ||
118 | /* must be word boundary */ | 121 | /* must be word boundary */ |
119 | if (operand2 & 3) | 122 | if (operand2 & 3) |
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) | |||
122 | address = kvm_s390_get_prefix(vcpu); | 125 | address = kvm_s390_get_prefix(vcpu); |
123 | 126 | ||
124 | /* get the value */ | 127 | /* get the value */ |
125 | rc = write_guest(vcpu, operand2, &address, sizeof(address)); | 128 | rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); |
126 | if (rc) | 129 | if (rc) |
127 | return kvm_s390_inject_prog_cond(vcpu, rc); | 130 | return kvm_s390_inject_prog_cond(vcpu, rc); |
128 | 131 | ||
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) | |||
136 | u16 vcpu_id = vcpu->vcpu_id; | 139 | u16 vcpu_id = vcpu->vcpu_id; |
137 | u64 ga; | 140 | u64 ga; |
138 | int rc; | 141 | int rc; |
142 | ar_t ar; | ||
139 | 143 | ||
140 | vcpu->stat.instruction_stap++; | 144 | vcpu->stat.instruction_stap++; |
141 | 145 | ||
142 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 146 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
143 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 147 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
144 | 148 | ||
145 | ga = kvm_s390_get_base_disp_s(vcpu); | 149 | ga = kvm_s390_get_base_disp_s(vcpu, &ar); |
146 | 150 | ||
147 | if (ga & 1) | 151 | if (ga & 1) |
148 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 152 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
149 | 153 | ||
150 | rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); | 154 | rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); |
151 | if (rc) | 155 | if (rc) |
152 | return kvm_s390_inject_prog_cond(vcpu, rc); | 156 | return kvm_s390_inject_prog_cond(vcpu, rc); |
153 | 157 | ||
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu) | |||
207 | kvm_s390_get_regs_rre(vcpu, NULL, ®2); | 211 | kvm_s390_get_regs_rre(vcpu, NULL, ®2); |
208 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; | 212 | addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; |
209 | addr = kvm_s390_logical_to_effective(vcpu, addr); | 213 | addr = kvm_s390_logical_to_effective(vcpu, addr); |
210 | if (kvm_s390_check_low_addr_protection(vcpu, addr)) | 214 | if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) |
211 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | 215 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); |
212 | addr = kvm_s390_real_to_abs(vcpu, addr); | 216 | addr = kvm_s390_real_to_abs(vcpu, addr); |
213 | 217 | ||
@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu) | |||
229 | struct kvm_s390_interrupt_info *inti; | 233 | struct kvm_s390_interrupt_info *inti; |
230 | unsigned long len; | 234 | unsigned long len; |
231 | u32 tpi_data[3]; | 235 | u32 tpi_data[3]; |
232 | int cc, rc; | 236 | int rc; |
233 | u64 addr; | 237 | u64 addr; |
238 | ar_t ar; | ||
234 | 239 | ||
235 | rc = 0; | 240 | addr = kvm_s390_get_base_disp_s(vcpu, &ar); |
236 | addr = kvm_s390_get_base_disp_s(vcpu); | ||
237 | if (addr & 3) | 241 | if (addr & 3) |
238 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 242 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
239 | cc = 0; | 243 | |
240 | inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); | 244 | inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); |
241 | if (!inti) | 245 | if (!inti) { |
242 | goto no_interrupt; | 246 | kvm_s390_set_psw_cc(vcpu, 0); |
243 | cc = 1; | 247 | return 0; |
248 | } | ||
249 | |||
244 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; | 250 | tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; |
245 | tpi_data[1] = inti->io.io_int_parm; | 251 | tpi_data[1] = inti->io.io_int_parm; |
246 | tpi_data[2] = inti->io.io_int_word; | 252 | tpi_data[2] = inti->io.io_int_word; |
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu) | |||
250 | * provided area. | 256 | * provided area. |
251 | */ | 257 | */ |
252 | len = sizeof(tpi_data) - 4; | 258 | len = sizeof(tpi_data) - 4; |
253 | rc = write_guest(vcpu, addr, &tpi_data, len); | 259 | rc = write_guest(vcpu, addr, ar, &tpi_data, len); |
254 | if (rc) | 260 | if (rc) { |
255 | return kvm_s390_inject_prog_cond(vcpu, rc); | 261 | rc = kvm_s390_inject_prog_cond(vcpu, rc); |
262 | goto reinject_interrupt; | ||
263 | } | ||
256 | } else { | 264 | } else { |
257 | /* | 265 | /* |
258 | * Store the three-word I/O interruption code into | 266 | * Store the three-word I/O interruption code into |
259 | * the appropriate lowcore area. | 267 | * the appropriate lowcore area. |
260 | */ | 268 | */ |
261 | len = sizeof(tpi_data); | 269 | len = sizeof(tpi_data); |
262 | if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) | 270 | if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { |
271 | /* failed writes to the lowcore are not recoverable */ | ||
263 | rc = -EFAULT; | 272 | rc = -EFAULT; |
273 | goto reinject_interrupt; | ||
274 | } | ||
264 | } | 275 | } |
276 | |||
277 | /* irq was successfully handed to the guest */ | ||
278 | kfree(inti); | ||
279 | kvm_s390_set_psw_cc(vcpu, 1); | ||
280 | return 0; | ||
281 | reinject_interrupt: | ||
265 | /* | 282 | /* |
266 | * If we encounter a problem storing the interruption code, the | 283 | * If we encounter a problem storing the interruption code, the |
267 | * instruction is suppressed from the guest's view: reinject the | 284 | * instruction is suppressed from the guest's view: reinject the |
268 | * interrupt. | 285 | * interrupt. |
269 | */ | 286 | */ |
270 | if (!rc) | 287 | if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { |
271 | kfree(inti); | 288 | kfree(inti); |
272 | else | 289 | rc = -EFAULT; |
273 | kvm_s390_reinject_io_int(vcpu->kvm, inti); | 290 | } |
274 | no_interrupt: | 291 | /* don't set the cc: a pgm irq was injected or we drop to user space */ |
275 | /* Set condition code and we're done. */ | ||
276 | if (!rc) | ||
277 | kvm_s390_set_psw_cc(vcpu, cc); | ||
278 | return rc ? -EFAULT : 0; | 292 | return rc ? -EFAULT : 0; |
279 | } | 293 | } |
280 | 294 | ||
281 | static int handle_tsch(struct kvm_vcpu *vcpu) | 295 | static int handle_tsch(struct kvm_vcpu *vcpu) |
282 | { | 296 | { |
283 | struct kvm_s390_interrupt_info *inti; | 297 | struct kvm_s390_interrupt_info *inti = NULL; |
298 | const u64 isc_mask = 0xffUL << 24; /* all iscs set */ | ||
284 | 299 | ||
285 | inti = kvm_s390_get_io_int(vcpu->kvm, 0, | 300 | /* a valid schid has at least one bit set */ |
286 | vcpu->run->s.regs.gprs[1]); | 301 | if (vcpu->run->s.regs.gprs[1]) |
302 | inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, | ||
303 | vcpu->run->s.regs.gprs[1]); | ||
287 | 304 | ||
288 | /* | 305 | /* |
289 | * Prepare exit to userspace. | 306 | * Prepare exit to userspace. |
@@ -386,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) | |||
386 | psw_compat_t new_psw; | 403 | psw_compat_t new_psw; |
387 | u64 addr; | 404 | u64 addr; |
388 | int rc; | 405 | int rc; |
406 | ar_t ar; | ||
389 | 407 | ||
390 | if (gpsw->mask & PSW_MASK_PSTATE) | 408 | if (gpsw->mask & PSW_MASK_PSTATE) |
391 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 409 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
392 | 410 | ||
393 | addr = kvm_s390_get_base_disp_s(vcpu); | 411 | addr = kvm_s390_get_base_disp_s(vcpu, &ar); |
394 | if (addr & 7) | 412 | if (addr & 7) |
395 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 413 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
396 | 414 | ||
397 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); | 415 | rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); |
398 | if (rc) | 416 | if (rc) |
399 | return kvm_s390_inject_prog_cond(vcpu, rc); | 417 | return kvm_s390_inject_prog_cond(vcpu, rc); |
400 | if (!(new_psw.mask & PSW32_MASK_BASE)) | 418 | if (!(new_psw.mask & PSW32_MASK_BASE)) |
@@ -412,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) | |||
412 | psw_t new_psw; | 430 | psw_t new_psw; |
413 | u64 addr; | 431 | u64 addr; |
414 | int rc; | 432 | int rc; |
433 | ar_t ar; | ||
415 | 434 | ||
416 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 435 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
417 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 436 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
418 | 437 | ||
419 | addr = kvm_s390_get_base_disp_s(vcpu); | 438 | addr = kvm_s390_get_base_disp_s(vcpu, &ar); |
420 | if (addr & 7) | 439 | if (addr & 7) |
421 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 440 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
422 | rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); | 441 | rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); |
423 | if (rc) | 442 | if (rc) |
424 | return kvm_s390_inject_prog_cond(vcpu, rc); | 443 | return kvm_s390_inject_prog_cond(vcpu, rc); |
425 | vcpu->arch.sie_block->gpsw = new_psw; | 444 | vcpu->arch.sie_block->gpsw = new_psw; |
@@ -433,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu) | |||
433 | u64 stidp_data = vcpu->arch.stidp_data; | 452 | u64 stidp_data = vcpu->arch.stidp_data; |
434 | u64 operand2; | 453 | u64 operand2; |
435 | int rc; | 454 | int rc; |
455 | ar_t ar; | ||
436 | 456 | ||
437 | vcpu->stat.instruction_stidp++; | 457 | vcpu->stat.instruction_stidp++; |
438 | 458 | ||
439 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 459 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
440 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 460 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
441 | 461 | ||
442 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 462 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
443 | 463 | ||
444 | if (operand2 & 7) | 464 | if (operand2 & 7) |
445 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 465 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
446 | 466 | ||
447 | rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); | 467 | rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); |
448 | if (rc) | 468 | if (rc) |
449 | return kvm_s390_inject_prog_cond(vcpu, rc); | 469 | return kvm_s390_inject_prog_cond(vcpu, rc); |
450 | 470 | ||
@@ -467,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) | |||
467 | for (n = mem->count - 1; n > 0 ; n--) | 487 | for (n = mem->count - 1; n > 0 ; n--) |
468 | memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); | 488 | memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); |
469 | 489 | ||
490 | memset(&mem->vm[0], 0, sizeof(mem->vm[0])); | ||
470 | mem->vm[0].cpus_total = cpus; | 491 | mem->vm[0].cpus_total = cpus; |
471 | mem->vm[0].cpus_configured = cpus; | 492 | mem->vm[0].cpus_configured = cpus; |
472 | mem->vm[0].cpus_standby = 0; | 493 | mem->vm[0].cpus_standby = 0; |
@@ -478,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) | |||
478 | ASCEBC(mem->vm[0].cpi, 16); | 499 | ASCEBC(mem->vm[0].cpi, 16); |
479 | } | 500 | } |
480 | 501 | ||
502 | static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, | ||
503 | u8 fc, u8 sel1, u16 sel2) | ||
504 | { | ||
505 | vcpu->run->exit_reason = KVM_EXIT_S390_STSI; | ||
506 | vcpu->run->s390_stsi.addr = addr; | ||
507 | vcpu->run->s390_stsi.ar = ar; | ||
508 | vcpu->run->s390_stsi.fc = fc; | ||
509 | vcpu->run->s390_stsi.sel1 = sel1; | ||
510 | vcpu->run->s390_stsi.sel2 = sel2; | ||
511 | } | ||
512 | |||
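insert_stsi_usr_data() prepares a new userspace exit: as the hunk further down shows, when kvm->arch.user_stsi is set, handle_stsi() returns -EREMOTE after filling the guest buffer, giving the VMM a chance to post-process the sysinfo block. A hypothetical VMM-side handler, assuming the KVM_EXIT_S390_STSI definitions from this series:

    #include <linux/kvm.h>

    static void handle_stsi_exit(struct kvm_run *run)
    {
        if (run->exit_reason != KVM_EXIT_S390_STSI)
            return;
        /* fc/sel1/sel2 identify the STSI function; 3.2.2 is the VM
         * description list that handle_stsi_3_2_2() fills in above. */
        if (run->s390_stsi.fc == 3 && run->s390_stsi.sel1 == 2 &&
            run->s390_stsi.sel2 == 2) {
            /* amend the block at guest address run->s390_stsi.addr,
             * honoring run->s390_stsi.ar in access-register mode */
        }
    }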
481 | static int handle_stsi(struct kvm_vcpu *vcpu) | 513 | static int handle_stsi(struct kvm_vcpu *vcpu) |
482 | { | 514 | { |
483 | int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; | 515 | int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; |
@@ -486,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
486 | unsigned long mem = 0; | 518 | unsigned long mem = 0; |
487 | u64 operand2; | 519 | u64 operand2; |
488 | int rc = 0; | 520 | int rc = 0; |
521 | ar_t ar; | ||
489 | 522 | ||
490 | vcpu->stat.instruction_stsi++; | 523 | vcpu->stat.instruction_stsi++; |
491 | VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); | 524 | VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); |
@@ -508,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
508 | return 0; | 541 | return 0; |
509 | } | 542 | } |
510 | 543 | ||
511 | operand2 = kvm_s390_get_base_disp_s(vcpu); | 544 | operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
512 | 545 | ||
513 | if (operand2 & 0xfff) | 546 | if (operand2 & 0xfff) |
514 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 547 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -532,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu) | |||
532 | break; | 565 | break; |
533 | } | 566 | } |
534 | 567 | ||
535 | rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); | 568 | rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); |
536 | if (rc) { | 569 | if (rc) { |
537 | rc = kvm_s390_inject_prog_cond(vcpu, rc); | 570 | rc = kvm_s390_inject_prog_cond(vcpu, rc); |
538 | goto out; | 571 | goto out; |
539 | } | 572 | } |
573 | if (vcpu->kvm->arch.user_stsi) { | ||
574 | insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); | ||
575 | rc = -EREMOTE; | ||
576 | } | ||
540 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); | 577 | trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); |
541 | free_page(mem); | 578 | free_page(mem); |
542 | kvm_s390_set_psw_cc(vcpu, 0); | 579 | kvm_s390_set_psw_cc(vcpu, 0); |
543 | vcpu->run->s.regs.gprs[0] = 0; | 580 | vcpu->run->s.regs.gprs[0] = 0; |
544 | return 0; | 581 | return rc; |
545 | out_no_data: | 582 | out_no_data: |
546 | kvm_s390_set_psw_cc(vcpu, 3); | 583 | kvm_s390_set_psw_cc(vcpu, 3); |
547 | out: | 584 | out: |
@@ -670,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
670 | } | 707 | } |
671 | 708 | ||
672 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { | 709 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { |
673 | if (kvm_s390_check_low_addr_protection(vcpu, start)) | 710 | if (kvm_s390_check_low_addr_prot_real(vcpu, start)) |
674 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | 711 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); |
675 | } | 712 | } |
676 | 713 | ||
@@ -776,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | |||
776 | int reg, rc, nr_regs; | 813 | int reg, rc, nr_regs; |
777 | u32 ctl_array[16]; | 814 | u32 ctl_array[16]; |
778 | u64 ga; | 815 | u64 ga; |
816 | ar_t ar; | ||
779 | 817 | ||
780 | vcpu->stat.instruction_lctl++; | 818 | vcpu->stat.instruction_lctl++; |
781 | 819 | ||
782 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 820 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
783 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 821 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
784 | 822 | ||
785 | ga = kvm_s390_get_base_disp_rs(vcpu); | 823 | ga = kvm_s390_get_base_disp_rs(vcpu, &ar); |
786 | 824 | ||
787 | if (ga & 3) | 825 | if (ga & 3) |
788 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 826 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -791,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | |||
791 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); | 829 | trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); |
792 | 830 | ||
793 | nr_regs = ((reg3 - reg1) & 0xf) + 1; | 831 | nr_regs = ((reg3 - reg1) & 0xf) + 1; |
794 | rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); | 832 | rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); |
795 | if (rc) | 833 | if (rc) |
796 | return kvm_s390_inject_prog_cond(vcpu, rc); | 834 | return kvm_s390_inject_prog_cond(vcpu, rc); |
797 | reg = reg1; | 835 | reg = reg1; |
@@ -814,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) | |||
814 | int reg, rc, nr_regs; | 852 | int reg, rc, nr_regs; |
815 | u32 ctl_array[16]; | 853 | u32 ctl_array[16]; |
816 | u64 ga; | 854 | u64 ga; |
855 | ar_t ar; | ||
817 | 856 | ||
818 | vcpu->stat.instruction_stctl++; | 857 | vcpu->stat.instruction_stctl++; |
819 | 858 | ||
820 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 859 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
821 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 860 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
822 | 861 | ||
823 | ga = kvm_s390_get_base_disp_rs(vcpu); | 862 | ga = kvm_s390_get_base_disp_rs(vcpu, &ar); |
824 | 863 | ||
825 | if (ga & 3) | 864 | if (ga & 3) |
826 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 865 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -836,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) | |||
836 | break; | 875 | break; |
837 | reg = (reg + 1) % 16; | 876 | reg = (reg + 1) % 16; |
838 | } while (1); | 877 | } while (1); |
839 | rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); | 878 | rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); |
840 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; | 879 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; |
841 | } | 880 | } |
842 | 881 | ||
@@ -847,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
847 | int reg, rc, nr_regs; | 886 | int reg, rc, nr_regs; |
848 | u64 ctl_array[16]; | 887 | u64 ctl_array[16]; |
849 | u64 ga; | 888 | u64 ga; |
889 | ar_t ar; | ||
850 | 890 | ||
851 | vcpu->stat.instruction_lctlg++; | 891 | vcpu->stat.instruction_lctlg++; |
852 | 892 | ||
853 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 893 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
854 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 894 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
855 | 895 | ||
856 | ga = kvm_s390_get_base_disp_rsy(vcpu); | 896 | ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); |
857 | 897 | ||
858 | if (ga & 7) | 898 | if (ga & 7) |
859 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 899 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -862,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
862 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); | 902 | trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); |
863 | 903 | ||
864 | nr_regs = ((reg3 - reg1) & 0xf) + 1; | 904 | nr_regs = ((reg3 - reg1) & 0xf) + 1; |
865 | rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); | 905 | rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); |
866 | if (rc) | 906 | if (rc) |
867 | return kvm_s390_inject_prog_cond(vcpu, rc); | 907 | return kvm_s390_inject_prog_cond(vcpu, rc); |
868 | reg = reg1; | 908 | reg = reg1; |
@@ -884,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu) | |||
884 | int reg, rc, nr_regs; | 924 | int reg, rc, nr_regs; |
885 | u64 ctl_array[16]; | 925 | u64 ctl_array[16]; |
886 | u64 ga; | 926 | u64 ga; |
927 | ar_t ar; | ||
887 | 928 | ||
888 | vcpu->stat.instruction_stctg++; | 929 | vcpu->stat.instruction_stctg++; |
889 | 930 | ||
890 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 931 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
891 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 932 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
892 | 933 | ||
893 | ga = kvm_s390_get_base_disp_rsy(vcpu); | 934 | ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); |
894 | 935 | ||
895 | if (ga & 7) | 936 | if (ga & 7) |
896 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 937 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
@@ -906,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu) | |||
906 | break; | 947 | break; |
907 | reg = (reg + 1) % 16; | 948 | reg = (reg + 1) % 16; |
908 | } while (1); | 949 | } while (1); |
909 | rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); | 950 | rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); |
910 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; | 951 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; |
911 | } | 952 | } |
912 | 953 | ||
@@ -931,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu) | |||
931 | unsigned long hva, gpa; | 972 | unsigned long hva, gpa; |
932 | int ret = 0, cc = 0; | 973 | int ret = 0, cc = 0; |
933 | bool writable; | 974 | bool writable; |
975 | ar_t ar; | ||
934 | 976 | ||
935 | vcpu->stat.instruction_tprot++; | 977 | vcpu->stat.instruction_tprot++; |
936 | 978 | ||
937 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 979 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
938 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 980 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
939 | 981 | ||
940 | kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); | 982 | kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); |
941 | 983 | ||
942 | /* we only handle the Linux memory detection case: | 984 | /* we only handle the Linux memory detection case: |
943 | * access key == 0 | 985 | * access key == 0 |
@@ -946,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu) | |||
946 | return -EOPNOTSUPP; | 988 | return -EOPNOTSUPP; |
947 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) | 989 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) |
948 | ipte_lock(vcpu); | 990 | ipte_lock(vcpu); |
949 | ret = guest_translate_address(vcpu, address1, &gpa, 1); | 991 | ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); |
950 | if (ret == PGM_PROTECTION) { | 992 | if (ret == PGM_PROTECTION) { |
951 | /* Write protected? Try again with read-only... */ | 993 | /* Write protected? Try again with read-only... */ |
952 | cc = 1; | 994 | cc = 1; |
953 | ret = guest_translate_address(vcpu, address1, &gpa, 0); | 995 | ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); |
954 | } | 996 | } |
955 | if (ret) { | 997 | if (ret) { |
956 | if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { | 998 | if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 23b1e86b2122..72e58bd2bee7 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) | |||
393 | case SIGP_STORE_STATUS_AT_ADDRESS: | 393 | case SIGP_STORE_STATUS_AT_ADDRESS: |
394 | vcpu->stat.instruction_sigp_store_status++; | 394 | vcpu->stat.instruction_sigp_store_status++; |
395 | break; | 395 | break; |
396 | case SIGP_STORE_ADDITIONAL_STATUS: | ||
397 | vcpu->stat.instruction_sigp_store_adtl_status++; | ||
398 | break; | ||
396 | case SIGP_SET_PREFIX: | 399 | case SIGP_SET_PREFIX: |
397 | vcpu->stat.instruction_sigp_prefix++; | 400 | vcpu->stat.instruction_sigp_prefix++; |
398 | break; | 401 | break; |
@@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
431 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 434 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
432 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); | 435 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
433 | 436 | ||
434 | order_code = kvm_s390_get_base_disp_rs(vcpu); | 437 | order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); |
435 | if (handle_sigp_order_in_user_space(vcpu, order_code)) | 438 | if (handle_sigp_order_in_user_space(vcpu, order_code)) |
436 | return -EOPNOTSUPP; | 439 | return -EOPNOTSUPP; |
437 | 440 | ||
@@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |||
473 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; | 476 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; |
474 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; | 477 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; |
475 | struct kvm_vcpu *dest_vcpu; | 478 | struct kvm_vcpu *dest_vcpu; |
476 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu); | 479 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); |
477 | 480 | ||
478 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); | 481 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); |
479 | 482 | ||