author	Linus Torvalds <torvalds@linux-foundation.org>	2015-08-31 11:27:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-08-31 11:27:44 -0400
commit	44e98edcd11a48619b342d8f442d447b094ab2fc (patch)
tree	4b35ceb134086fddc6e32610932ece05fcb1998d /arch/s390/kvm
parent	64291f7db5bd8150a74ad2036f1037e6a0428df2 (diff)
parent	4d283ec908e617fa28bcb06bce310206f0655d67 (diff)
Merge tag 'kvm-4.3-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
 "A very small release for x86 and s390 KVM.

  - s390: timekeeping changes, cleanups and fixes

  - x86: support for Hyper-V MSRs to report crashes, and a bunch of
    cleanups.

  One interesting feature that was planned for 4.3 (emulating the local
  APIC in kernel while keeping the IOAPIC and 8254 in userspace) had to
  be delayed because Intel complained about my reading of the manual"

* tag 'kvm-4.3-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (42 commits)
  x86/kvm: Rename VMX's segment access rights defines
  KVM: x86/vPMU: Fix unnecessary signed extension for AMD PERFCTRn
  kvm: x86: Fix error handling in the function kvm_lapic_sync_from_vapic
  KVM: s390: Fix assumption that kvm_set_irq_routing is always run successfully
  KVM: VMX: drop ept misconfig check
  KVM: MMU: fully check zero bits for sptes
  KVM: MMU: introduce is_shadow_zero_bits_set()
  KVM: MMU: introduce the framework to check zero bits on sptes
  KVM: MMU: split reset_rsvds_bits_mask_ept
  KVM: MMU: split reset_rsvds_bits_mask
  KVM: MMU: introduce rsvd_bits_validate
  KVM: MMU: move FNAME(is_rsvd_bits_set) to mmu.c
  KVM: MMU: fix validation of mmio page fault
  KVM: MTRR: Use default type for non-MTRR-covered gfn before WARN_ON
  KVM: s390: host STP toleration for VMs
  KVM: x86: clean/fix memory barriers in irqchip_in_kernel
  KVM: document memory barriers for kvm->vcpus/kvm->online_vcpus
  KVM: x86: remove unnecessary memory barriers for shared MSRs
  KVM: move code related to KVM_SET_BOOT_CPU_ID to x86
  KVM: s390: log capability enablement and vm attribute changes
  ...
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/diag.c	13
-rw-r--r--	arch/s390/kvm/guestdbg.c	35
-rw-r--r--	arch/s390/kvm/interrupt.c	98
-rw-r--r--	arch/s390/kvm/kvm-s390.c	114
-rw-r--r--	arch/s390/kvm/kvm-s390.h	11
-rw-r--r--	arch/s390/kvm/priv.c	28
-rw-r--r--	arch/s390/kvm/sigp.c	13
-rw-r--r--	arch/s390/kvm/trace-s390.h	33
8 files changed, 240 insertions(+), 105 deletions(-)
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index fc7ec95848c3..5fbfb88f8477 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -27,13 +27,13 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 
 	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
+	vcpu->stat.diagnose_10++;
 
 	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
 	    || start < 2 * PAGE_SIZE)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
-	vcpu->stat.diagnose_10++;
 
 	/*
 	 * We checked for start >= end above, so lets check for the
@@ -75,6 +75,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
 	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
 
+	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
+		   vcpu->run->s.regs.gprs[rx]);
+	vcpu->stat.diagnose_258++;
 	if (vcpu->run->s.regs.gprs[rx] & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -85,6 +88,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 
 	switch (parm.subcode) {
 	case 0: /* TOKEN */
+		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
+			   "select mask 0x%llx compare mask 0x%llx",
+			   parm.token_addr, parm.select_mask, parm.compare_mask);
 		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
 			/*
 			 * If the pagefault handshake is already activated,
@@ -114,6 +120,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 		 * the cancel, therefore to reduce code complexity, we assume
 		 * all outstanding tokens are already pending.
 		 */
+		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
 		if (parm.token_addr || parm.select_mask ||
 		    parm.compare_mask || parm.zarch)
 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -174,7 +181,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
 	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
 
-	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
+	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
+	vcpu->stat.diagnose_308++;
 	switch (subcode) {
 	case 3:
 		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -202,6 +210,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
+	vcpu->stat.diagnose_500++;
 	/* No virtio-ccw notification? Get out quickly. */
 	if (!vcpu->kvm->arch.css_support ||
 	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
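
The diag.c hunks above all follow one pattern: the per-diagnose statistics counter is bumped (and, where useful, a VCPU_EVENT emitted) as soon as the handler is entered, before any validity check, so that even rejected requests show up in the counters. A minimal sketch of that pattern, with the function name, operand, and check invented for illustration (VCPU_EVENT and kvm_s390_inject_program_int are the real helpers):

static int handle_some_diag(struct kvm_vcpu *vcpu, unsigned long addr)
{
	vcpu->stat.diagnose_10++;	/* count first, even if rejected */
	VCPU_EVENT(vcpu, 3, "diag operand 0x%lx", addr);

	if (addr & ~PAGE_MASK)		/* validity check after accounting */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* actual handling follows */
	return 0;
}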
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index e97b3455d7e6..47518a324d75 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -473,10 +473,45 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->iprcc &= ~PGM_PER;
 }
 
+#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
+#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
+#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
+#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
+
 void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 {
+	int new_as;
+
 	if (debug_exit_required(vcpu))
 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
 
 	filter_guest_per_event(vcpu);
+
+	/*
+	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
+	 * a space-switch event. PER events enforce space-switch events
+	 * for these instructions. So if no PER event for the guest is left,
+	 * we might have to filter the space-switch element out, too.
+	 */
+	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
+		vcpu->arch.sie_block->iprcc = 0;
+		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
+
+		/*
+		 * If the AS changed from / to home, we had RP, SAC or SACF
+		 * instruction. Check primary and home space-switch-event
+		 * controls. (theoretically home -> home produced no event)
+		 */
+		if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
+		    (pssec(vcpu) || hssec(vcpu)))
+			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
+
+		/*
+		 * PT, PTI, PR, PC instructions operate on primary AS only. Check
+		 * if the primary-space-switch-event control was or got set.
+		 */
+		if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
+		    (pssec(vcpu) || old_ssec(vcpu)))
+			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
+	}
 }
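
The filtering added above reduces to two predicates over a handful of booleans. A toy model (not kernel code) with the SIE-specific accessors replaced by plain flags, so the decision can be read or unit-tested in isolation; all names here are invented for illustration:

#include <stdbool.h>

static bool keep_space_switch_event(bool new_as_home, bool old_as_home,
				    bool new_as_primary,
				    bool pssec, bool hssec, bool old_ssec)
{
	/* RP/SAC/SACF case: the AS changed from or to the home space */
	if ((new_as_home ^ old_as_home) && (pssec || hssec))
		return true;
	/* PT/PTI/PR/PC case: the target is the primary AS */
	if (new_as_primary && !old_as_home && (pssec || old_ssec))
		return true;
	return false;
}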
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d89708e99..b277d50dcf76 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -30,7 +30,6 @@
 #define IOINT_SCHID_MASK 0x0000ffff
 #define IOINT_SSID_MASK 0x00030000
 #define IOINT_CSSID_MASK 0x03fc0000
-#define IOINT_AI_MASK 0x04000000
 #define PFAULT_INIT 0x0600
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
@@ -72,9 +71,13 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
+	preempt_disable();
 	if (!(vcpu->arch.sie_block->ckc <
-	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
+		preempt_enable();
 		return 0;
+	}
+	preempt_enable();
 	return ckc_interrupts_enabled(vcpu);
 }
 
@@ -311,8 +314,8 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
 	li->irq.ext.ext_params2 = 0;
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
-		   0, ext.ext_params2);
+	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
+		   ext.ext_params2);
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_INT_PFAULT_INIT,
 					 0, ext.ext_params2);
@@ -368,7 +371,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 	spin_unlock(&fi->lock);
 
 	if (deliver) {
-		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
 			   mchk.mcic);
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 						 KVM_S390_MCHK,
@@ -403,7 +406,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc;
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
 	vcpu->stat.deliver_restart_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
 
@@ -427,7 +430,6 @@ static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
 	vcpu->stat.deliver_prefix_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_SIGP_SET_PREFIX,
@@ -450,7 +452,7 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
 	vcpu->stat.deliver_emergency_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
 					 cpu_addr, 0);
@@ -477,7 +479,7 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
 	vcpu->stat.deliver_external_call++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_INT_EXTERNAL_CALL,
@@ -506,7 +508,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	memset(&li->irq.pgm, 0, sizeof(pgm_info));
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
 		   pgm_info.code, ilc);
 	vcpu->stat.deliver_program_int++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
@@ -622,7 +624,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
 	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
 	spin_unlock(&fi->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
 		   ext.ext_params);
 	vcpu->stat.deliver_service_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
@@ -651,9 +653,6 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
 					struct kvm_s390_interrupt_info,
 					list);
 	if (inti) {
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
-				KVM_S390_INT_PFAULT_DONE, 0,
-				inti->ext.ext_params2);
 		list_del(&inti->list);
 		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
 	}
@@ -662,6 +661,12 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
 	spin_unlock(&fi->lock);
 
 	if (inti) {
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+						 KVM_S390_INT_PFAULT_DONE, 0,
+						 inti->ext.ext_params2);
+		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
+			   inti->ext.ext_params2);
+
 		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
 				  (u16 *)__LC_EXT_INT_CODE);
 		rc |= put_guest_lc(vcpu, PFAULT_DONE,
@@ -691,7 +696,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
 					list);
 	if (inti) {
 		VCPU_EVENT(vcpu, 4,
-			   "interrupt: virtio parm:%x,parm64:%llx",
+			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
 		vcpu->stat.deliver_virtio_interrupt++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
@@ -741,7 +746,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 					struct kvm_s390_interrupt_info,
 					list);
 	if (inti) {
-		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+		VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
 		vcpu->stat.deliver_io_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 						 inti->type,
@@ -855,7 +860,9 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
+	preempt_disable();
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	preempt_enable();
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/* underflow */
@@ -864,7 +871,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 
 	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
-	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	kvm_vcpu_block(vcpu);
@@ -894,7 +901,9 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+	preempt_disable();
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	preempt_enable();
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/*
@@ -968,6 +977,10 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
+	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+				   irq->u.pgm.code, 0);
+
 	li->irq.pgm = irq->u.pgm;
 	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
 	return 0;
@@ -978,9 +991,6 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_irq irq;
 
-	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
-				   0, 1);
 	spin_lock(&li->lock);
 	irq.u.pgm.code = code;
 	__inject_prog(vcpu, &irq);
@@ -996,10 +1006,6 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 	struct kvm_s390_irq irq;
 	int rc;
 
-	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
-		   pgm_info->code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
-				   pgm_info->code, 0, 1);
 	spin_lock(&li->lock);
 	irq.u.pgm = *pgm_info;
 	rc = __inject_prog(vcpu, &irq);
@@ -1012,11 +1018,11 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
-		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
+	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
+		   irq->u.ext.ext_params2);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
 				   irq->u.ext.ext_params,
-				   irq->u.ext.ext_params2, 2);
+				   irq->u.ext.ext_params2);
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
@@ -1045,10 +1051,10 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
 	uint16_t src_id = irq->u.extcall.code;
 
-	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
 		   src_id);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
-				   src_id, 0, 2);
+				   src_id, 0);
 
 	/* sending vcpu invalid */
 	if (src_id >= KVM_MAX_VCPUS ||
@@ -1070,10 +1076,10 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
 
-	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
 		   irq->u.prefix.address);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
-				   irq->u.prefix.address, 0, 2);
+				   irq->u.prefix.address, 0);
 
 	if (!is_vcpu_stopped(vcpu))
 		return -EBUSY;
@@ -1090,7 +1096,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_stop_info *stop = &li->irq.stop;
 	int rc = 0;
 
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
 
 	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
 		return -EINVAL;
@@ -1114,8 +1120,8 @@ static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
 
 	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
 	return 0;
@@ -1126,10 +1132,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
 		   irq->u.emerg.code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
-				   irq->u.emerg.code, 0, 2);
+				   irq->u.emerg.code, 0);
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
@@ -1142,10 +1148,10 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
 
-	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
 		   irq->u.mchk.mcic);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
-				   irq->u.mchk.mcic, 2);
+				   irq->u.mchk.mcic);
 
 	/*
 	 * Because repressible machine checks can be indicated along with
@@ -1172,9 +1178,9 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
-				   0, 0, 2);
+				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1185,9 +1191,9 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
-				   0, 0, 2);
+				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1435,20 +1441,20 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_INT_SERVICE:
-		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
 		inti->ext.ext_params = s390int->parm;
 		break;
 	case KVM_S390_INT_PFAULT_DONE:
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_MCHK:
-		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
 			 s390int->parm64);
 		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
 		inti->mchk.mcic = s390int->parm64;
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (inti->type & IOINT_AI_MASK)
+		if (inti->type & KVM_S390_INT_IO_AI_MASK)
 			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
 		else
 			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
@@ -1535,8 +1541,6 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	switch (irq->type) {
 	case KVM_S390_PROGRAM_INT:
-		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
-			   irq->u.pgm.code);
 		rc = __inject_prog(vcpu, irq);
 		break;
 	case KVM_S390_SIGP_SET_PREFIX:
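
Several interrupt.c hunks above wrap the sum of get_tod_clock_fast() and the per-vcpu epoch in preempt_disable()/preempt_enable(). The point is that the epoch is adjusted by an STP clock-sync notifier running under stop_machine() (see the kvm-s390.c hunk below), so the read-plus-add must complete on one CPU. A minimal sketch of that pattern; get_guest_tod() is a hypothetical name, the helpers inside are the real ones:

static u64 get_guest_tod(struct kvm_vcpu *vcpu)
{
	u64 now;

	preempt_disable();	/* don't get stopped mid-calculation */
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	preempt_enable();
	return now;
}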
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f32f843a3631..6861b74649ae 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <asm/asm-offsets.h>
 #include <asm/lowcore.h>
+#include <asm/etr.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
@@ -108,6 +109,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
108 { "diagnose_10", VCPU_STAT(diagnose_10) }, 109 { "diagnose_10", VCPU_STAT(diagnose_10) },
109 { "diagnose_44", VCPU_STAT(diagnose_44) }, 110 { "diagnose_44", VCPU_STAT(diagnose_44) },
110 { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 111 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
112 { "diagnose_258", VCPU_STAT(diagnose_258) },
113 { "diagnose_308", VCPU_STAT(diagnose_308) },
114 { "diagnose_500", VCPU_STAT(diagnose_500) },
111 { NULL } 115 { NULL }
112}; 116};
113 117
@@ -124,6 +128,7 @@ unsigned long kvm_s390_fac_list_mask_size(void)
 }
 
 static struct gmap_notifier gmap_notifier;
+debug_info_t *kvm_s390_dbf;
 
 /* Section: not file related */
 int kvm_arch_hardware_enable(void)
@@ -134,24 +139,69 @@ int kvm_arch_hardware_enable(void)
 
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
 
+/*
+ * This callback is executed during stop_machine(). All CPUs are therefore
+ * temporarily stopped. In order not to change guest behavior, we have to
+ * disable preemption whenever we touch the epoch of kvm and the VCPUs,
+ * so a CPU won't be stopped while calculating with the epoch.
+ */
+static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	struct kvm *kvm;
+	struct kvm_vcpu *vcpu;
+	int i;
+	unsigned long long *delta = v;
+
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		kvm->arch.epoch -= *delta;
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			vcpu->arch.sie_block->epoch -= *delta;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_clock_notifier = {
+	.notifier_call = kvm_clock_sync,
+};
+
 int kvm_arch_hardware_setup(void)
 {
 	gmap_notifier.notifier_call = kvm_gmap_notifier;
 	gmap_register_ipte_notifier(&gmap_notifier);
+	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
+				       &kvm_clock_notifier);
 	return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
 {
 	gmap_unregister_ipte_notifier(&gmap_notifier);
+	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
+					 &kvm_clock_notifier);
 }
 
 int kvm_arch_init(void *opaque)
 {
+	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
+	if (!kvm_s390_dbf)
+		return -ENOMEM;
+
+	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
+		debug_unregister(kvm_s390_dbf);
+		return -ENOMEM;
+	}
+
 	/* Register floating interrupt controller interface. */
 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 }
 
+void kvm_arch_exit(void)
+{
+	debug_unregister(kvm_s390_dbf);
+}
+
 /* Section: device related */
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
@@ -281,10 +331,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 
 	switch (cap->cap) {
 	case KVM_CAP_S390_IRQCHIP:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
 		kvm->arch.use_irqchip = 1;
 		r = 0;
 		break;
 	case KVM_CAP_S390_USER_SIGP:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
 		kvm->arch.user_sigp = 1;
 		r = 0;
 		break;
@@ -295,8 +347,11 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 			r = 0;
 		} else
 			r = -EINVAL;
+		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
+			 r ? "(not available)" : "(success)");
 		break;
 	case KVM_CAP_S390_USER_STSI:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
 		kvm->arch.user_stsi = 1;
 		r = 0;
 		break;
@@ -314,6 +369,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 	switch (attr->attr) {
 	case KVM_S390_VM_MEM_LIMIT_SIZE:
 		ret = 0;
+		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
+			 kvm->arch.gmap->asce_end);
 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
 			ret = -EFAULT;
 		break;
@@ -330,7 +387,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 	unsigned int idx;
 	switch (attr->attr) {
 	case KVM_S390_VM_MEM_ENABLE_CMMA:
+		/* enable CMMA only for z10 and later (EDAT_1) */
+		ret = -EINVAL;
+		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
+			break;
+
 		ret = -EBUSY;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
 		mutex_lock(&kvm->lock);
 		if (atomic_read(&kvm->online_vcpus) == 0) {
 			kvm->arch.use_cmma = 1;
@@ -339,6 +402,11 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_S390_VM_MEM_CLR_CMMA:
+		ret = -EINVAL;
+		if (!kvm->arch.use_cmma)
+			break;
+
+		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
 		mutex_lock(&kvm->lock);
 		idx = srcu_read_lock(&kvm->srcu);
 		s390_reset_cmma(kvm->arch.gmap->mm);
@@ -374,6 +442,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
 			}
 		}
 		mutex_unlock(&kvm->lock);
+		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
 		break;
 	}
 	default:
@@ -400,22 +469,26 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 		kvm->arch.crypto.aes_kw = 1;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
 		get_random_bytes(
 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 		kvm->arch.crypto.dea_kw = 1;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
 		kvm->arch.crypto.aes_kw = 0;
 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
 		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
 		kvm->arch.crypto.dea_kw = 0;
 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
 		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
 		break;
 	default:
 		mutex_unlock(&kvm->lock);
@@ -440,6 +513,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	if (gtod_high != 0)
 		return -EINVAL;
+	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
 
 	return 0;
 }
@@ -459,12 +533,15 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 		return r;
 
 	mutex_lock(&kvm->lock);
+	preempt_disable();
 	kvm->arch.epoch = gtod - host_tod;
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
 	kvm_s390_vcpu_unblock_all(kvm);
+	preempt_enable();
 	mutex_unlock(&kvm->lock);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
 	return 0;
 }
 
@@ -496,6 +573,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
 			 sizeof(gtod_high)))
 		return -EFAULT;
+	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
 
 	return 0;
 }
@@ -509,9 +587,12 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (r)
 		return r;
 
+	preempt_disable();
 	gtod = host_tod + kvm->arch.epoch;
+	preempt_enable();
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
+	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
 
 	return 0;
 }
@@ -821,7 +902,9 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 	}
 
 	/* Enable storage key handling for the guest */
-	s390_enable_skey();
+	r = s390_enable_skey();
+	if (r)
+		goto out;
 
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -879,8 +962,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		if (kvm->arch.use_irqchip) {
 			/* Set up dummy routing. */
 			memset(&routing, 0, sizeof(routing));
-			kvm_set_irq_routing(kvm, &routing, 0, 0);
-			r = 0;
+			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
 		}
 		break;
 	}
@@ -1043,7 +1125,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
-	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
+	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
 	if (!kvm->arch.dbf)
 		goto out_err;
 
@@ -1086,7 +1168,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.ipte_mutex);
 
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
-	VM_EVENT(kvm, 3, "%s", "vm created");
+	VM_EVENT(kvm, 3, "vm created with type %lu", type);
 
 	if (type & KVM_VM_S390_UCONTROL) {
 		kvm->arch.gmap = NULL;
@@ -1103,6 +1185,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.epoch = 0;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
+	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
 
 	return 0;
 out_err:
@@ -1110,6 +1193,7 @@ out_err:
 	free_page((unsigned long)kvm->arch.model.fac);
 	debug_unregister(kvm->arch.dbf);
 	free_page((unsigned long)(kvm->arch.sca));
+	KVM_EVENT(3, "creation of vm failed: %d", rc);
 	return rc;
 }
 
@@ -1131,7 +1215,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (kvm_is_ucontrol(vcpu->kvm))
 		gmap_free(vcpu->arch.gmap);
 
-	if (kvm_s390_cmma_enabled(vcpu->kvm))
+	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
 	free_page((unsigned long)(vcpu->arch.sie_block));
 
@@ -1166,6 +1250,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	gmap_free(kvm->arch.gmap);
 	kvm_s390_destroy_adapters(kvm);
 	kvm_s390_clear_float_irqs(kvm);
+	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
 }
 
 /* Section: vcpu related */
@@ -1264,7 +1349,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	mutex_lock(&vcpu->kvm->lock);
+	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm))
 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
@@ -1342,7 +1429,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
-	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
+	if (vcpu->kvm->arch.use_cmma) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
 		if (rc)
 			return rc;
@@ -1723,18 +1810,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return rc;
 }
 
-bool kvm_s390_cmma_enabled(struct kvm *kvm)
-{
-	if (!MACHINE_IS_LPAR)
-		return false;
-	/* only enable for z10 and later */
-	if (!MACHINE_HAS_EDAT1)
-		return false;
-	if (!kvm->arch.use_cmma)
-		return false;
-	return true;
-}
-
 static bool ibs_enabled(struct kvm_vcpu *vcpu)
 {
 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
@@ -2340,6 +2415,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	case KVM_CAP_S390_CSS_SUPPORT:
 		if (!vcpu->kvm->arch.css_support) {
 			vcpu->kvm->arch.css_support = 1;
+			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
 			trace_kvm_s390_enable_css(vcpu->kvm);
 		}
 		r = 0;
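
The kvm-s390.c hunks above introduce a second, VM-independent s390 debug feature ("kvm-trace") with the usual register / add-view / unregister lifecycle. A minimal sketch of that lifecycle under invented names (the "sample" area, sizes, and functions are made up; debug_register() takes the pages per area, the number of areas, and the buffer size per entry, here 7 longs as in the patch):

static debug_info_t *sample_dbf;

static int sample_init(void)
{
	sample_dbf = debug_register("sample", 32, 1, 7 * sizeof(long));
	if (!sample_dbf)
		return -ENOMEM;
	if (debug_register_view(sample_dbf, &debug_sprintf_view)) {
		debug_unregister(sample_dbf);	/* roll back on failure */
		return -ENOMEM;
	}
	debug_sprintf_event(sample_dbf, 3, "%s", "sample ready");
	return 0;
}

static void sample_exit(void)
{
	debug_unregister(sample_dbf);		/* also drops the views */
}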
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c5704786e473..c446aabf60d3 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -27,6 +27,13 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 #define TDB_FORMAT1 1
 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
 
+extern debug_info_t *kvm_s390_dbf;
+#define KVM_EVENT(d_loglevel, d_string, d_args...)\
+do { \
+	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
+	  d_args); \
+} while (0)
+
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
 do { \
 	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
@@ -65,6 +72,8 @@ static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
 
 static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 {
+	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
+		   prefix);
 	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
@@ -217,8 +226,6 @@ void exit_sie(struct kvm_vcpu *vcpu);
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
-/* is cmma enabled */
-bool kvm_s390_cmma_enabled(struct kvm *kvm);
 unsigned long kvm_s390_fac_list_mask_size(void);
 extern unsigned long kvm_s390_fac_list_mask[];
 
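
With the KVM_EVENT macro added above, kvm-s390.h now defines three tiers of debug logging: KVM_EVENT writes to the global "kvm-trace" debug feature, VM_EVENT to the per-VM one, and VCPU_EVENT additionally tags the vcpu. An illustrative usage sketch only; the function and message texts are invented:

static void sample_log(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	KVM_EVENT(3, "vm 0x%p does something", kvm);		/* global */
	VM_EVENT(kvm, 3, "%s", "a vm-scoped message");		/* per VM */
	VCPU_EVENT(vcpu, 4, "a vcpu message, parm 0x%x", 0x42);	/* per vcpu */
}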
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ad4242245771..4d21dc4d1a84 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -53,11 +53,14 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 		kvm_s390_set_psw_cc(vcpu, 3);
 		return 0;
 	}
+	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
 	val = (val - hostclk) & ~0x3fUL;
 
 	mutex_lock(&vcpu->kvm->lock);
+	preempt_disable();
 	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
 		cpup->arch.sie_block->epoch = val;
+	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 
 	kvm_s390_set_psw_cc(vcpu, 0);
@@ -98,8 +101,6 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	kvm_s390_set_prefix(vcpu, address);
-
-	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
 	trace_kvm_s390_handle_prefix(vcpu, 1, address);
 	return 0;
 }
@@ -129,7 +130,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
 	trace_kvm_s390_handle_prefix(vcpu, 0, address);
 	return 0;
 }
@@ -155,7 +156,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
+	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
 	trace_kvm_s390_handle_stap(vcpu, ga);
 	return 0;
 }
@@ -167,6 +168,7 @@ static int __skey_check_enable(struct kvm_vcpu *vcpu)
 		return rc;
 
 	rc = s390_enable_skey();
+	VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
 	trace_kvm_s390_skey_related_inst(vcpu);
 	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
 	return rc;
@@ -370,7 +372,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 			    &fac, sizeof(fac));
 	if (rc)
 		return rc;
-	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
 	trace_kvm_s390_handle_stfl(vcpu, fac);
 	return 0;
 }
@@ -468,7 +470,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
 	return 0;
 }
 
@@ -521,7 +523,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	ar_t ar;
 
 	vcpu->stat.instruction_stsi++;
-	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -758,10 +760,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	struct gmap *gmap;
 	int i;
 
-	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
+	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
 	gmap = vcpu->arch.gmap;
 	vcpu->stat.instruction_essa++;
-	if (!kvm_s390_cmma_enabled(vcpu->kvm))
+	if (!vcpu->kvm->arch.use_cmma)
 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -829,7 +831,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -868,7 +870,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
 
 	reg = reg1;
@@ -902,7 +904,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -940,7 +942,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
 
 	reg = reg1;
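
The LCTL/STCTL/LCTLG/STCTG handlers touched above all compute the register count with "((reg3 - reg1) & 0xf) + 1": the r1..r3 control-register range wraps modulo 16. A stand-alone rendering of that expression, with the helper name invented for illustration:

static unsigned int ctl_reg_range_len(unsigned int reg1, unsigned int reg3)
{
	/* e.g. reg1=14, reg3=1 covers cr14, cr15, cr0, cr1 -> 4 registers */
	return ((reg3 - reg1) & 0xf) + 1;
}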
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 72e58bd2bee7..da690b69f9fe 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -205,9 +205,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
 		return SIGP_CC_STATUS_STORED;
-	} else if (rc == 0) {
-		VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
-			   dst_vcpu->vcpu_id, irq.u.prefix.address);
 	}
 
 	return rc;
@@ -371,7 +368,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 	return rc;
 }
 
-static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
+					   u16 cpu_addr)
 {
 	if (!vcpu->kvm->arch.user_sigp)
 		return 0;
@@ -414,9 +412,8 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
 	default:
 		vcpu->stat.instruction_sigp_unknown++;
 	}
-
-	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
-		   order_code);
+	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
+		   order_code, cpu_addr);
 
 	return 1;
 }
@@ -435,7 +432,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
 	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
-	if (handle_sigp_order_in_user_space(vcpu, order_code))
+	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
 		return -EOPNOTSUPP;
 
 	if (r1 % 2)
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 3208d33a48cb..cc1d6c68356f 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -105,11 +105,22 @@ TRACE_EVENT(kvm_s390_vcpu_start_stop,
 	{KVM_S390_PROGRAM_INT, "program interrupt"},	\
 	{KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"},	\
 	{KVM_S390_RESTART, "sigp restart"},		\
+	{KVM_S390_INT_PFAULT_INIT, "pfault init"},	\
+	{KVM_S390_INT_PFAULT_DONE, "pfault done"},	\
+	{KVM_S390_MCHK, "machine check"},		\
+	{KVM_S390_INT_CLOCK_COMP, "clock comparator"},	\
+	{KVM_S390_INT_CPU_TIMER, "cpu timer"},		\
 	{KVM_S390_INT_VIRTIO, "virtio interrupt"},	\
 	{KVM_S390_INT_SERVICE, "sclp interrupt"},	\
 	{KVM_S390_INT_EMERGENCY, "sigp emergency"},	\
 	{KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"}
 
+#define get_irq_name(__type) \
+	(__type > KVM_S390_INT_IO_MAX ? \
+	__print_symbolic(__type, kvm_s390_int_type) : \
+		(__type & KVM_S390_INT_IO_AI_MASK ? \
+		 "adapter I/O interrupt" : "subchannel I/O interrupt"))
+
 TRACE_EVENT(kvm_s390_inject_vm,
 	    TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who),
 	    TP_ARGS(type, parm, parm64, who),
@@ -131,22 +142,19 @@ TRACE_EVENT(kvm_s390_inject_vm,
 	    TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx",
 		      (__entry->who == 1) ? " (from kernel)" :
 		      (__entry->who == 2) ? " (from user)" : "",
-		      __entry->inttype,
-		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
+		      __entry->inttype, get_irq_name(__entry->inttype),
 		      __entry->parm, __entry->parm64)
 	);
 
 TRACE_EVENT(kvm_s390_inject_vcpu,
-	    TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \
-		     int who),
-	    TP_ARGS(id, type, parm, parm64, who),
+	    TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64),
+	    TP_ARGS(id, type, parm, parm64),
 
 	    TP_STRUCT__entry(
 		    __field(int, id)
 		    __field(__u32, inttype)
 		    __field(__u32, parm)
 		    __field(__u64, parm64)
-		    __field(int, who)
 		    ),
 
 	    TP_fast_assign(
@@ -154,15 +162,12 @@ TRACE_EVENT(kvm_s390_inject_vcpu,
 		    __entry->inttype = type & 0x00000000ffffffff;
 		    __entry->parm = parm;
 		    __entry->parm64 = parm64;
-		    __entry->who = who;
 	    ),
 
-	    TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
-		      (__entry->who == 1) ? " (from kernel)" :
-		      (__entry->who == 2) ? " (from user)" : "",
+	    TP_printk("inject (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
 		      __entry->id, __entry->inttype,
-		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
-		      __entry->parm, __entry->parm64)
+		      get_irq_name(__entry->inttype), __entry->parm,
+		      __entry->parm64)
 	    );
 
 /*
@@ -189,8 +194,8 @@ TRACE_EVENT(kvm_s390_deliver_interrupt,
189 TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \ 194 TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
190 "data:%08llx %016llx", 195 "data:%08llx %016llx",
191 __entry->id, __entry->inttype, 196 __entry->id, __entry->inttype,
192 __print_symbolic(__entry->inttype, kvm_s390_int_type), 197 get_irq_name(__entry->inttype), __entry->data0,
193 __entry->data0, __entry->data1) 198 __entry->data1)
194 ); 199 );
195 200
196/* 201/*
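
The get_irq_name() macro introduced above exists because I/O interrupt types form a whole range (up to KVM_S390_INT_IO_MAX, with the adapter-interrupt bit in KVM_S390_INT_IO_AI_MASK) and so cannot be enumerated in the kvm_s390_int_type table. A plain-C rendering of the same decision, as a sketch with an invented function name:

static const char *irq_name(__u64 type)
{
	if (type > KVM_S390_INT_IO_MAX)
		return "non-I/O interrupt";	/* the __print_symbolic() case */
	return (type & KVM_S390_INT_IO_AI_MASK) ?
		"adapter I/O interrupt" : "subchannel I/O interrupt";
}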