author     Paolo Bonzini <pbonzini@redhat.com>  2015-07-29 06:53:58 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2015-07-29 06:53:58 -0400
commit     554726d33c20791653dbd1c047c83f93459bc586 (patch)
tree       422a4fa2cd8bed59bed081722d86e6af37db18a2
parent     5492830370171b6a4ede8a3bfba687a8d0f25fa5 (diff)
parent     c92ea7b9f7d256cabf7ee08a7627a5227e356dec (diff)
Merge tag 'kvm-s390-next-20150728' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
KVM: s390: Fixes and features for kvm/next (4.3)

1. Rework logging infrastructure (s390dbf) to integrate feedback learned
   when debugging performance and test issues
2. Some cleanups and simplifications for CMMA handling
3. Fix gdb debugging and single stepping on some instructions
4. Error handling for storage key setup
-rw-r--r--  Documentation/s390/00-INDEX       |   2
-rw-r--r--  Documentation/s390/kvm.txt        | 125
-rw-r--r--  arch/s390/include/asm/kvm_host.h  |   4
-rw-r--r--  arch/s390/kvm/diag.c              |  13
-rw-r--r--  arch/s390/kvm/guestdbg.c          |  35
-rw-r--r--  arch/s390/kvm/interrupt.c         |  88
-rw-r--r--  arch/s390/kvm/kvm-s390.c          |  73
-rw-r--r--  arch/s390/kvm/kvm-s390.h          |  11
-rw-r--r--  arch/s390/kvm/priv.c              |  26
-rw-r--r--  arch/s390/kvm/sigp.c              |  13
-rw-r--r--  arch/s390/kvm/trace-s390.h        |  33
-rw-r--r--  include/uapi/linux/kvm.h          |   1
12 files changed, 194 insertions(+), 230 deletions(-)
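
The first item of the series is the logging rework: kvm_arch_init() now registers a module-wide s390dbf area ("kvm-trace") with a sprintf view, the new kvm_arch_exit() unregisters it, and kvm-s390.h gains a KVM_EVENT macro next to VM_EVENT/VCPU_EVENT. The standalone sketch below only illustrates that pattern; the module wrapper and the example_init/example_exit names are invented here for illustration, while the debug_* calls and sizes mirror the hunks further down.

/*
 * Hedged sketch of the s390dbf pattern this series switches KVM to: one
 * module-wide debug area named "kvm-trace" with a sprintf view, written
 * through a small event macro.  Sizes match kvm_arch_init() below:
 * 32 pages, 1 area, 7 longs per entry.
 */
#include <linux/module.h>
#include <asm/debug.h>

static debug_info_t *kvm_s390_dbf;

#define KVM_EVENT(d_loglevel, d_string, d_args...) \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", d_args)

static int __init example_init(void)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	/* text view, readable under /sys/kernel/debug/s390dbf/kvm-trace/ */
	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	KVM_EVENT(3, "%s", "debug area registered");
	return 0;
}

static void __exit example_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
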
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 10c874ebdfe5..9189535f6cd2 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,8 +16,6 @@ Debugging390.txt
 	- hints for debugging on s390 systems.
 driver-model.txt
 	- information on s390 devices and the driver model.
-kvm.txt
-	- ioctl calls to /dev/kvm on s390.
 monreader.txt
 	- information on accessing the z/VM monitor stream from Linux.
 qeth.txt
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
deleted file mode 100644
index 85f3280d7ef6..000000000000
--- a/Documentation/s390/kvm.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-*** BIG FAT WARNING ***
-The kvm module is currently in EXPERIMENTAL state for s390. This means that
-the interface to the module is not yet considered to remain stable. Thus, be
-prepared that we keep breaking your userspace application and guest
-compatibility over and over again until we feel happy with the result. Make sure
-your guest kernel, your host kernel, and your userspace launcher are in a
-consistent state.
-
-This Documentation describes the unique ioctl calls to /dev/kvm, the resulting
-kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
-
-1. ioctl calls to /dev/kvm
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_GET_API_VERSION
-KVM_CREATE_VM (*) see note
-KVM_CHECK_EXTENSION
-KVM_GET_VCPU_MMAP_SIZE
-
-Notes:
-* KVM_CREATE_VM may fail on s390, if the calling process has multiple
-threads and has not called KVM_S390_ENABLE_SIE before.
-
-In addition, on s390 the following architecture specific ioctls are supported:
-ioctl: KVM_S390_ENABLE_SIE
-args: none
-see also: include/linux/kvm.h
-This call causes the kernel to switch on PGSTE in the user page table. This
-operation is needed in order to run a virtual machine, and it requires the
-calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
-will implicitly try to switch on PGSTE if the user process has not called
-KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
-before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
-observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
-operation, is not reversible, and will persist over the entire lifetime of
-the calling process. It does not have any user-visible effect other than a small
-performance penalty.
-
-2. ioctl calls to the kvm-vm file descriptor
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_CREATE_VCPU
-KVM_SET_USER_MEMORY_REGION (*) see note
-KVM_GET_DIRTY_LOG (**) see note
-
-Notes:
-* kvm does only allow exactly one memory slot on s390, which has to start
- at guest absolute address zero and at a user address that is aligned on any
- page boundary. This hardware "limitation" allows us to have a few unique
- optimizations. The memory slot doesn't have to be filled
- with memory actually, it may contain sparse holes. That said, with different
- user memory layout this does still allow a large flexibility when
- doing the guest memory setup.
-** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
-log. This ioctl call is only needed for guest migration, and we intend to
-implement this one in the future.
-
-In addition, on s390 the following architecture specific ioctls for the kvm-vm
-file descriptor are supported:
-ioctl: KVM_S390_INTERRUPT
-args: struct kvm_s390_interrupt *
-see also: include/linux/kvm.h
-This ioctl is used to submit a floating interrupt for a virtual machine.
-Floating interrupts may be delivered to any virtual cpu in the configuration.
-Only some interrupt types defined in include/linux/kvm.h make sense when
-submitted as floating interrupts. The following interrupts are not considered
-to be useful as floating interrupts, and a call to inject them will result in
--EINVAL error code: program interrupts and interprocessor signals. Valid
-floating interrupts are:
-KVM_S390_INT_VIRTIO
-KVM_S390_INT_SERVICE
-
-3. ioctl calls to the kvm-vcpu file descriptor
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_RUN
-KVM_GET_REGS
-KVM_SET_REGS
-KVM_GET_SREGS
-KVM_SET_SREGS
-KVM_GET_FPU
-KVM_SET_FPU
-
-In addition, on s390 the following architecture specific ioctls for the
-kvm-vcpu file descriptor are supported:
-ioctl: KVM_S390_INTERRUPT
-args: struct kvm_s390_interrupt *
-see also: include/linux/kvm.h
-This ioctl is used to submit an interrupt for a specific virtual cpu.
-Only some interrupt types defined in include/linux/kvm.h make sense when
-submitted for a specific cpu. The following interrupts are not considered
-to be useful, and a call to inject them will result in -EINVAL error code:
-service processor calls and virtio interrupts. Valid interrupt types are:
-KVM_S390_PROGRAM_INT
-KVM_S390_SIGP_STOP
-KVM_S390_RESTART
-KVM_S390_SIGP_SET_PREFIX
-KVM_S390_INT_EMERGENCY
-
-ioctl: KVM_S390_STORE_STATUS
-args: unsigned long
-see also: include/linux/kvm.h
-This ioctl stores the state of the cpu at the guest real address given as
-argument, unless one of the following values defined in include/linux/kvm.h
-is given as argument:
-KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
-absolute lowcore as defined by the principles of operation
-KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
-its prefix page just like the dump tool that comes with zipl. This is useful
-to create a system dump for use with lkcdutils or crash.
-
-ioctl: KVM_S390_SET_INITIAL_PSW
-args: struct kvm_s390_psw *
-see also: include/linux/kvm.h
-This ioctl can be used to set the processor status word (psw) of a stopped cpu
-prior to running it with KVM_RUN. Note that this call is not required to modify
-the psw during sie intercepts that fall back to userspace because struct kvm_run
-does contain the psw, and this value is evaluated during reentry of KVM_RUN
-after the intercept exit was recognized.
-
-ioctl: KVM_S390_INITIAL_RESET
-args: none
-see also: include/linux/kvm.h
-This ioctl can be used to perform an initial cpu reset as defined by the
-principles of operation. The target cpu has to be in stopped state.
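
The file removed above documented the s390-specific call flow to /dev/kvm, notably that KVM_S390_ENABLE_SIE must be issued while the launcher is still single-threaded, because the implicit PGSTE switch done by KVM_CREATE_VM only works for single-threaded callers. The short userspace sketch below is illustrative only and not part of the patch; it simply mirrors the sequence the removed text described, using the ioctl names from <linux/kvm.h>.

/*
 * Illustrative sketch (not from this patch) of the call order the removed
 * kvm.txt described: enable SIE while single-threaded, then create the VM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0 || ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return 1;

	/* one-time, irreversible PGSTE switch; must happen before any threads */
	if (ioctl(kvm, KVM_S390_ENABLE_SIE, 0) < 0)
		perror("KVM_S390_ENABLE_SIE");

	/* threads may be spawned from here on; KVM_CREATE_VM no longer has to
	 * switch on PGSTE itself */
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	if (vm < 0)
		perror("KVM_CREATE_VM");
	return 0;
}
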
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3024acbe1f9d..df4db81254d3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -258,6 +258,9 @@ struct kvm_vcpu_stat {
258 u32 diagnose_10; 258 u32 diagnose_10;
259 u32 diagnose_44; 259 u32 diagnose_44;
260 u32 diagnose_9c; 260 u32 diagnose_9c;
261 u32 diagnose_258;
262 u32 diagnose_308;
263 u32 diagnose_500;
261}; 264};
262 265
263#define PGM_OPERATION 0x01 266#define PGM_OPERATION 0x01
@@ -630,7 +633,6 @@ extern char sie_exit;
630 633
631static inline void kvm_arch_hardware_disable(void) {} 634static inline void kvm_arch_hardware_disable(void) {}
632static inline void kvm_arch_check_processor_compat(void *rtn) {} 635static inline void kvm_arch_check_processor_compat(void *rtn) {}
633static inline void kvm_arch_exit(void) {}
634static inline void kvm_arch_sync_events(struct kvm *kvm) {} 636static inline void kvm_arch_sync_events(struct kvm *kvm) {}
635static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} 637static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
636static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} 638static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index fc7ec95848c3..5fbfb88f8477 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -27,13 +27,13 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
27 27
28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; 28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; 29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
30 vcpu->stat.diagnose_10++;
30 31
31 if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end 32 if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
32 || start < 2 * PAGE_SIZE) 33 || start < 2 * PAGE_SIZE)
33 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 34 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
34 35
35 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); 36 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
36 vcpu->stat.diagnose_10++;
37 37
38 /* 38 /*
39 * We checked for start >= end above, so lets check for the 39 * We checked for start >= end above, so lets check for the
@@ -75,6 +75,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
75 u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4; 75 u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
76 u16 ry = (vcpu->arch.sie_block->ipa & 0x0f); 76 u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
77 77
78 VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
79 vcpu->run->s.regs.gprs[rx]);
80 vcpu->stat.diagnose_258++;
78 if (vcpu->run->s.regs.gprs[rx] & 7) 81 if (vcpu->run->s.regs.gprs[rx] & 7)
79 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 82 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
80 rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); 83 rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -85,6 +88,9 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
85 88
86 switch (parm.subcode) { 89 switch (parm.subcode) {
87 case 0: /* TOKEN */ 90 case 0: /* TOKEN */
91 VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
92 "select mask 0x%llx compare mask 0x%llx",
93 parm.token_addr, parm.select_mask, parm.compare_mask);
88 if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) { 94 if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
89 /* 95 /*
90 * If the pagefault handshake is already activated, 96 * If the pagefault handshake is already activated,
@@ -114,6 +120,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
114 * the cancel, therefore to reduce code complexity, we assume 120 * the cancel, therefore to reduce code complexity, we assume
115 * all outstanding tokens are already pending. 121 * all outstanding tokens are already pending.
116 */ 122 */
123 VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
117 if (parm.token_addr || parm.select_mask || 124 if (parm.token_addr || parm.select_mask ||
118 parm.compare_mask || parm.zarch) 125 parm.compare_mask || parm.zarch)
119 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 126 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -174,7 +181,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
174 unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; 181 unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
175 unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff; 182 unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
176 183
177 VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); 184 VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
185 vcpu->stat.diagnose_308++;
178 switch (subcode) { 186 switch (subcode) {
179 case 3: 187 case 3:
180 vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; 188 vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -202,6 +210,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
202{ 210{
203 int ret; 211 int ret;
204 212
213 vcpu->stat.diagnose_500++;
205 /* No virtio-ccw notification? Get out quickly. */ 214 /* No virtio-ccw notification? Get out quickly. */
206 if (!vcpu->kvm->arch.css_support || 215 if (!vcpu->kvm->arch.css_support ||
207 (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) 216 (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index e97b3455d7e6..47518a324d75 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -473,10 +473,45 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
473 vcpu->arch.sie_block->iprcc &= ~PGM_PER; 473 vcpu->arch.sie_block->iprcc &= ~PGM_PER;
474} 474}
475 475
476#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
477#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
478#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
479#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
480
476void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu) 481void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
477{ 482{
483 int new_as;
484
478 if (debug_exit_required(vcpu)) 485 if (debug_exit_required(vcpu))
479 vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING; 486 vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
480 487
481 filter_guest_per_event(vcpu); 488 filter_guest_per_event(vcpu);
489
490 /*
491 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
492 * a space-switch event. PER events enforce space-switch events
493 * for these instructions. So if no PER event for the guest is left,
494 * we might have to filter the space-switch element out, too.
495 */
496 if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
497 vcpu->arch.sie_block->iprcc = 0;
498 new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
499
500 /*
501 * If the AS changed from / to home, we had RP, SAC or SACF
502 * instruction. Check primary and home space-switch-event
503 * controls. (theoretically home -> home produced no event)
504 */
505 if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
506 (pssec(vcpu) || hssec(vcpu)))
507 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
508
509 /*
510 * PT, PTI, PR, PC instruction operate on primary AS only. Check
511 * if the primary-space-switch-event control was or got set.
512 */
513 if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
514 (pssec(vcpu) || old_ssec(vcpu)))
515 vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
516 }
482} 517}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d89708e99..a5781404b83f 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -30,7 +30,6 @@
30#define IOINT_SCHID_MASK 0x0000ffff 30#define IOINT_SCHID_MASK 0x0000ffff
31#define IOINT_SSID_MASK 0x00030000 31#define IOINT_SSID_MASK 0x00030000
32#define IOINT_CSSID_MASK 0x03fc0000 32#define IOINT_CSSID_MASK 0x03fc0000
33#define IOINT_AI_MASK 0x04000000
34#define PFAULT_INIT 0x0600 33#define PFAULT_INIT 0x0600
35#define PFAULT_DONE 0x0680 34#define PFAULT_DONE 0x0680
36#define VIRTIO_PARAM 0x0d00 35#define VIRTIO_PARAM 0x0d00
@@ -311,8 +310,8 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
311 li->irq.ext.ext_params2 = 0; 310 li->irq.ext.ext_params2 = 0;
312 spin_unlock(&li->lock); 311 spin_unlock(&li->lock);
313 312
314 VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx", 313 VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
315 0, ext.ext_params2); 314 ext.ext_params2);
316 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 315 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
317 KVM_S390_INT_PFAULT_INIT, 316 KVM_S390_INT_PFAULT_INIT,
318 0, ext.ext_params2); 317 0, ext.ext_params2);
@@ -368,7 +367,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
368 spin_unlock(&fi->lock); 367 spin_unlock(&fi->lock);
369 368
370 if (deliver) { 369 if (deliver) {
371 VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", 370 VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
372 mchk.mcic); 371 mchk.mcic);
373 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 372 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
374 KVM_S390_MCHK, 373 KVM_S390_MCHK,
@@ -403,7 +402,7 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
403 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 402 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
404 int rc; 403 int rc;
405 404
406 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); 405 VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
407 vcpu->stat.deliver_restart_signal++; 406 vcpu->stat.deliver_restart_signal++;
408 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 407 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
409 408
@@ -427,7 +426,6 @@ static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
427 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 426 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
428 spin_unlock(&li->lock); 427 spin_unlock(&li->lock);
429 428
430 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
431 vcpu->stat.deliver_prefix_signal++; 429 vcpu->stat.deliver_prefix_signal++;
432 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 430 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
433 KVM_S390_SIGP_SET_PREFIX, 431 KVM_S390_SIGP_SET_PREFIX,
@@ -450,7 +448,7 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
450 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 448 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
451 spin_unlock(&li->lock); 449 spin_unlock(&li->lock);
452 450
453 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); 451 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
454 vcpu->stat.deliver_emergency_signal++; 452 vcpu->stat.deliver_emergency_signal++;
455 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 453 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
456 cpu_addr, 0); 454 cpu_addr, 0);
@@ -477,7 +475,7 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
477 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 475 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
478 spin_unlock(&li->lock); 476 spin_unlock(&li->lock);
479 477
480 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); 478 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
481 vcpu->stat.deliver_external_call++; 479 vcpu->stat.deliver_external_call++;
482 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 480 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
483 KVM_S390_INT_EXTERNAL_CALL, 481 KVM_S390_INT_EXTERNAL_CALL,
@@ -506,7 +504,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
506 memset(&li->irq.pgm, 0, sizeof(pgm_info)); 504 memset(&li->irq.pgm, 0, sizeof(pgm_info));
507 spin_unlock(&li->lock); 505 spin_unlock(&li->lock);
508 506
509 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", 507 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
510 pgm_info.code, ilc); 508 pgm_info.code, ilc);
511 vcpu->stat.deliver_program_int++; 509 vcpu->stat.deliver_program_int++;
512 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 510 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
@@ -622,7 +620,7 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
622 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 620 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
623 spin_unlock(&fi->lock); 621 spin_unlock(&fi->lock);
624 622
625 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", 623 VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
626 ext.ext_params); 624 ext.ext_params);
627 vcpu->stat.deliver_service_signal++; 625 vcpu->stat.deliver_service_signal++;
628 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, 626 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
@@ -651,9 +649,6 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
651 struct kvm_s390_interrupt_info, 649 struct kvm_s390_interrupt_info,
652 list); 650 list);
653 if (inti) { 651 if (inti) {
654 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
655 KVM_S390_INT_PFAULT_DONE, 0,
656 inti->ext.ext_params2);
657 list_del(&inti->list); 652 list_del(&inti->list);
658 fi->counters[FIRQ_CNTR_PFAULT] -= 1; 653 fi->counters[FIRQ_CNTR_PFAULT] -= 1;
659 } 654 }
@@ -662,6 +657,12 @@ static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
662 spin_unlock(&fi->lock); 657 spin_unlock(&fi->lock);
663 658
664 if (inti) { 659 if (inti) {
660 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
661 KVM_S390_INT_PFAULT_DONE, 0,
662 inti->ext.ext_params2);
663 VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
664 inti->ext.ext_params2);
665
665 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 666 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
666 (u16 *)__LC_EXT_INT_CODE); 667 (u16 *)__LC_EXT_INT_CODE);
667 rc |= put_guest_lc(vcpu, PFAULT_DONE, 668 rc |= put_guest_lc(vcpu, PFAULT_DONE,
@@ -691,7 +692,7 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
691 list); 692 list);
692 if (inti) { 693 if (inti) {
693 VCPU_EVENT(vcpu, 4, 694 VCPU_EVENT(vcpu, 4,
694 "interrupt: virtio parm:%x,parm64:%llx", 695 "deliver: virtio parm: 0x%x,parm64: 0x%llx",
695 inti->ext.ext_params, inti->ext.ext_params2); 696 inti->ext.ext_params, inti->ext.ext_params2);
696 vcpu->stat.deliver_virtio_interrupt++; 697 vcpu->stat.deliver_virtio_interrupt++;
697 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 698 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
@@ -741,7 +742,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
741 struct kvm_s390_interrupt_info, 742 struct kvm_s390_interrupt_info,
742 list); 743 list);
743 if (inti) { 744 if (inti) {
744 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); 745 VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
745 vcpu->stat.deliver_io_int++; 746 vcpu->stat.deliver_io_int++;
746 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 747 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
747 inti->type, 748 inti->type,
@@ -864,7 +865,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
864 865
865 __set_cpu_idle(vcpu); 866 __set_cpu_idle(vcpu);
866 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 867 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
867 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); 868 VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
868no_timer: 869no_timer:
869 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 870 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
870 kvm_vcpu_block(vcpu); 871 kvm_vcpu_block(vcpu);
@@ -968,6 +969,10 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
968{ 969{
969 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 970 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
970 971
972 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
973 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
974 irq->u.pgm.code, 0);
975
971 li->irq.pgm = irq->u.pgm; 976 li->irq.pgm = irq->u.pgm;
972 set_bit(IRQ_PEND_PROG, &li->pending_irqs); 977 set_bit(IRQ_PEND_PROG, &li->pending_irqs);
973 return 0; 978 return 0;
@@ -978,9 +983,6 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
978 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 983 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
979 struct kvm_s390_irq irq; 984 struct kvm_s390_irq irq;
980 985
981 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
982 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
983 0, 1);
984 spin_lock(&li->lock); 986 spin_lock(&li->lock);
985 irq.u.pgm.code = code; 987 irq.u.pgm.code = code;
986 __inject_prog(vcpu, &irq); 988 __inject_prog(vcpu, &irq);
@@ -996,10 +998,6 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
996 struct kvm_s390_irq irq; 998 struct kvm_s390_irq irq;
997 int rc; 999 int rc;
998 1000
999 VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
1000 pgm_info->code);
1001 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1002 pgm_info->code, 0, 1);
1003 spin_lock(&li->lock); 1001 spin_lock(&li->lock);
1004 irq.u.pgm = *pgm_info; 1002 irq.u.pgm = *pgm_info;
1005 rc = __inject_prog(vcpu, &irq); 1003 rc = __inject_prog(vcpu, &irq);
@@ -1012,11 +1010,11 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1012{ 1010{
1013 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1011 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1014 1012
1015 VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx", 1013 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1016 irq->u.ext.ext_params, irq->u.ext.ext_params2); 1014 irq->u.ext.ext_params2);
1017 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, 1015 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1018 irq->u.ext.ext_params, 1016 irq->u.ext.ext_params,
1019 irq->u.ext.ext_params2, 2); 1017 irq->u.ext.ext_params2);
1020 1018
1021 li->irq.ext = irq->u.ext; 1019 li->irq.ext = irq->u.ext;
1022 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 1020 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
@@ -1045,10 +1043,10 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1045 struct kvm_s390_extcall_info *extcall = &li->irq.extcall; 1043 struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1046 uint16_t src_id = irq->u.extcall.code; 1044 uint16_t src_id = irq->u.extcall.code;
1047 1045
1048 VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", 1046 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1049 src_id); 1047 src_id);
1050 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, 1048 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1051 src_id, 0, 2); 1049 src_id, 0);
1052 1050
1053 /* sending vcpu invalid */ 1051 /* sending vcpu invalid */
1054 if (src_id >= KVM_MAX_VCPUS || 1052 if (src_id >= KVM_MAX_VCPUS ||
@@ -1070,10 +1068,10 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1070 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1068 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1071 struct kvm_s390_prefix_info *prefix = &li->irq.prefix; 1069 struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1072 1070
1073 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", 1071 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1074 irq->u.prefix.address); 1072 irq->u.prefix.address);
1075 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, 1073 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1076 irq->u.prefix.address, 0, 2); 1074 irq->u.prefix.address, 0);
1077 1075
1078 if (!is_vcpu_stopped(vcpu)) 1076 if (!is_vcpu_stopped(vcpu))
1079 return -EBUSY; 1077 return -EBUSY;
@@ -1090,7 +1088,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1090 struct kvm_s390_stop_info *stop = &li->irq.stop; 1088 struct kvm_s390_stop_info *stop = &li->irq.stop;
1091 int rc = 0; 1089 int rc = 0;
1092 1090
1093 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2); 1091 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1094 1092
1095 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) 1093 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1096 return -EINVAL; 1094 return -EINVAL;
@@ -1114,8 +1112,8 @@ static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
1114{ 1112{
1115 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1113 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1116 1114
1117 VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type); 1115 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1118 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2); 1116 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1119 1117
1120 set_bit(IRQ_PEND_RESTART, &li->pending_irqs); 1118 set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1121 return 0; 1119 return 0;
@@ -1126,10 +1124,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1126{ 1124{
1127 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1125 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1128 1126
1129 VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", 1127 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1130 irq->u.emerg.code); 1128 irq->u.emerg.code);
1131 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1129 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1132 irq->u.emerg.code, 0, 2); 1130 irq->u.emerg.code, 0);
1133 1131
1134 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1132 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1135 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1133 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
@@ -1142,10 +1140,10 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1142 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1140 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1143 struct kvm_s390_mchk_info *mchk = &li->irq.mchk; 1141 struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1144 1142
1145 VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", 1143 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1146 irq->u.mchk.mcic); 1144 irq->u.mchk.mcic);
1147 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, 1145 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1148 irq->u.mchk.mcic, 2); 1146 irq->u.mchk.mcic);
1149 1147
1150 /* 1148 /*
1151 * Because repressible machine checks can be indicated along with 1149 * Because repressible machine checks can be indicated along with
@@ -1172,9 +1170,9 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
1172{ 1170{
1173 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1171 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1174 1172
1175 VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP); 1173 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1176 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 1174 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1177 0, 0, 2); 1175 0, 0);
1178 1176
1179 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1177 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1180 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1178 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1185,9 +1183,9 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1185{ 1183{
1186 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1184 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1187 1185
1188 VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER); 1186 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1189 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 1187 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1190 0, 0, 2); 1188 0, 0);
1191 1189
1192 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1190 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1193 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1191 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1435,20 +1433,20 @@ int kvm_s390_inject_vm(struct kvm *kvm,
1435 inti->ext.ext_params2 = s390int->parm64; 1433 inti->ext.ext_params2 = s390int->parm64;
1436 break; 1434 break;
1437 case KVM_S390_INT_SERVICE: 1435 case KVM_S390_INT_SERVICE:
1438 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); 1436 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1439 inti->ext.ext_params = s390int->parm; 1437 inti->ext.ext_params = s390int->parm;
1440 break; 1438 break;
1441 case KVM_S390_INT_PFAULT_DONE: 1439 case KVM_S390_INT_PFAULT_DONE:
1442 inti->ext.ext_params2 = s390int->parm64; 1440 inti->ext.ext_params2 = s390int->parm64;
1443 break; 1441 break;
1444 case KVM_S390_MCHK: 1442 case KVM_S390_MCHK:
1445 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", 1443 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1446 s390int->parm64); 1444 s390int->parm64);
1447 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ 1445 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1448 inti->mchk.mcic = s390int->parm64; 1446 inti->mchk.mcic = s390int->parm64;
1449 break; 1447 break;
1450 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1448 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1451 if (inti->type & IOINT_AI_MASK) 1449 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1452 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); 1450 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1453 else 1451 else
1454 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", 1452 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
@@ -1535,8 +1533,6 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1535 1533
1536 switch (irq->type) { 1534 switch (irq->type) {
1537 case KVM_S390_PROGRAM_INT: 1535 case KVM_S390_PROGRAM_INT:
1538 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
1539 irq->u.pgm.code);
1540 rc = __inject_prog(vcpu, irq); 1536 rc = __inject_prog(vcpu, irq);
1541 break; 1537 break;
1542 case KVM_S390_SIGP_SET_PREFIX: 1538 case KVM_S390_SIGP_SET_PREFIX:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..924b1ae86caf 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -108,6 +108,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
108 { "diagnose_10", VCPU_STAT(diagnose_10) }, 108 { "diagnose_10", VCPU_STAT(diagnose_10) },
109 { "diagnose_44", VCPU_STAT(diagnose_44) }, 109 { "diagnose_44", VCPU_STAT(diagnose_44) },
110 { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 110 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
111 { "diagnose_258", VCPU_STAT(diagnose_258) },
112 { "diagnose_308", VCPU_STAT(diagnose_308) },
113 { "diagnose_500", VCPU_STAT(diagnose_500) },
111 { NULL } 114 { NULL }
112}; 115};
113 116
@@ -124,6 +127,7 @@ unsigned long kvm_s390_fac_list_mask_size(void)
124} 127}
125 128
126static struct gmap_notifier gmap_notifier; 129static struct gmap_notifier gmap_notifier;
130debug_info_t *kvm_s390_dbf;
127 131
128/* Section: not file related */ 132/* Section: not file related */
129int kvm_arch_hardware_enable(void) 133int kvm_arch_hardware_enable(void)
@@ -148,10 +152,24 @@ void kvm_arch_hardware_unsetup(void)
148 152
149int kvm_arch_init(void *opaque) 153int kvm_arch_init(void *opaque)
150{ 154{
155 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
156 if (!kvm_s390_dbf)
157 return -ENOMEM;
158
159 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
160 debug_unregister(kvm_s390_dbf);
161 return -ENOMEM;
162 }
163
151 /* Register floating interrupt controller interface. */ 164 /* Register floating interrupt controller interface. */
152 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); 165 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
153} 166}
154 167
168void kvm_arch_exit(void)
169{
170 debug_unregister(kvm_s390_dbf);
171}
172
155/* Section: device related */ 173/* Section: device related */
156long kvm_arch_dev_ioctl(struct file *filp, 174long kvm_arch_dev_ioctl(struct file *filp,
157 unsigned int ioctl, unsigned long arg) 175 unsigned int ioctl, unsigned long arg)
@@ -281,10 +299,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
281 299
282 switch (cap->cap) { 300 switch (cap->cap) {
283 case KVM_CAP_S390_IRQCHIP: 301 case KVM_CAP_S390_IRQCHIP:
302 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
284 kvm->arch.use_irqchip = 1; 303 kvm->arch.use_irqchip = 1;
285 r = 0; 304 r = 0;
286 break; 305 break;
287 case KVM_CAP_S390_USER_SIGP: 306 case KVM_CAP_S390_USER_SIGP:
307 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
288 kvm->arch.user_sigp = 1; 308 kvm->arch.user_sigp = 1;
289 r = 0; 309 r = 0;
290 break; 310 break;
@@ -295,8 +315,11 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
295 r = 0; 315 r = 0;
296 } else 316 } else
297 r = -EINVAL; 317 r = -EINVAL;
318 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
319 r ? "(not available)" : "(success)");
298 break; 320 break;
299 case KVM_CAP_S390_USER_STSI: 321 case KVM_CAP_S390_USER_STSI:
322 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
300 kvm->arch.user_stsi = 1; 323 kvm->arch.user_stsi = 1;
301 r = 0; 324 r = 0;
302 break; 325 break;
@@ -314,6 +337,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att
314 switch (attr->attr) { 337 switch (attr->attr) {
315 case KVM_S390_VM_MEM_LIMIT_SIZE: 338 case KVM_S390_VM_MEM_LIMIT_SIZE:
316 ret = 0; 339 ret = 0;
340 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
341 kvm->arch.gmap->asce_end);
317 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) 342 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
318 ret = -EFAULT; 343 ret = -EFAULT;
319 break; 344 break;
@@ -330,7 +355,13 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
330 unsigned int idx; 355 unsigned int idx;
331 switch (attr->attr) { 356 switch (attr->attr) {
332 case KVM_S390_VM_MEM_ENABLE_CMMA: 357 case KVM_S390_VM_MEM_ENABLE_CMMA:
358 /* enable CMMA only for z10 and later (EDAT_1) */
359 ret = -EINVAL;
360 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
361 break;
362
333 ret = -EBUSY; 363 ret = -EBUSY;
364 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
334 mutex_lock(&kvm->lock); 365 mutex_lock(&kvm->lock);
335 if (atomic_read(&kvm->online_vcpus) == 0) { 366 if (atomic_read(&kvm->online_vcpus) == 0) {
336 kvm->arch.use_cmma = 1; 367 kvm->arch.use_cmma = 1;
@@ -339,6 +370,11 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
339 mutex_unlock(&kvm->lock); 370 mutex_unlock(&kvm->lock);
340 break; 371 break;
341 case KVM_S390_VM_MEM_CLR_CMMA: 372 case KVM_S390_VM_MEM_CLR_CMMA:
373 ret = -EINVAL;
374 if (!kvm->arch.use_cmma)
375 break;
376
377 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
342 mutex_lock(&kvm->lock); 378 mutex_lock(&kvm->lock);
343 idx = srcu_read_lock(&kvm->srcu); 379 idx = srcu_read_lock(&kvm->srcu);
344 s390_reset_cmma(kvm->arch.gmap->mm); 380 s390_reset_cmma(kvm->arch.gmap->mm);
@@ -374,6 +410,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
374 } 410 }
375 } 411 }
376 mutex_unlock(&kvm->lock); 412 mutex_unlock(&kvm->lock);
413 VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
377 break; 414 break;
378 } 415 }
379 default: 416 default:
@@ -400,22 +437,26 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
400 kvm->arch.crypto.crycb->aes_wrapping_key_mask, 437 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
401 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 438 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
402 kvm->arch.crypto.aes_kw = 1; 439 kvm->arch.crypto.aes_kw = 1;
440 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
403 break; 441 break;
404 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: 442 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
405 get_random_bytes( 443 get_random_bytes(
406 kvm->arch.crypto.crycb->dea_wrapping_key_mask, 444 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
407 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 445 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
408 kvm->arch.crypto.dea_kw = 1; 446 kvm->arch.crypto.dea_kw = 1;
447 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
409 break; 448 break;
410 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: 449 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
411 kvm->arch.crypto.aes_kw = 0; 450 kvm->arch.crypto.aes_kw = 0;
412 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, 451 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
413 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 452 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
453 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
414 break; 454 break;
415 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: 455 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
416 kvm->arch.crypto.dea_kw = 0; 456 kvm->arch.crypto.dea_kw = 0;
417 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, 457 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
418 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 458 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
459 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
419 break; 460 break;
420 default: 461 default:
421 mutex_unlock(&kvm->lock); 462 mutex_unlock(&kvm->lock);
@@ -440,6 +481,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
440 481
441 if (gtod_high != 0) 482 if (gtod_high != 0)
442 return -EINVAL; 483 return -EINVAL;
484 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
443 485
444 return 0; 486 return 0;
445} 487}
@@ -465,6 +507,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
465 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; 507 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
466 kvm_s390_vcpu_unblock_all(kvm); 508 kvm_s390_vcpu_unblock_all(kvm);
467 mutex_unlock(&kvm->lock); 509 mutex_unlock(&kvm->lock);
510 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
468 return 0; 511 return 0;
469} 512}
470 513
@@ -496,6 +539,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
496 if (copy_to_user((void __user *)attr->addr, &gtod_high, 539 if (copy_to_user((void __user *)attr->addr, &gtod_high,
497 sizeof(gtod_high))) 540 sizeof(gtod_high)))
498 return -EFAULT; 541 return -EFAULT;
542 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
499 543
500 return 0; 544 return 0;
501} 545}
@@ -512,6 +556,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
512 gtod = host_tod + kvm->arch.epoch; 556 gtod = host_tod + kvm->arch.epoch;
513 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) 557 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
514 return -EFAULT; 558 return -EFAULT;
559 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
515 560
516 return 0; 561 return 0;
517} 562}
@@ -821,7 +866,9 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
821 } 866 }
822 867
823 /* Enable storage key handling for the guest */ 868 /* Enable storage key handling for the guest */
824 s390_enable_skey(); 869 r = s390_enable_skey();
870 if (r)
871 goto out;
825 872
826 for (i = 0; i < args->count; i++) { 873 for (i = 0; i < args->count; i++) {
827 hva = gfn_to_hva(kvm, args->start_gfn + i); 874 hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1043,7 +1090,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1043 1090
1044 sprintf(debug_name, "kvm-%u", current->pid); 1091 sprintf(debug_name, "kvm-%u", current->pid);
1045 1092
1046 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); 1093 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1047 if (!kvm->arch.dbf) 1094 if (!kvm->arch.dbf)
1048 goto out_err; 1095 goto out_err;
1049 1096
@@ -1086,7 +1133,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1086 mutex_init(&kvm->arch.ipte_mutex); 1133 mutex_init(&kvm->arch.ipte_mutex);
1087 1134
1088 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 1135 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
1089 VM_EVENT(kvm, 3, "%s", "vm created"); 1136 VM_EVENT(kvm, 3, "vm created with type %lu", type);
1090 1137
1091 if (type & KVM_VM_S390_UCONTROL) { 1138 if (type & KVM_VM_S390_UCONTROL) {
1092 kvm->arch.gmap = NULL; 1139 kvm->arch.gmap = NULL;
@@ -1103,6 +1150,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1103 kvm->arch.epoch = 0; 1150 kvm->arch.epoch = 0;
1104 1151
1105 spin_lock_init(&kvm->arch.start_stop_lock); 1152 spin_lock_init(&kvm->arch.start_stop_lock);
1153 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
1106 1154
1107 return 0; 1155 return 0;
1108out_err: 1156out_err:
@@ -1110,6 +1158,7 @@ out_err:
1110 free_page((unsigned long)kvm->arch.model.fac); 1158 free_page((unsigned long)kvm->arch.model.fac);
1111 debug_unregister(kvm->arch.dbf); 1159 debug_unregister(kvm->arch.dbf);
1112 free_page((unsigned long)(kvm->arch.sca)); 1160 free_page((unsigned long)(kvm->arch.sca));
1161 KVM_EVENT(3, "creation of vm failed: %d", rc);
1113 return rc; 1162 return rc;
1114} 1163}
1115 1164
@@ -1131,7 +1180,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1131 if (kvm_is_ucontrol(vcpu->kvm)) 1180 if (kvm_is_ucontrol(vcpu->kvm))
1132 gmap_free(vcpu->arch.gmap); 1181 gmap_free(vcpu->arch.gmap);
1133 1182
1134 if (kvm_s390_cmma_enabled(vcpu->kvm)) 1183 if (vcpu->kvm->arch.use_cmma)
1135 kvm_s390_vcpu_unsetup_cmma(vcpu); 1184 kvm_s390_vcpu_unsetup_cmma(vcpu);
1136 free_page((unsigned long)(vcpu->arch.sie_block)); 1185 free_page((unsigned long)(vcpu->arch.sie_block));
1137 1186
@@ -1166,6 +1215,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
1166 gmap_free(kvm->arch.gmap); 1215 gmap_free(kvm->arch.gmap);
1167 kvm_s390_destroy_adapters(kvm); 1216 kvm_s390_destroy_adapters(kvm);
1168 kvm_s390_clear_float_irqs(kvm); 1217 kvm_s390_clear_float_irqs(kvm);
1218 KVM_EVENT(3, "vm 0x%p destroyed", kvm);
1169} 1219}
1170 1220
1171/* Section: vcpu related */ 1221/* Section: vcpu related */
@@ -1342,7 +1392,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1342 } 1392 }
1343 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; 1393 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
1344 1394
1345 if (kvm_s390_cmma_enabled(vcpu->kvm)) { 1395 if (vcpu->kvm->arch.use_cmma) {
1346 rc = kvm_s390_vcpu_setup_cmma(vcpu); 1396 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1347 if (rc) 1397 if (rc)
1348 return rc; 1398 return rc;
@@ -1723,18 +1773,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1723 return rc; 1773 return rc;
1724} 1774}
1725 1775
1726bool kvm_s390_cmma_enabled(struct kvm *kvm)
1727{
1728 if (!MACHINE_IS_LPAR)
1729 return false;
1730 /* only enable for z10 and later */
1731 if (!MACHINE_HAS_EDAT1)
1732 return false;
1733 if (!kvm->arch.use_cmma)
1734 return false;
1735 return true;
1736}
1737
1738static bool ibs_enabled(struct kvm_vcpu *vcpu) 1776static bool ibs_enabled(struct kvm_vcpu *vcpu)
1739{ 1777{
1740 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; 1778 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
@@ -2340,6 +2378,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2340 case KVM_CAP_S390_CSS_SUPPORT: 2378 case KVM_CAP_S390_CSS_SUPPORT:
2341 if (!vcpu->kvm->arch.css_support) { 2379 if (!vcpu->kvm->arch.css_support) {
2342 vcpu->kvm->arch.css_support = 1; 2380 vcpu->kvm->arch.css_support = 1;
2381 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2343 trace_kvm_s390_enable_css(vcpu->kvm); 2382 trace_kvm_s390_enable_css(vcpu->kvm);
2344 } 2383 }
2345 r = 0; 2384 r = 0;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c5704786e473..c446aabf60d3 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -27,6 +27,13 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
27#define TDB_FORMAT1 1 27#define TDB_FORMAT1 1
28#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) 28#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
29 29
30extern debug_info_t *kvm_s390_dbf;
31#define KVM_EVENT(d_loglevel, d_string, d_args...)\
32do { \
33 debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
34 d_args); \
35} while (0)
36
30#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ 37#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
31do { \ 38do { \
32 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ 39 debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
@@ -65,6 +72,8 @@ static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
65 72
66static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) 73static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
67{ 74{
75 VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
76 prefix);
68 vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; 77 vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
69 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 78 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
70 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 79 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
@@ -217,8 +226,6 @@ void exit_sie(struct kvm_vcpu *vcpu);
217void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); 226void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
218int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); 227int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
219void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); 228void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
220/* is cmma enabled */
221bool kvm_s390_cmma_enabled(struct kvm *kvm);
222unsigned long kvm_s390_fac_list_mask_size(void); 229unsigned long kvm_s390_fac_list_mask_size(void);
223extern unsigned long kvm_s390_fac_list_mask[]; 230extern unsigned long kvm_s390_fac_list_mask[];
224 231
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ad4242245771..afefa3bb2f13 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -53,6 +53,7 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
53 kvm_s390_set_psw_cc(vcpu, 3); 53 kvm_s390_set_psw_cc(vcpu, 3);
54 return 0; 54 return 0;
55 } 55 }
56 VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
56 val = (val - hostclk) & ~0x3fUL; 57 val = (val - hostclk) & ~0x3fUL;
57 58
58 mutex_lock(&vcpu->kvm->lock); 59 mutex_lock(&vcpu->kvm->lock);
@@ -98,8 +99,6 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
98 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 99 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
99 100
100 kvm_s390_set_prefix(vcpu, address); 101 kvm_s390_set_prefix(vcpu, address);
101
102 VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
103 trace_kvm_s390_handle_prefix(vcpu, 1, address); 102 trace_kvm_s390_handle_prefix(vcpu, 1, address);
104 return 0; 103 return 0;
105} 104}
@@ -129,7 +128,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
129 if (rc) 128 if (rc)
130 return kvm_s390_inject_prog_cond(vcpu, rc); 129 return kvm_s390_inject_prog_cond(vcpu, rc);
131 130
132 VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); 131 VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
133 trace_kvm_s390_handle_prefix(vcpu, 0, address); 132 trace_kvm_s390_handle_prefix(vcpu, 0, address);
134 return 0; 133 return 0;
135} 134}
@@ -155,7 +154,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
155 if (rc) 154 if (rc)
156 return kvm_s390_inject_prog_cond(vcpu, rc); 155 return kvm_s390_inject_prog_cond(vcpu, rc);
157 156
158 VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); 157 VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
159 trace_kvm_s390_handle_stap(vcpu, ga); 158 trace_kvm_s390_handle_stap(vcpu, ga);
160 return 0; 159 return 0;
161} 160}
@@ -167,6 +166,7 @@ static int __skey_check_enable(struct kvm_vcpu *vcpu)
167 return rc; 166 return rc;
168 167
169 rc = s390_enable_skey(); 168 rc = s390_enable_skey();
169 VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
170 trace_kvm_s390_skey_related_inst(vcpu); 170 trace_kvm_s390_skey_related_inst(vcpu);
171 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); 171 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
172 return rc; 172 return rc;
@@ -370,7 +370,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
370 &fac, sizeof(fac)); 370 &fac, sizeof(fac));
371 if (rc) 371 if (rc)
372 return rc; 372 return rc;
373 VCPU_EVENT(vcpu, 5, "store facility list value %x", fac); 373 VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
374 trace_kvm_s390_handle_stfl(vcpu, fac); 374 trace_kvm_s390_handle_stfl(vcpu, fac);
375 return 0; 375 return 0;
376} 376}
@@ -468,7 +468,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
468 if (rc) 468 if (rc)
469 return kvm_s390_inject_prog_cond(vcpu, rc); 469 return kvm_s390_inject_prog_cond(vcpu, rc);
470 470
471 VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); 471 VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
472 return 0; 472 return 0;
473} 473}
474 474
@@ -521,7 +521,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
521 ar_t ar; 521 ar_t ar;
522 522
523 vcpu->stat.instruction_stsi++; 523 vcpu->stat.instruction_stsi++;
524 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); 524 VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
525 525
526 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 526 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
527 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 527 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -758,10 +758,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
758 struct gmap *gmap; 758 struct gmap *gmap;
759 int i; 759 int i;
760 760
761 VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); 761 VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
762 gmap = vcpu->arch.gmap; 762 gmap = vcpu->arch.gmap;
763 vcpu->stat.instruction_essa++; 763 vcpu->stat.instruction_essa++;
764 if (!kvm_s390_cmma_enabled(vcpu->kvm)) 764 if (!vcpu->kvm->arch.use_cmma)
765 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 765 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
766 766
767 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 767 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -829,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
829 if (ga & 3) 829 if (ga & 3)
830 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 830 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
831 831
832 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 832 VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
833 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 833 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
834 834
835 nr_regs = ((reg3 - reg1) & 0xf) + 1; 835 nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -868,7 +868,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
868 if (ga & 3) 868 if (ga & 3)
869 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 869 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
870 870
871 VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 871 VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
872 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); 872 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
873 873
874 reg = reg1; 874 reg = reg1;
@@ -902,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
902 if (ga & 7) 902 if (ga & 7)
903 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 903 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
904 904
905 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 905 VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
906 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 906 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
907 907
908 nr_regs = ((reg3 - reg1) & 0xf) + 1; 908 nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -940,7 +940,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
940 if (ga & 7) 940 if (ga & 7)
941 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 941 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
942 942
943 VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); 943 VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
944 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); 944 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
945 945
946 reg = reg1; 946 reg = reg1;
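The priv.c hunks above belong to the logging rework named in the merge description: instruction handlers now log with capitalized mnemonics and at s390dbf levels 3 or 4 instead of 5. The snippet below is only a minimal, self-contained sketch of the level-filtering idea, assuming the usual s390dbf rule that an event is recorded when its level is less than or equal to the view's current level (taken as 3 here purely for illustration); it is not the kernel's VCPU_EVENT macro, and all names in it are made up.

#include <stdio.h>

/* Illustrative stand-in for the s390dbf view level; in the kernel this is a
 * property of the registered debug feature and can be changed via debugfs. */
#define DBF_VIEW_LEVEL	3

/* Toy vcpu_event(): record a message only if it is important enough, which is
 * why lowering an event from level 5 to level 3 makes it show up without
 * raising the view's verbosity. */
static void vcpu_event(int level, const char *msg)
{
	if (level <= DBF_VIEW_LEVEL)
		printf("kvm-s390 dbf: %s\n", msg);
}

int main(void)
{
	vcpu_event(3, "STFL: store facility list 0x...");	/* recorded */
	vcpu_event(5, "chatty event at the old level");	/* filtered out */
	return 0;
}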
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 72e58bd2bee7..da690b69f9fe 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -205,9 +205,6 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
205 *reg &= 0xffffffff00000000UL; 205 *reg &= 0xffffffff00000000UL;
206 *reg |= SIGP_STATUS_INCORRECT_STATE; 206 *reg |= SIGP_STATUS_INCORRECT_STATE;
207 return SIGP_CC_STATUS_STORED; 207 return SIGP_CC_STATUS_STORED;
208 } else if (rc == 0) {
209 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
210 dst_vcpu->vcpu_id, irq.u.prefix.address);
211 } 208 }
212 209
213 return rc; 210 return rc;
@@ -371,7 +368,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
371 return rc; 368 return rc;
372} 369}
373 370
374static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) 371static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
372 u16 cpu_addr)
375{ 373{
376 if (!vcpu->kvm->arch.user_sigp) 374 if (!vcpu->kvm->arch.user_sigp)
377 return 0; 375 return 0;
@@ -414,9 +412,8 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
414 default: 412 default:
415 vcpu->stat.instruction_sigp_unknown++; 413 vcpu->stat.instruction_sigp_unknown++;
416 } 414 }
417 415 VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
418 VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space", 416 order_code, cpu_addr);
419 order_code);
420 417
421 return 1; 418 return 1;
422} 419}
@@ -435,7 +432,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
435 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 432 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
436 433
437 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); 434 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
438 if (handle_sigp_order_in_user_space(vcpu, order_code)) 435 if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
439 return -EOPNOTSUPP; 436 return -EOPNOTSUPP;
440 437
441 if (r1 % 2) 438 if (r1 % 2)
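For context on the sigp.c change: handle_sigp_order_in_user_space() now also receives the target CPU address so the forwarding decision can be logged with both the order and the destination, at level 3. Below is a self-contained sketch of that forwarding pattern using made-up names (fake_vm, order_goes_to_user_space); in the kernel, user_sigp corresponds to the KVM_CAP_S390_USER_SIGP capability, and a nonzero return makes kvm_s390_handle_sigp() return -EOPNOTSUPP so the exit reaches the userspace launcher.

#include <stdio.h>

struct fake_vm {
	int user_sigp;	/* stands in for the per-VM userspace-SIGP setting */
};

/* Return nonzero when the order should be completed by userspace. */
static int order_goes_to_user_space(struct fake_vm *vm, unsigned char order_code,
				    unsigned short cpu_addr)
{
	if (!vm->user_sigp)
		return 0;	/* capability off: handle the order in the kernel */
	printf("SIGP: order %u for CPU %d handled in userspace\n",
	       order_code, cpu_addr);
	return 1;
}

int main(void)
{
	struct fake_vm vm = { .user_sigp = 1 };

	/* order code and CPU address are arbitrary values for illustration */
	if (order_goes_to_user_space(&vm, 5, 1))
		printf("-> handler would return -EOPNOTSUPP, exiting to userspace\n");
	return 0;
}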
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 3208d33a48cb..cc1d6c68356f 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -105,11 +105,22 @@ TRACE_EVENT(kvm_s390_vcpu_start_stop,
105 {KVM_S390_PROGRAM_INT, "program interrupt"}, \ 105 {KVM_S390_PROGRAM_INT, "program interrupt"}, \
106 {KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"}, \ 106 {KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"}, \
107 {KVM_S390_RESTART, "sigp restart"}, \ 107 {KVM_S390_RESTART, "sigp restart"}, \
108 {KVM_S390_INT_PFAULT_INIT, "pfault init"}, \
109 {KVM_S390_INT_PFAULT_DONE, "pfault done"}, \
110 {KVM_S390_MCHK, "machine check"}, \
111 {KVM_S390_INT_CLOCK_COMP, "clock comparator"}, \
112 {KVM_S390_INT_CPU_TIMER, "cpu timer"}, \
108 {KVM_S390_INT_VIRTIO, "virtio interrupt"}, \ 113 {KVM_S390_INT_VIRTIO, "virtio interrupt"}, \
109 {KVM_S390_INT_SERVICE, "sclp interrupt"}, \ 114 {KVM_S390_INT_SERVICE, "sclp interrupt"}, \
110 {KVM_S390_INT_EMERGENCY, "sigp emergency"}, \ 115 {KVM_S390_INT_EMERGENCY, "sigp emergency"}, \
111 {KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"} 116 {KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"}
112 117
118#define get_irq_name(__type) \
119 (__type > KVM_S390_INT_IO_MAX ? \
120 __print_symbolic(__type, kvm_s390_int_type) : \
121 (__type & KVM_S390_INT_IO_AI_MASK ? \
122 "adapter I/O interrupt" : "subchannel I/O interrupt"))
123
113TRACE_EVENT(kvm_s390_inject_vm, 124TRACE_EVENT(kvm_s390_inject_vm,
114 TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who), 125 TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who),
115 TP_ARGS(type, parm, parm64, who), 126 TP_ARGS(type, parm, parm64, who),
@@ -131,22 +142,19 @@ TRACE_EVENT(kvm_s390_inject_vm,
131 TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx", 142 TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx",
132 (__entry->who == 1) ? " (from kernel)" : 143 (__entry->who == 1) ? " (from kernel)" :
133 (__entry->who == 2) ? " (from user)" : "", 144 (__entry->who == 2) ? " (from user)" : "",
134 __entry->inttype, 145 __entry->inttype, get_irq_name(__entry->inttype),
135 __print_symbolic(__entry->inttype, kvm_s390_int_type),
136 __entry->parm, __entry->parm64) 146 __entry->parm, __entry->parm64)
137 ); 147 );
138 148
139TRACE_EVENT(kvm_s390_inject_vcpu, 149TRACE_EVENT(kvm_s390_inject_vcpu,
140 TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \ 150 TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64),
141 int who), 151 TP_ARGS(id, type, parm, parm64),
142 TP_ARGS(id, type, parm, parm64, who),
143 152
144 TP_STRUCT__entry( 153 TP_STRUCT__entry(
145 __field(int, id) 154 __field(int, id)
146 __field(__u32, inttype) 155 __field(__u32, inttype)
147 __field(__u32, parm) 156 __field(__u32, parm)
148 __field(__u64, parm64) 157 __field(__u64, parm64)
149 __field(int, who)
150 ), 158 ),
151 159
152 TP_fast_assign( 160 TP_fast_assign(
@@ -154,15 +162,12 @@ TRACE_EVENT(kvm_s390_inject_vcpu,
154 __entry->inttype = type & 0x00000000ffffffff; 162 __entry->inttype = type & 0x00000000ffffffff;
155 __entry->parm = parm; 163 __entry->parm = parm;
156 __entry->parm64 = parm64; 164 __entry->parm64 = parm64;
157 __entry->who = who;
158 ), 165 ),
159 166
160 TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx", 167 TP_printk("inject (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
161 (__entry->who == 1) ? " (from kernel)" :
162 (__entry->who == 2) ? " (from user)" : "",
163 __entry->id, __entry->inttype, 168 __entry->id, __entry->inttype,
164 __print_symbolic(__entry->inttype, kvm_s390_int_type), 169 get_irq_name(__entry->inttype), __entry->parm,
165 __entry->parm, __entry->parm64) 170 __entry->parm64)
166 ); 171 );
167 172
168/* 173/*
@@ -189,8 +194,8 @@ TRACE_EVENT(kvm_s390_deliver_interrupt,
189 TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \ 194 TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
190 "data:%08llx %016llx", 195 "data:%08llx %016llx",
191 __entry->id, __entry->inttype, 196 __entry->id, __entry->inttype,
192 __print_symbolic(__entry->inttype, kvm_s390_int_type), 197 get_irq_name(__entry->inttype), __entry->data0,
193 __entry->data0, __entry->data1) 198 __entry->data1)
194 ); 199 );
195 200
196/* 201/*
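The new get_irq_name() helper above keeps the symbolic-name lookup for the architected interrupt types but classifies the I/O range itself, since I/O interrupt types form a numeric range rather than single values. The function below restates that classification as a self-contained sketch, reusing the constants from the UAPI hunk further down (KVM_S390_INT_IO_MAX, KVM_S390_INT_IO_AI_MASK); the real macro resolves non-I/O types through __print_symbolic() and the kvm_s390_int_type table instead of the placeholder string used here.

#include <stdio.h>

#define KVM_S390_INT_IO_MAX	0xfffdffffu	/* values from include/uapi/linux/kvm.h */
#define KVM_S390_INT_IO_AI_MASK	0x04000000u	/* bit 26, the adapter-interrupt flag */

static const char *irq_name(unsigned long long type)
{
	if (type > KVM_S390_INT_IO_MAX)
		return "non-I/O interrupt";	/* symbolic lookup in the real macro */
	return (type & KVM_S390_INT_IO_AI_MASK) ?
		"adapter I/O interrupt" : "subchannel I/O interrupt";
}

int main(void)
{
	printf("%s\n", irq_name(0x04000000));	/* adapter I/O interrupt */
	printf("%s\n", irq_name(0x00010000));	/* subchannel I/O interrupt */
	return 0;
}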
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 9ef19ebd9df4..0d831f94f8a8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -482,6 +482,7 @@ struct kvm_s390_psw {
482 ((ai) << 26)) 482 ((ai) << 26))
483#define KVM_S390_INT_IO_MIN 0x00000000u 483#define KVM_S390_INT_IO_MIN 0x00000000u
484#define KVM_S390_INT_IO_MAX 0xfffdffffu 484#define KVM_S390_INT_IO_MAX 0xfffdffffu
485#define KVM_S390_INT_IO_AI_MASK 0x04000000u
485 486
486 487
487struct kvm_s390_interrupt { 488struct kvm_s390_interrupt {
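The single UAPI addition, KVM_S390_INT_IO_AI_MASK, names bit 26 of the interrupt type, which is the same bit the existing KVM_S390_INT_IO() macro sets from its ai argument (visible in the context line above as (ai) << 26). A short userspace-style check, assuming only that the UAPI header with these two macros is installed, could look like this:

#include <assert.h>
#include <linux/kvm.h>	/* KVM_S390_INT_IO() and KVM_S390_INT_IO_AI_MASK */

int main(void)
{
	/* cssid/ssid/schid are zero here purely for illustration */
	unsigned int adapter = KVM_S390_INT_IO(1, 0, 0, 0);
	unsigned int subchannel = KVM_S390_INT_IO(0, 0, 0, 0);

	assert(adapter & KVM_S390_INT_IO_AI_MASK);	/* 1 << 26 == 0x04000000 */
	assert(!(subchannel & KVM_S390_INT_IO_AI_MASK));
	return 0;
}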