author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-04 12:30:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-04 12:30:33 -0400
commit     ecefbd94b834fa32559d854646d777c56749ef1c (patch)
tree       ca8958900ad9e208a8e5fb7704f1b66dc76131b4 /arch/s390/kvm
parent     ce57e981f2b996aaca2031003b3f866368307766 (diff)
parent     3d11df7abbff013b811d5615320580cd5d9d7d31 (diff)
Merge tag 'kvm-3.7-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Avi Kivity:
 "Highlights of the changes for this release include support for vfio
  level triggered interrupts, improved big real mode support on older
  Intels, a streamlined guest page table walker, guest APIC speedups,
  PIO optimizations, better overcommit handling, and read-only memory."

* tag 'kvm-3.7-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (138 commits)
  KVM: s390: Fix vcpu_load handling in interrupt code
  KVM: x86: Fix guest debug across vcpu INIT reset
  KVM: Add resampling irqfds for level triggered interrupts
  KVM: optimize apic interrupt delivery
  KVM: MMU: Eliminate pointless temporary 'ac'
  KVM: MMU: Avoid access/dirty update loop if all is well
  KVM: MMU: Eliminate eperm temporary
  KVM: MMU: Optimize is_last_gpte()
  KVM: MMU: Simplify walk_addr_generic() loop
  KVM: MMU: Optimize pte permission checks
  KVM: MMU: Update accessed and dirty bits after guest pagetable walk
  KVM: MMU: Move gpte_access() out of paging_tmpl.h
  KVM: MMU: Optimize gpte_access() slightly
  KVM: MMU: Push clean gpte write protection out of gpte_access()
  KVM: clarify kvmclock documentation
  KVM: make processes waiting on vcpu mutex killable
  KVM: SVM: Make use of asm.h
  KVM: VMX: Make use of asm.h
  KVM: VMX: Make lto-friendly
  KVM: x86: lapic: Clean up find_highest_vector() and count_vectors()
  ...

Conflicts:
	arch/s390/include/asm/processor.h
	arch/x86/kvm/i8259.c
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/Kconfig         1
-rw-r--r--  arch/s390/kvm/diag.c          4
-rw-r--r--  arch/s390/kvm/intercept.c    11
-rw-r--r--  arch/s390/kvm/interrupt.c    25
-rw-r--r--  arch/s390/kvm/kvm-s390.c     17
-rw-r--r--  arch/s390/kvm/priv.c          9
-rw-r--r--  arch/s390/kvm/sigp.c          2
-rw-r--r--  arch/s390/kvm/trace-s390.h  210
-rw-r--r--  arch/s390/kvm/trace.h       341
9 files changed, 616 insertions, 4 deletions
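
Nearly all of the s390 changes below follow a single pattern: an existing exit or interrupt handler gains a trace_...() call next to its s390dbf VCPU_EVENT/VM_EVENT message, and the event itself is defined with TRACE_EVENT() in the new trace.h and trace-s390.h headers at the end of this diff. For orientation, a minimal sketch of that pattern is shown here; the event name kvm_s390_handle_foo and its fields are invented for illustration and are not part of the patch.

/* Sketch only: mirrors the TRACE_EVENT pattern used by the new trace headers. */
#if !defined(_TRACE_KVM_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(kvm_s390_handle_foo,
	TP_PROTO(unsigned int id, unsigned long parm),
	TP_ARGS(id, parm),

	TP_STRUCT__entry(		/* fields stored in the ring buffer */
		__field(unsigned int, id)
		__field(unsigned long, parm)
	),

	TP_fast_assign(			/* runs at the trace_...() call site */
		__entry->id = id;
		__entry->parm = parm;
	),

	TP_printk("vcpu %u handled foo, parm %lx", __entry->id, __entry->parm)
);

#endif /* _TRACE_KVM_FOO_H */

A handler instrumented this way then only adds one line, e.g. trace_kvm_s390_handle_foo(vcpu->vcpu_id, parm); which is exactly what the hunks below do with the real event names.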
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 9b04a32e5695..b58dd869cb32 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index c88bb7793390..a390687feb13 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -14,6 +14,8 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include "kvm-s390.h"
+#include "trace.h"
+#include "trace-s390.h"
 
 static int diag_release_pages(struct kvm_vcpu *vcpu)
 {
@@ -98,6 +100,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
 	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
 		   vcpu->run->s390_reset_flags);
+	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
 	return -EREMOTE;
 }
 
@@ -105,6 +108,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 {
 	int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
 
+	trace_kvm_s390_handle_diag(vcpu, code);
 	switch (code) {
 	case 0x10:
 		return diag_release_pages(vcpu);
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index adae539f12e2..22798ec33fd1 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -19,6 +19,8 @@
 
 #include "kvm-s390.h"
 #include "gaccess.h"
+#include "trace.h"
+#include "trace-s390.h"
 
 static int handle_lctlg(struct kvm_vcpu *vcpu)
 {
@@ -45,6 +47,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 
 	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
 		   disp2);
+	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
 
 	do {
 		rc = get_guest_u64(vcpu, useraddr,
@@ -82,6 +85,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
 
 	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
 		   disp2);
+	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
 
 	reg = reg1;
 	do {
@@ -135,6 +139,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	vcpu->stat.exit_stop_request++;
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 
+	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
+
 	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
 		rc = SIE_INTERCEPT_RERUNVCPU;
@@ -171,6 +177,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 	int rc;
 
 	vcpu->stat.exit_validity++;
+	trace_kvm_s390_intercept_validity(vcpu, viwhy);
 	if (viwhy == 0x37) {
 		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
 				    vcpu->arch.gmap);
@@ -213,6 +220,9 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
 	intercept_handler_t handler;
 
 	vcpu->stat.exit_instruction++;
+	trace_kvm_s390_intercept_instruction(vcpu,
+					     vcpu->arch.sie_block->ipa,
+					     vcpu->arch.sie_block->ipb);
 	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
 	if (handler)
 		return handler(vcpu);
@@ -222,6 +232,7 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
 static int handle_prog(struct kvm_vcpu *vcpu)
 {
 	vcpu->stat.exit_program_interruption++;
+	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
 	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
 }
 
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b7bc1aac8ed2..ff1e2f8ef94a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -19,6 +19,7 @@
 #include <asm/uaccess.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
+#include "trace-s390.h"
 
 static int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
@@ -130,6 +131,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 	case KVM_S390_INT_EMERGENCY:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
 		vcpu->stat.deliver_emergency_signal++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->emerg.code, 0);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
 		if (rc == -EFAULT)
 			exception = 1;
@@ -152,6 +155,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 	case KVM_S390_INT_EXTERNAL_CALL:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
 		vcpu->stat.deliver_external_call++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->extcall.code, 0);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
 		if (rc == -EFAULT)
 			exception = 1;
@@ -175,6 +180,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
 			   inti->ext.ext_params);
 		vcpu->stat.deliver_service_signal++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->ext.ext_params, 0);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
 		if (rc == -EFAULT)
 			exception = 1;
@@ -198,6 +205,9 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
 		vcpu->stat.deliver_virtio_interrupt++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->ext.ext_params,
+						 inti->ext.ext_params2);
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
 		if (rc == -EFAULT)
 			exception = 1;
@@ -229,6 +239,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 	case KVM_S390_SIGP_STOP:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
 		vcpu->stat.deliver_stop_signal++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 0, 0);
 		__set_intercept_indicator(vcpu, inti);
 		break;
 
@@ -236,12 +248,16 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
 			   inti->prefix.address);
 		vcpu->stat.deliver_prefix_signal++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->prefix.address, 0);
 		kvm_s390_set_prefix(vcpu, inti->prefix.address);
 		break;
 
 	case KVM_S390_RESTART:
 		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
 		vcpu->stat.deliver_restart_signal++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 0, 0);
 		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
 			restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 		if (rc == -EFAULT)
@@ -259,6 +275,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			   inti->pgm.code,
 			   table[vcpu->arch.sie_block->ipa >> 14]);
 		vcpu->stat.deliver_program_int++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+						 inti->pgm.code, 0);
 		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
 		if (rc == -EFAULT)
 			exception = 1;
@@ -405,9 +423,7 @@ no_timer:
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_bh(&vcpu->arch.local_int.lock);
 		spin_unlock(&vcpu->arch.local_int.float_int->lock);
-		vcpu_put(vcpu);
 		schedule();
-		vcpu_load(vcpu);
 		spin_lock(&vcpu->arch.local_int.float_int->lock);
 		spin_lock_bh(&vcpu->arch.local_int.lock);
 	}
@@ -515,6 +531,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	inti->pgm.code = code;
 
 	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
 	spin_lock_bh(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
@@ -556,6 +573,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 		kfree(inti);
 		return -EINVAL;
 	}
+	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
+				 2);
 
 	mutex_lock(&kvm->lock);
 	fi = &kvm->arch.float_int;
@@ -621,6 +640,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 		kfree(inti);
 		return -EINVAL;
 	}
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
+				   s390int->parm64, 2);
 
 	mutex_lock(&vcpu->kvm->lock);
 	li = &vcpu->arch.local_int;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d470ccbfabae..ecced9d18986 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -32,6 +32,10 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#include "trace-s390.h"
+
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -242,6 +246,7 @@ out_err:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		clear_bit(63 - vcpu->vcpu_id,
 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
@@ -417,6 +422,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto out_free_sie_block;
 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
 		 vcpu->arch.sie_block);
+	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
 
 	return vcpu;
 out_free_sie_block:
@@ -607,18 +613,22 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
+	trace_kvm_s390_sie_enter(vcpu,
+				 atomic_read(&vcpu->arch.sie_block->cpuflags));
 	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
 	if (rc) {
 		if (kvm_is_ucontrol(vcpu->kvm)) {
 			rc = SIE_INTERCEPT_UCONTROL;
 		} else {
 			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+			trace_kvm_s390_sie_fault(vcpu);
 			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 			rc = 0;
 		}
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
+	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 	local_irq_disable();
 	kvm_guest_exit();
 	local_irq_enable();
@@ -959,7 +969,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	return;
 }
 
-void kvm_arch_flush_shadow(struct kvm *kvm)
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot)
 {
 }
 
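The CREATE_TRACE_POINTS block added near the top of kvm-s390.c above follows the usual kernel tracepoint convention: exactly one compilation unit defines CREATE_TRACE_POINTS before including the trace headers, so the TRACE_EVENT() macros expand into the actual tracepoint definitions in that object file; every other file (diag.c, intercept.c, priv.c, sigp.c) includes the same headers without the define and only gets declarations plus the trace_...() wrappers. A rough sketch of that split, with invented file and event names:

/* foo_main.c -- exactly one .c file emits the tracepoint definitions */
#define CREATE_TRACE_POINTS
#include "foo_trace.h"

/* foo_other.c -- every other user includes the header plainly ... */
#include "foo_trace.h"

static void foo_handler(unsigned int id)
{
	/* ... and simply calls the generated wrapper (invented name) */
	trace_kvm_foo_event(id);
}

Defining CREATE_TRACE_POINTS in more than one file would produce duplicate definitions at link time, which is presumably why only kvm-s390.c carries it here.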
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 310be61bead7..d768906f15c8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -20,6 +20,7 @@
 #include <asm/sysinfo.h>
 #include "gaccess.h"
 #include "kvm-s390.h"
+#include "trace.h"
 
 static int handle_set_prefix(struct kvm_vcpu *vcpu)
 {
@@ -59,6 +60,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	kvm_s390_set_prefix(vcpu, address);
 
 	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
+	trace_kvm_s390_handle_prefix(vcpu, 1, address);
 out:
 	return 0;
 }
@@ -91,6 +93,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	}
 
 	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+	trace_kvm_s390_handle_prefix(vcpu, 0, address);
 out:
 	return 0;
 }
@@ -119,6 +122,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	}
 
 	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
+	trace_kvm_s390_handle_stap(vcpu, useraddr);
 out:
 	return 0;
 }
@@ -164,9 +168,11 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 			   &facility_list, sizeof(facility_list));
 	if (rc == -EFAULT)
 		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	else
+	else {
 		VCPU_EVENT(vcpu, 5, "store facility list value %x",
 			   facility_list);
+		trace_kvm_s390_handle_stfl(vcpu, facility_list);
+	}
 	return 0;
 }
 
@@ -278,6 +284,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		goto out_mem;
 	}
+	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
 	free_page(mem);
 	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
 	vcpu->run->s.regs.gprs[0] = 0;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 56f80e1f98f7..566ddf6e8dfb 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -18,6 +18,7 @@
 #include <asm/sigp.h>
 #include "gaccess.h"
 #include "kvm-s390.h"
+#include "trace.h"
 
 static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 			u64 *reg)
@@ -344,6 +345,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	else
 		parameter = vcpu->run->s.regs.gprs[r1 + 1];
 
+	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
 	switch (order_code) {
 	case SIGP_SENSE:
 		vcpu->stat.instruction_sigp_sense++;
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
new file mode 100644
index 000000000000..90fdf85b5ff7
--- /dev/null
+++ b/arch/s390/kvm/trace-s390.h
@@ -0,0 +1,210 @@
+#if !defined(_TRACE_KVMS390_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVMS390_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm-s390
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-s390
+
+/*
+ * Trace point for the creation of the kvm instance.
+ */
+TRACE_EVENT(kvm_s390_create_vm,
+	TP_PROTO(unsigned long type),
+	TP_ARGS(type),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, type)
+		),
+
+	TP_fast_assign(
+		__entry->type = type;
+		),
+
+	TP_printk("create vm%s",
+		  __entry->type & KVM_VM_S390_UCONTROL ? " (UCONTROL)" : "")
+	);
+
+/*
+ * Trace points for creation and destruction of vpcus.
+ */
+TRACE_EVENT(kvm_s390_create_vcpu,
+	TP_PROTO(unsigned int id, struct kvm_vcpu *vcpu,
+		 struct kvm_s390_sie_block *sie_block),
+	TP_ARGS(id, vcpu, sie_block),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(struct kvm_vcpu *, vcpu)
+		__field(struct kvm_s390_sie_block *, sie_block)
+		),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->vcpu = vcpu;
+		__entry->sie_block = sie_block;
+		),
+
+	TP_printk("create cpu %d at %p, sie block at %p", __entry->id,
+		  __entry->vcpu, __entry->sie_block)
+	);
+
+TRACE_EVENT(kvm_s390_destroy_vcpu,
+	TP_PROTO(unsigned int id),
+	TP_ARGS(id),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		),
+
+	TP_fast_assign(
+		__entry->id = id;
+		),
+
+	TP_printk("destroy cpu %d", __entry->id)
+	);
+
+/*
+ * Trace points for injection of interrupts, either per machine or
+ * per vcpu.
+ */
+
+#define kvm_s390_int_type \
+	{KVM_S390_SIGP_STOP, "sigp stop"}, \
+	{KVM_S390_PROGRAM_INT, "program interrupt"}, \
+	{KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"}, \
+	{KVM_S390_RESTART, "sigp restart"}, \
+	{KVM_S390_INT_VIRTIO, "virtio interrupt"}, \
+	{KVM_S390_INT_SERVICE, "sclp interrupt"}, \
+	{KVM_S390_INT_EMERGENCY, "sigp emergency"}, \
+	{KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"}
+
+TRACE_EVENT(kvm_s390_inject_vm,
+	TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who),
+	TP_ARGS(type, parm, parm64, who),
+
+	TP_STRUCT__entry(
+		__field(__u32, inttype)
+		__field(__u32, parm)
+		__field(__u64, parm64)
+		__field(int, who)
+		),
+
+	TP_fast_assign(
+		__entry->inttype = type & 0x00000000ffffffff;
+		__entry->parm = parm;
+		__entry->parm64 = parm64;
+		__entry->who = who;
+		),
+
+	TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx",
+		  (__entry->who == 1) ? " (from kernel)" :
+		  (__entry->who == 2) ? " (from user)" : "",
+		  __entry->inttype,
+		  __print_symbolic(__entry->inttype, kvm_s390_int_type),
+		  __entry->parm, __entry->parm64)
+	);
+
+TRACE_EVENT(kvm_s390_inject_vcpu,
+	TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \
+		 int who),
+	TP_ARGS(id, type, parm, parm64, who),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(__u32, inttype)
+		__field(__u32, parm)
+		__field(__u64, parm64)
+		__field(int, who)
+		),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->inttype = type & 0x00000000ffffffff;
+		__entry->parm = parm;
+		__entry->parm64 = parm64;
+		__entry->who = who;
+		),
+
+	TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
+		  (__entry->who == 1) ? " (from kernel)" :
+		  (__entry->who == 2) ? " (from user)" : "",
+		  __entry->id, __entry->inttype,
+		  __print_symbolic(__entry->inttype, kvm_s390_int_type),
+		  __entry->parm, __entry->parm64)
+	);
+
+/*
+ * Trace point for the actual delivery of interrupts.
+ */
+TRACE_EVENT(kvm_s390_deliver_interrupt,
+	TP_PROTO(unsigned int id, __u64 type, __u32 data0, __u64 data1),
+	TP_ARGS(id, type, data0, data1),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(__u32, inttype)
+		__field(__u32, data0)
+		__field(__u64, data1)
+		),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->inttype = type & 0x00000000ffffffff;
+		__entry->data0 = data0;
+		__entry->data1 = data1;
+		),
+
+	TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
+		  "data:%08x %016llx",
+		  __entry->id, __entry->inttype,
+		  __print_symbolic(__entry->inttype, kvm_s390_int_type),
+		  __entry->data0, __entry->data1)
+	);
+
+/*
+ * Trace point for resets that may be requested from userspace.
+ */
+TRACE_EVENT(kvm_s390_request_resets,
+	TP_PROTO(__u64 resets),
+	TP_ARGS(resets),
+
+	TP_STRUCT__entry(
+		__field(__u64, resets)
+		),
+
+	TP_fast_assign(
+		__entry->resets = resets;
+		),
+
+	TP_printk("requesting userspace resets %llx",
+		  __entry->resets)
+	);
+
+/*
+ * Trace point for a vcpu's stop requests.
+ */
+TRACE_EVENT(kvm_s390_stop_request,
+	TP_PROTO(unsigned int action_bits),
+	TP_ARGS(action_bits),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, action_bits)
+		),
+
+	TP_fast_assign(
+		__entry->action_bits = action_bits;
+		),
+
+	TP_printk("stop request, action_bits = %08x",
+		  __entry->action_bits)
+	);
+
+
+#endif /* _TRACE_KVMS390_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
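
Since trace-s390.h sets TRACE_SYSTEM to kvm-s390, the events defined above appear as one group under the tracing events directory on a kernel built with tracing support. A small userspace sketch that switches the group on and streams the formatted records follows; it assumes tracefs is mounted at /sys/kernel/debug/tracing (other setups may use /sys/kernel/tracing) and that the group directory follows TRACE_SYSTEM verbatim, so treat the paths as assumptions rather than guarantees.

#include <stdio.h>

#define TRACEFS "/sys/kernel/debug/tracing"	/* assumed mount point */

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *pipe;

	/* enable every event in the kvm-s390 group */
	if (write_str(TRACEFS "/events/kvm-s390/enable", "1"))
		return 1;

	/* stream the formatted records (e.g. "destroy cpu 0") as they arrive */
	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe)
		return 1;
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}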
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
new file mode 100644
index 000000000000..2b29e62351d3
--- /dev/null
+++ b/arch/s390/kvm/trace.h
@@ -0,0 +1,341 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+#include <asm/sigp.h>
+#include <asm/debug.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Helpers for vcpu-specific tracepoints containing the same information
+ * as s390dbf VCPU_EVENTs.
+ */
+#define VCPU_PROTO_COMMON struct kvm_vcpu *vcpu
+#define VCPU_ARGS_COMMON vcpu
+#define VCPU_FIELD_COMMON __field(int, id) \
+	__field(unsigned long, pswmask) \
+	__field(unsigned long, pswaddr)
+#define VCPU_ASSIGN_COMMON do { \
+	__entry->id = vcpu->vcpu_id; \
+	__entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
+	__entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
+	} while (0);
+#define VCPU_TP_PRINTK(p_str, p_args...) \
+	TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \
+		  __entry->pswmask, __entry->pswaddr, p_args)
+
+/*
+ * Tracepoints for SIE entry and exit.
+ */
+TRACE_EVENT(kvm_s390_sie_enter,
+	TP_PROTO(VCPU_PROTO_COMMON, int cpuflags),
+	TP_ARGS(VCPU_ARGS_COMMON, cpuflags),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(int, cpuflags)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->cpuflags = cpuflags;
+		),
+
+	VCPU_TP_PRINTK("entering sie flags %x", __entry->cpuflags)
+	);
+
+TRACE_EVENT(kvm_s390_sie_fault,
+	TP_PROTO(VCPU_PROTO_COMMON),
+	TP_ARGS(VCPU_ARGS_COMMON),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		),
+
+	VCPU_TP_PRINTK("%s", "fault in sie instruction")
+	);
+
+#define sie_intercept_code \
+	{0x04, "Instruction"}, \
+	{0x08, "Program interruption"}, \
+	{0x0C, "Instruction and program interuption"}, \
+	{0x10, "External request"}, \
+	{0x14, "External interruption"}, \
+	{0x18, "I/O request"}, \
+	{0x1C, "Wait state"}, \
+	{0x20, "Validity"}, \
+	{0x28, "Stop request"}
+
+TRACE_EVENT(kvm_s390_sie_exit,
+	TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode),
+	TP_ARGS(VCPU_ARGS_COMMON, icptcode),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(u8, icptcode)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->icptcode = icptcode;
+		),
+
+	VCPU_TP_PRINTK("exit sie icptcode %d (%s)", __entry->icptcode,
+		       __print_symbolic(__entry->icptcode,
+					sie_intercept_code))
+	);
+
+/*
+ * Trace point for intercepted instructions.
+ */
+TRACE_EVENT(kvm_s390_intercept_instruction,
+	TP_PROTO(VCPU_PROTO_COMMON, __u16 ipa, __u32 ipb),
+	TP_ARGS(VCPU_ARGS_COMMON, ipa, ipb),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(__u64, instruction)
+		__field(char, insn[8])
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->instruction = ((__u64)ipa << 48) |
+				       ((__u64)ipb << 16);
+		),
+
+	VCPU_TP_PRINTK("intercepted instruction %016llx (%s)",
+		       __entry->instruction,
+		       insn_to_mnemonic((unsigned char *)
+					&__entry->instruction,
+					__entry->insn) ?
+		       "unknown" : __entry->insn)
+	);
+
+/*
+ * Trace point for intercepted program interruptions.
+ */
+TRACE_EVENT(kvm_s390_intercept_prog,
+	TP_PROTO(VCPU_PROTO_COMMON, __u16 code),
+	TP_ARGS(VCPU_ARGS_COMMON, code),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(__u16, code)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->code = code;
+		),
+
+	VCPU_TP_PRINTK("intercepted program interruption %04x",
+		       __entry->code)
+	);
+
+/*
+ * Trace point for validity intercepts.
+ */
+TRACE_EVENT(kvm_s390_intercept_validity,
+	TP_PROTO(VCPU_PROTO_COMMON, __u16 viwhy),
+	TP_ARGS(VCPU_ARGS_COMMON, viwhy),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(__u16, viwhy)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->viwhy = viwhy;
+		),
+
+	VCPU_TP_PRINTK("got validity intercept %04x", __entry->viwhy)
+	);
+
+/*
+ * Trace points for instructions that are of special interest.
+ */
+
+#define sigp_order_codes \
+	{SIGP_SENSE, "sense"}, \
+	{SIGP_EXTERNAL_CALL, "external call"}, \
+	{SIGP_EMERGENCY_SIGNAL, "emergency signal"}, \
+	{SIGP_STOP, "stop"}, \
+	{SIGP_STOP_AND_STORE_STATUS, "stop and store status"}, \
+	{SIGP_SET_ARCHITECTURE, "set architecture"}, \
+	{SIGP_SET_PREFIX, "set prefix"}, \
+	{SIGP_SENSE_RUNNING, "sense running"}, \
+	{SIGP_RESTART, "restart"}
+
+TRACE_EVENT(kvm_s390_handle_sigp,
+	TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \
+		 __u32 parameter),
+	TP_ARGS(VCPU_ARGS_COMMON, order_code, cpu_addr, parameter),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(__u8, order_code)
+		__field(__u16, cpu_addr)
+		__field(__u32, parameter)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->order_code = order_code;
+		__entry->cpu_addr = cpu_addr;
+		__entry->parameter = parameter;
+		),
+
+	VCPU_TP_PRINTK("handle sigp order %02x (%s), cpu address %04x, " \
+		       "parameter %08x", __entry->order_code,
+		       __print_symbolic(__entry->order_code,
+					sigp_order_codes),
+		       __entry->cpu_addr, __entry->parameter)
+	);
+
+#define diagnose_codes \
+	{0x10, "release pages"}, \
+	{0x44, "time slice end"}, \
+	{0x308, "ipl functions"}, \
+	{0x500, "kvm hypercall"}, \
+	{0x501, "kvm breakpoint"}
+
+TRACE_EVENT(kvm_s390_handle_diag,
+	TP_PROTO(VCPU_PROTO_COMMON, __u16 code),
+	TP_ARGS(VCPU_ARGS_COMMON, code),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(__u16, code)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->code = code;
+		),
+
+	VCPU_TP_PRINTK("handle diagnose call %04x (%s)", __entry->code,
+		       __print_symbolic(__entry->code, diagnose_codes))
+	);
+
+TRACE_EVENT(kvm_s390_handle_lctl,
+	TP_PROTO(VCPU_PROTO_COMMON, int g, int reg1, int reg3, u64 addr),
+	TP_ARGS(VCPU_ARGS_COMMON, g, reg1, reg3, addr),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(int, g)
+		__field(int, reg1)
+		__field(int, reg3)
+		__field(u64, addr)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->g = g;
+		__entry->reg1 = reg1;
+		__entry->reg3 = reg3;
+		__entry->addr = addr;
+		),
+
+	VCPU_TP_PRINTK("%s: loading cr %x-%x from %016llx",
+		       __entry->g ? "lctlg" : "lctl",
+		       __entry->reg1, __entry->reg3, __entry->addr)
+	);
+
+TRACE_EVENT(kvm_s390_handle_prefix,
+	TP_PROTO(VCPU_PROTO_COMMON, int set, u32 address),
+	TP_ARGS(VCPU_ARGS_COMMON, set, address),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(int, set)
+		__field(u32, address)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->set = set;
+		__entry->address = address;
+		),
+
+	VCPU_TP_PRINTK("%s prefix to %08x",
+		       __entry->set ? "setting" : "storing",
+		       __entry->address)
+	);
+
+TRACE_EVENT(kvm_s390_handle_stap,
+	TP_PROTO(VCPU_PROTO_COMMON, u64 address),
+	TP_ARGS(VCPU_ARGS_COMMON, address),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(u64, address)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->address = address;
+		),
+
+	VCPU_TP_PRINTK("storing cpu address to %016llx",
+		       __entry->address)
+	);
+
+TRACE_EVENT(kvm_s390_handle_stfl,
+	TP_PROTO(VCPU_PROTO_COMMON, unsigned int facility_list),
+	TP_ARGS(VCPU_ARGS_COMMON, facility_list),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(unsigned int, facility_list)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->facility_list = facility_list;
+		),
+
+	VCPU_TP_PRINTK("store facility list value %08x",
+		       __entry->facility_list)
+	);
+
+TRACE_EVENT(kvm_s390_handle_stsi,
+	TP_PROTO(VCPU_PROTO_COMMON, int fc, int sel1, int sel2, u64 addr),
+	TP_ARGS(VCPU_ARGS_COMMON, fc, sel1, sel2, addr),
+
+	TP_STRUCT__entry(
+		VCPU_FIELD_COMMON
+		__field(int, fc)
+		__field(int, sel1)
+		__field(int, sel2)
+		__field(u64, addr)
+		),
+
+	TP_fast_assign(
+		VCPU_ASSIGN_COMMON
+		__entry->fc = fc;
+		__entry->sel1 = sel1;
+		__entry->sel2 = sel2;
+		__entry->addr = addr;
+		),
+
+	VCPU_TP_PRINTK("STSI %d.%d.%d information stored to %016llx",
+		       __entry->fc, __entry->sel1, __entry->sel2,
+		       __entry->addr)
+	);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
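
The VCPU_PROTO_COMMON, VCPU_ARGS_COMMON, VCPU_FIELD_COMMON, VCPU_ASSIGN_COMMON and VCPU_TP_PRINTK helpers defined above mean that every vcpu-scoped event automatically records and prints the vcpu id and the guest PSW in front of its own payload, matching the s390dbf output format. Purely as an illustration of reusing those helpers (kvm_s390_handle_bar and its value field are invented and not part of this patch), a further event would look like this:

TRACE_EVENT(kvm_s390_handle_bar,
	TP_PROTO(VCPU_PROTO_COMMON, u32 value),
	TP_ARGS(VCPU_ARGS_COMMON, value),

	TP_STRUCT__entry(
		VCPU_FIELD_COMMON	/* vcpu id + PSW, as for all events above */
		__field(u32, value)
		),

	TP_fast_assign(
		VCPU_ASSIGN_COMMON
		__entry->value = value;
		),

	VCPU_TP_PRINTK("handled bar with value %08x", __entry->value)
	);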