author		Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 13:14:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 13:14:24 -0500
commit		55065bc52795faae549abfb912aacc622dd63876 (patch)
tree		63683547e41ed459a2a8747eeafb5e969633d54f /include/trace
parent		008d23e4852d78bb2618f2035f8b2110b6a6b968 (diff)
parent		e5c301428294cb8925667c9ee39f817c4ab1c2c9 (diff)
Merge branch 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (142 commits)
  KVM: Initialize fpu state in preemptible context
  KVM: VMX: when entering real mode align segment base to 16 bytes
  KVM: MMU: handle 'map_writable' in set_spte() function
  KVM: MMU: audit: allow audit more guests at the same time
  KVM: Fetch guest cr3 from hardware on demand
  KVM: Replace reads of vcpu->arch.cr3 by an accessor
  KVM: MMU: only write protect mappings at pagetable level
  KVM: VMX: Correct asm constraint in vmcs_load()/vmcs_clear()
  KVM: MMU: Initialize base_role for tdp mmus
  KVM: VMX: Optimize atomic EFER load
  KVM: VMX: Add definitions for more vm entry/exit control bits
  KVM: SVM: copy instruction bytes from VMCB
  KVM: SVM: implement enhanced INVLPG intercept
  KVM: SVM: enhance mov DR intercept handler
  KVM: SVM: enhance MOV CR intercept handler
  KVM: SVM: add new SVM feature bit names
  KVM: cleanup emulate_instruction
  KVM: move complete_insn_gp() into x86.c
  KVM: x86: fix CR8 handling
  KVM guest: Fix kvm clock initialization when it's configured out
  ...
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/events/kvm.h	| 121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51ab1cb..46e3cd8e197a 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason						\
+	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
+	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
+	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
+	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+	    TP_PROTO(__u32 reason, int errno),
+	    TP_ARGS(reason, errno),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	reason	)
+		__field(	int,	errno	)
+	),
+
+	TP_fast_assign(
+		__entry->reason	= reason;
+		__entry->errno	= errno;
+	),
+
+	TP_printk("reason %s (%d)",
+		  __entry->errno < 0 ?
+		  (__entry->errno == -EINTR ? "restart" : "error") :
+		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
 #if defined(__KVM_HAVE_IOAPIC)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -185,6 +215,97 @@ TRACE_EVENT(kvm_age_page,
 		__entry->referenced ? "YOUNG" : "OLD")
 );
 
+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		__field(u64, gfn)
+	),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gfn = gfn;
+	),
+
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, token)
+		__field(__u64, gva)
+	),
+
+	TP_fast_assign(
+		__entry->token = token;
+		__entry->gva = gva;
+	),
+
+	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_completed,
+	TP_PROTO(unsigned long address, struct page *page, u64 gva),
+	TP_ARGS(address, page, gva),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, address)
+		__field(pfn_t, pfn)
+		__field(u64, gva)
+	),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->pfn = page ? page_to_pfn(page) : 0;
+		__entry->gva = gva;
+	),
+
+	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
+		  __entry->address, __entry->pfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
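
For context, each TRACE_EVENT()/DEFINE_EVENT() in the patch above generates a trace_<event>() helper that KVM call sites invoke. Below is a minimal sketch of such a call site; the wrapper function example_vcpu_run() is a hypothetical name for illustration and is not part of this merge, while kvm_arch_vcpu_ioctl_run() and vcpu->run->exit_reason are the existing KVM interfaces the reason/errno arguments would naturally come from.

/*
 * Sketch only: example_vcpu_run() is an assumed wrapper, not code from this
 * merge.  It shows how the generated trace_kvm_userspace_exit() helper would
 * typically be called after a KVM_RUN round trip into the guest.
 */
#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;

	/* Enter the guest; r is 0 on a normal exit or a negative errno. */
	r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);

	/*
	 * TP_printk() decodes exit_reason through __print_symbolic() and the
	 * kvm_trace_exit_reason table; a negative errno is reported as
	 * "restart" (-EINTR) or "error" instead of the reason name.
	 */
	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);

	return r;
}

The CONFIG_KVM_ASYNC_PF events are used the same way: for example, trace_kvm_async_pf_not_present(token, gva) and trace_kvm_async_pf_ready(token, gva) mark the two halves of an asynchronous page fault and share the kvm_async_pf_nopresent_ready event class defined above.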