about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author Avi Kivity <avi@redhat.com> 2009-07-06 05:21:32 -0400
committer Avi Kivity <avi@redhat.com> 2009-09-10 01:33:09 -0400
commit 07420171593908406c3a59d6f884d426a921a5ea (patch)
tree 987488098509c4687ed8898c44d0bf4e061c1b4f /arch
parent dc7e795e3dd2a763e5ceaa1615f307e808cf3932 (diff)
KVM: MMU: Trace guest pagetable walker
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- arch/x86/kvm/mmu.c | 3
-rw-r--r-- arch/x86/kvm/mmutrace.h | 117
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 11
3 files changed, 128 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b67585c1ef08..c0dda6447b9f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -140,6 +140,9 @@ module_param(oos_shadow, bool, 0644);
140#define ACC_USER_MASK PT_USER_MASK 140#define ACC_USER_MASK PT_USER_MASK
141#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) 141#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
142 142
143#define CREATE_TRACE_POINTS
144#include "mmutrace.h"
145
143#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 146#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
144 147
145struct kvm_rmap_desc { 148struct kvm_rmap_desc {
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
new file mode 100644
index 000000000000..1367f82717d1
--- /dev/null
+++ b/arch/x86/kvm/mmutrace.h
@@ -0,0 +1,117 @@
1#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVMMMU_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvmmmu
8#define TRACE_INCLUDE_PATH .
9#define TRACE_INCLUDE_FILE mmutrace
10
11#define kvm_mmu_trace_pferr_flags \
12 { PFERR_PRESENT_MASK, "P" }, \
13 { PFERR_WRITE_MASK, "W" }, \
14 { PFERR_USER_MASK, "U" }, \
15 { PFERR_RSVD_MASK, "RSVD" }, \
16 { PFERR_FETCH_MASK, "F" }
17
18/*
19 * A pagetable walk has started
20 */
21TRACE_EVENT(
22 kvm_mmu_pagetable_walk,
23 TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
24 TP_ARGS(addr, write_fault, user_fault, fetch_fault),
25
26 TP_STRUCT__entry(
27 __field(__u64, addr)
28 __field(__u32, pferr)
29 ),
30
31 TP_fast_assign(
32 __entry->addr = addr;
33 __entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
34 | (!!fetch_fault << 4);
35 ),
36
37 TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
38 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
39);
40
41
42/* We just walked a paging element */
43TRACE_EVENT(
44 kvm_mmu_paging_element,
45 TP_PROTO(u64 pte, int level),
46 TP_ARGS(pte, level),
47
48 TP_STRUCT__entry(
49 __field(__u64, pte)
50 __field(__u32, level)
51 ),
52
53 TP_fast_assign(
54 __entry->pte = pte;
55 __entry->level = level;
56 ),
57
58 TP_printk("pte %llx level %u", __entry->pte, __entry->level)
59);
60
61/* We set a pte accessed bit */
62TRACE_EVENT(
63 kvm_mmu_set_accessed_bit,
64 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
65 TP_ARGS(table_gfn, index, size),
66
67 TP_STRUCT__entry(
68 __field(__u64, gpa)
69 ),
70
71 TP_fast_assign(
72 __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
73 + index * size;
74 ),
75
76 TP_printk("gpa %llx", __entry->gpa)
77);
78
79/* We set a pte dirty bit */
80TRACE_EVENT(
81 kvm_mmu_set_dirty_bit,
82 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
83 TP_ARGS(table_gfn, index, size),
84
85 TP_STRUCT__entry(
86 __field(__u64, gpa)
87 ),
88
89 TP_fast_assign(
90 __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
91 + index * size;
92 ),
93
94 TP_printk("gpa %llx", __entry->gpa)
95);
96
97TRACE_EVENT(
98 kvm_mmu_walker_error,
99 TP_PROTO(u32 pferr),
100 TP_ARGS(pferr),
101
102 TP_STRUCT__entry(
103 __field(__u32, pferr)
104 ),
105
106 TP_fast_assign(
107 __entry->pferr = pferr;
108 ),
109
110 TP_printk("pferr %x %s", __entry->pferr,
111 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
112);
113
114#endif /* _TRACE_KVMMMU_H */
115
116/* This part must be outside protection */
117#include <trace/define_trace.h>
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 53e129cec5fd..36ac6d70a847 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -125,13 +125,15 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
125 gpa_t pte_gpa; 125 gpa_t pte_gpa;
126 int rsvd_fault = 0; 126 int rsvd_fault = 0;
127 127
128 pgprintk("%s: addr %lx\n", __func__, addr); 128 trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
129 fetch_fault);
129walk: 130walk:
130 walker->level = vcpu->arch.mmu.root_level; 131 walker->level = vcpu->arch.mmu.root_level;
131 pte = vcpu->arch.cr3; 132 pte = vcpu->arch.cr3;
132#if PTTYPE == 64 133#if PTTYPE == 64
133 if (!is_long_mode(vcpu)) { 134 if (!is_long_mode(vcpu)) {
134 pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3); 135 pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
136 trace_kvm_mmu_paging_element(pte, walker->level);
135 if (!is_present_gpte(pte)) 137 if (!is_present_gpte(pte))
136 goto not_present; 138 goto not_present;
137 --walker->level; 139 --walker->level;
@@ -150,10 +152,9 @@ walk:
150 pte_gpa += index * sizeof(pt_element_t); 152 pte_gpa += index * sizeof(pt_element_t);
151 walker->table_gfn[walker->level - 1] = table_gfn; 153 walker->table_gfn[walker->level - 1] = table_gfn;
152 walker->pte_gpa[walker->level - 1] = pte_gpa; 154 walker->pte_gpa[walker->level - 1] = pte_gpa;
153 pgprintk("%s: table_gfn[%d] %lx\n", __func__,
154 walker->level - 1, table_gfn);
155 155
156 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); 156 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
157 trace_kvm_mmu_paging_element(pte, walker->level);
157 158
158 if (!is_present_gpte(pte)) 159 if (!is_present_gpte(pte))
159 goto not_present; 160 goto not_present;
@@ -175,6 +176,8 @@ walk:
175#endif 176#endif
176 177
177 if (!(pte & PT_ACCESSED_MASK)) { 178 if (!(pte & PT_ACCESSED_MASK)) {
179 trace_kvm_mmu_set_accessed_bit(table_gfn, index,
180 sizeof(pte));
178 mark_page_dirty(vcpu->kvm, table_gfn); 181 mark_page_dirty(vcpu->kvm, table_gfn);
179 if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, 182 if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
180 index, pte, pte|PT_ACCESSED_MASK)) 183 index, pte, pte|PT_ACCESSED_MASK))
@@ -208,6 +211,7 @@ walk:
208 if (write_fault && !is_dirty_gpte(pte)) { 211 if (write_fault && !is_dirty_gpte(pte)) {
209 bool ret; 212 bool ret;
210 213
214 trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
211 mark_page_dirty(vcpu->kvm, table_gfn); 215 mark_page_dirty(vcpu->kvm, table_gfn);
212 ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte, 216 ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
213 pte|PT_DIRTY_MASK); 217 pte|PT_DIRTY_MASK);
@@ -239,6 +243,7 @@ err:
239 walker->error_code |= PFERR_FETCH_MASK; 243 walker->error_code |= PFERR_FETCH_MASK;
240 if (rsvd_fault) 244 if (rsvd_fault)
241 walker->error_code |= PFERR_RSVD_MASK; 245 walker->error_code |= PFERR_RSVD_MASK;
246 trace_kvm_mmu_walker_error(walker->error_code);
242 return 0; 247 return 0;
243} 248}
244 249