author		Avi Kivity <avi@redhat.com>	2009-07-06 08:58:14 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:33:10 -0400
commit		f691fe1da7e2715137d21ae5a80bec64db4625db
tree		831a24e7094543cc327cffe7a6fbecb4b58eb82d /arch/x86
parent		9c1b96e34717d001873b603d85434aa78e730282

KVM: Trace shadow page lifecycle

Create, sync, unsync, zap.

Signed-off-by: Avi Kivity <avi@redhat.com>
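The four events land under TRACE_SYSTEM kvmmmu, so with a kernel built from this tree they can be enabled through the standard ftrace event interface: assuming debugfs is mounted at /sys/kernel/debug (an assumption about the local setup), writing 1 to /sys/kernel/debug/tracing/events/kvmmmu/enable turns the whole group on, and the formatted records then show up in /sys/kernel/debug/tracing/trace. A small consumer sketch is given after the diff.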
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/kvm/mmu.c      |  10 ++++----
 -rw-r--r--  arch/x86/kvm/mmutrace.h | 103 ++++++++++++++++++++++++++++++++++
 2 files changed, 109 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c0dda6447b9f..ac121b39a5bc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1122,6 +1122,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		return 1;
 	}
 
+	trace_kvm_mmu_sync_page(sp);
 	if (rmap_write_protect(vcpu->kvm, sp->gfn))
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1244,8 +1245,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	pgprintk("%s: looking gfn %lx role %x\n", __func__,
-		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
@@ -1262,14 +1261,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 				kvm_mmu_mark_parents_unsync(vcpu, sp);
 			}
-			pgprintk("%s: found\n", __func__);
+			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
 		return sp;
-	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
@@ -1282,6 +1280,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	else
 		nonpaging_prefetch_page(vcpu, sp);
+	trace_kvm_mmu_get_page(sp, true);
 	return sp;
 }
 
@@ -1410,6 +1409,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	int ret;
+
+	trace_kvm_mmu_zap_page(sp);
 	++kvm->stat.mmu_shadow_zapped;
 	ret = mmu_zap_unsync_children(kvm, sp);
 	kvm_mmu_page_unlink_children(kvm, sp);
@@ -1656,6 +1657,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	struct kvm_mmu_page *s;
 	struct hlist_node *node, *n;
 
+	trace_kvm_mmu_unsync_page(sp);
 	index = kvm_page_table_hashfn(sp->gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	/* don't unsync if pagetable is shadowed with multiple roles */
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 1367f82717d1..3e4a5c6ca2a9 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -2,12 +2,48 @@
 #define _TRACE_KVMMMU_H
 
 #include <linux/tracepoint.h>
+#include <linux/ftrace_event.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE mmutrace
 
+#define KVM_MMU_PAGE_FIELDS \
+	__field(__u64, gfn) \
+	__field(__u32, role) \
+	__field(__u32, root_count) \
+	__field(__u32, unsync)
+
+#define KVM_MMU_PAGE_ASSIGN(sp)			\
+	__entry->gfn = sp->gfn;			\
+	__entry->role = sp->role.word;		\
+	__entry->root_count = sp->root_count;	\
+	__entry->unsync = sp->unsync;
+
+#define KVM_MMU_PAGE_PRINTK() ({					\
+	const char *ret = p->buffer + p->len;				\
+	static const char *access_str[] = {				\
+		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"	\
+	};								\
+	union kvm_mmu_page_role role;					\
+									\
+	role.word = __entry->role;					\
+									\
+	trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge"	\
+			 " %snxe root %u %s%c",				\
+			 __entry->gfn, role.level, role.glevels,	\
+			 role.quadrant,					\
+			 role.direct ? " direct" : "",			\
+			 access_str[role.access],			\
+			 role.invalid ? " invalid" : "",		\
+			 role.cr4_pge ? "" : "!",			\
+			 role.nxe ? "" : "!",				\
+			 __entry->root_count,				\
+			 __entry->unsync ? "unsync" : "sync", 0);	\
+	ret;								\
+	})
+
 #define kvm_mmu_trace_pferr_flags       \
 	{ PFERR_PRESENT_MASK, "P" },	\
 	{ PFERR_WRITE_MASK, "W" },	\
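To make the KVM_MMU_PAGE_PRINTK() output above concrete: a record for a synced 4-level shadow root with wux access and cr4.pge/nxe clear would render roughly as "sp gfn 3000 4/4 q0 wux !pge !nxe root 1 sync" (illustrative values only, not taken from a real trace). The fields are the gfn, shadow level / guest levels, quadrant, an optional " direct" tag, the access bits, an optional " invalid" tag, the negated pge and nxe flags, the root count, and the sync state; the trailing %c printed with 0 appends a terminating NUL to the trace seq buffer.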
@@ -111,6 +147,73 @@ TRACE_EVENT(
 	__print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
 );
 
+TRACE_EVENT(
+	kvm_mmu_get_page,
+	TP_PROTO(struct kvm_mmu_page *sp, bool created),
+	TP_ARGS(sp, created),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		__field(bool, created)
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		__entry->created = created;
+		),
+
+	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
+		  __entry->created ? "new" : "existing")
+);
+
+TRACE_EVENT(
+	kvm_mmu_sync_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
+
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
+TRACE_EVENT(
+	kvm_mmu_unsync_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
+
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
+TRACE_EVENT(
+	kvm_mmu_zap_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
+
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
 #endif /* _TRACE_KVMMMU_H */
 
 /* This part must be outside protection */
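Finally, a minimal consumer-side sketch, not part of this patch: it streams the formatted records from ftrace's trace_pipe. The debugfs mount point and the prior enabling of the kvmmmu event group are assumptions about the local setup.

/* Minimal sketch: tail the kvmmmu lifecycle events from trace_pipe.
 * Assumes debugfs is mounted at /sys/kernel/debug and that the events
 * were enabled first, e.g. by writing '1' to
 * /sys/kernel/debug/tracing/events/kvmmmu/enable.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/trace_pipe";
	char line[1024];
	FILE *fp = fopen(path, "r");

	if (!fp) {
		perror(path);
		return 1;
	}
	/* trace_pipe blocks until records arrive; reading consumes them */
	while (fgets(line, sizeof(line), fp))
		if (strstr(line, "kvm_mmu_"))	/* get/sync/unsync/zap_page */
			fputs(line, stdout);
	fclose(fp);
	return 0;
}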