aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2010-08-02 15:25:33 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:52:07 -0400
commit928d78be54014e65498e289fdc3f82acc4b804a9 (patch)
treefea2b1f5c4c322d3381f32ba43a4a77ec82936af /arch/powerpc
parente7c1d14e3bf40b87e6a3f68964b36dbb2c875c0f (diff)
KVM: PPC: Move slb debugging to tracepoints
This patch moves the shadow SLB debugging printks over to tracepoints. Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c22
-rw-r--r--arch/powerpc/kvm/trace.h73
2 files changed, 78 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ebb1b5ddabfb..321c931f691c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -33,14 +33,6 @@
33#define PTE_SIZE 12 33#define PTE_SIZE 12
34#define VSID_ALL 0 34#define VSID_ALL 0
35 35
36/* #define DEBUG_SLB */
37
38#ifdef DEBUG_SLB
39#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
40#else
41#define dprintk_slb(a, ...) do { } while(0)
42#endif
43
44void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 36void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
45{ 37{
46 ppc_md.hpte_invalidate(pte->slot, pte->host_va, 38 ppc_md.hpte_invalidate(pte->slot, pte->host_va,
@@ -66,20 +58,17 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
66 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); 58 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
67 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; 59 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
68 if (map->valid && (map->guest_vsid == gvsid)) { 60 if (map->valid && (map->guest_vsid == gvsid)) {
69 dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n", 61 trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
70 gvsid, map->host_vsid);
71 return map; 62 return map;
72 } 63 }
73 64
74 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; 65 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
75 if (map->valid && (map->guest_vsid == gvsid)) { 66 if (map->valid && (map->guest_vsid == gvsid)) {
76 dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", 67 trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
77 gvsid, map->host_vsid);
78 return map; 68 return map;
79 } 69 }
80 70
81 dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n", 71 trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
82 sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
83 return NULL; 72 return NULL;
84} 73}
85 74
@@ -205,8 +194,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
205 map->guest_vsid = gvsid; 194 map->guest_vsid = gvsid;
206 map->valid = true; 195 map->valid = true;
207 196
208 dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n", 197 trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);
209 sid_map_mask, gvsid, map->host_vsid);
210 198
211 return map; 199 return map;
212} 200}
@@ -278,7 +266,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
278 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; 266 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
279 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; 267 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
280 268
281 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); 269 trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
282 270
283 return 0; 271 return 0;
284} 272}
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 23f757a69163..3aca1b042b8c 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -262,6 +262,79 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
262 __entry->count, __entry->type, __entry->p1, __entry->p2) 262 __entry->count, __entry->type, __entry->p1, __entry->p2)
263); 263);
264 264
265TRACE_EVENT(kvm_book3s_slb_found,
266 TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
267 TP_ARGS(gvsid, hvsid),
268
269 TP_STRUCT__entry(
270 __field( unsigned long long, gvsid )
271 __field( unsigned long long, hvsid )
272 ),
273
274 TP_fast_assign(
275 __entry->gvsid = gvsid;
276 __entry->hvsid = hvsid;
277 ),
278
279 TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
280);
281
282TRACE_EVENT(kvm_book3s_slb_fail,
283 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
284 TP_ARGS(sid_map_mask, gvsid),
285
286 TP_STRUCT__entry(
287 __field( unsigned short, sid_map_mask )
288 __field( unsigned long long, gvsid )
289 ),
290
291 TP_fast_assign(
292 __entry->sid_map_mask = sid_map_mask;
293 __entry->gvsid = gvsid;
294 ),
295
296 TP_printk("%x/%x: %llx", __entry->sid_map_mask,
297 SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
298);
299
300TRACE_EVENT(kvm_book3s_slb_map,
301 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
302 unsigned long long hvsid),
303 TP_ARGS(sid_map_mask, gvsid, hvsid),
304
305 TP_STRUCT__entry(
306 __field( unsigned short, sid_map_mask )
307 __field( unsigned long long, guest_vsid )
308 __field( unsigned long long, host_vsid )
309 ),
310
311 TP_fast_assign(
312 __entry->sid_map_mask = sid_map_mask;
313 __entry->guest_vsid = gvsid;
314 __entry->host_vsid = hvsid;
315 ),
316
317 TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
318 __entry->guest_vsid, __entry->host_vsid)
319);
320
321TRACE_EVENT(kvm_book3s_slbmte,
322 TP_PROTO(u64 slb_vsid, u64 slb_esid),
323 TP_ARGS(slb_vsid, slb_esid),
324
325 TP_STRUCT__entry(
326 __field( u64, slb_vsid )
327 __field( u64, slb_esid )
328 ),
329
330 TP_fast_assign(
331 __entry->slb_vsid = slb_vsid;
332 __entry->slb_esid = slb_esid;
333 ),
334
335 TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
336);
337
265#endif /* CONFIG_PPC_BOOK3S */ 338#endif /* CONFIG_PPC_BOOK3S */
266 339
267#endif /* _TRACE_KVM_H */ 340#endif /* _TRACE_KVM_H */