author		Liu Yu <yu.liu@freescale.com>	2011-12-20 09:42:56 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:40 -0500
commit		d37b1a037cae725e69e5bf96f58544b69d7c93a6 (patch)
tree		73db07a65bd9ebbeae0e982cbb5291a047dddb38
parent		82ed36164c8a8ee685ea3fb3c4f741214ac070ca (diff)
KVM: PPC: booke: Add booke206 TLB trace
The existing kvm_stlb_write/kvm_gtlb_write were a poor match for the
e500/book3e MMU -- mas1 was passed as "tid", mas2 was limited to
"unsigned int" which will be a problem on 64-bit, mas3/7 got split up
rather than treated as a single 64-bit word, etc.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
[scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
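For context on why mas7_3 is carried as a single 64-bit word, the sketch below (not part of this patch; decode_mas7_3 and the sample value are hypothetical, and the field layout follows the usual Book3E MAS3/MAS7 definitions) shows how a combined MAS7:MAS3 value decomposes into a real page number plus permission bits -- splitting it into two u32 trace arguments, as the old tracepoints did, obscures that single quantity.

/*
 * Illustrative only -- not from this patch.  Under the usual Book3E
 * layout, the low 12 bits of MAS3 hold attribute/permission bits and
 * everything above is the real page number, with MAS7 supplying the
 * bits above 32.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_mas7_3(uint64_t mas7_3)
{
	uint64_t rpn   = mas7_3 & ~0xfffULL;	/* real page base address */
	unsigned perms = mas7_3 & 0x3f;		/* SX/SW/SR/UX/UW/UR bits */

	printf("rpn=%#llx perms=%#x\n", (unsigned long long)rpn, perms);
}

int main(void)
{
	/* hypothetical entry: page at 0x1_2345_6000 with SW|SR set */
	decode_mas7_3(0x123456005ULL);
	return 0;
}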
-rw-r--r--	arch/powerpc/kvm/e500_tlb.c	10
-rw-r--r--	arch/powerpc/kvm/trace.h	57
2 files changed, 63 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 1746e677bf37..6e53e4164de1 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 	asm volatile("isync; tlbwe" : : : "memory");
 	local_irq_restore(flags);
+
+	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+				      stlbe->mas2, stlbe->mas7_3);
 }
 
 /*
@@ -332,8 +335,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 				MAS0_TLBSEL(1) |
 				MAS0_ESEL(to_htlb1_esel(sesel)));
 	}
-	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
-			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
 }
 
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
@@ -355,6 +356,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas8 = 0;
 
 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
 	preempt_enable();
@@ -954,8 +956,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	gtlbe->mas2 = vcpu->arch.shared->mas2;
 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
-			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
+	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
+				      gtlbe->mas2, gtlbe->mas7_3);
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 609d8bfb54e3..877186b7b1c3 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -340,6 +340,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
 
 #endif /* CONFIG_PPC_BOOK3S */
 
+
+/*************************************************************************
+ *                          Book3E trace points                          *
+ *************************************************************************/
+
+#ifdef CONFIG_BOOKE
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0	)
+		__field(	__u32,	mas8	)
+		__field(	__u32,	mas1	)
+		__field(	__u64,	mas2	)
+		__field(	__u64,	mas7_3	)
+	),
+
+	TP_fast_assign(
+		__entry->mas0 = mas0;
+		__entry->mas8 = mas8;
+		__entry->mas1 = mas1;
+		__entry->mas2 = mas2;
+		__entry->mas7_3 = mas7_3;
+	),
+
+	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas8, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0	)
+		__field(	__u32,	mas1	)
+		__field(	__u64,	mas2	)
+		__field(	__u64,	mas7_3	)
+	),
+
+	TP_fast_assign(
+		__entry->mas0 = mas0;
+		__entry->mas1 = mas1;
+		__entry->mas2 = mas2;
+		__entry->mas7_3 = mas7_3;
+	),
+
+	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
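As a usage note, once this patch is applied the two events should appear under the kvm trace system. The sketch below is an assumption about a conventional tracefs setup (mount point /sys/kernel/tracing and an events/kvm/ directory), not something the patch itself provides; it shows one way to enable the tracepoints from userspace before reading trace_pipe.

/*
 * Sketch only: enable the two new tracepoints by writing to tracefs.
 * Assumes tracefs is mounted at /sys/kernel/tracing and that the events
 * are exposed under the "kvm" system; adjust the path if tracing is
 * mounted under debugfs instead.
 */
#include <stdio.h>

static int enable_event(const char *name)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/kvm/%s/enable", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	enable_event("kvm_booke206_stlb_write");
	enable_event("kvm_booke206_gtlb_write");
	return 0;	/* then read /sys/kernel/tracing/trace_pipe */
}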