author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2011-07-11 15:34:24 -0400
committer  Avi Kivity <avi@redhat.com>                      2011-07-24 04:50:41 -0400
commit     4f0226482d20f104e943ee9e6f1218b573953f63
tree       5e74a1b7829e48433640de3708e593b365cb7405 /arch
parent     ce88decffd17bf9f373cc233c961ad2054965667
KVM: MMU: trace mmio page fault
Add tracepoints to trace mmio page fault

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c       |  5
-rw-r--r--  arch/x86/kvm/mmutrace.h  | 48
-rw-r--r--  arch/x86/kvm/trace.h     | 23
-rw-r--r--  arch/x86/kvm/x86.c       |  5
4 files changed, 80 insertions, 1 deletion
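The diff below adds four trace events: kvm_mmu_delay_free_pages, mark_mmio_spte and handle_mmio_page_fault in arch/x86/kvm/mmutrace.h, and vcpu_match_mmio in arch/x86/kvm/trace.h, plus the call sites that fire them. As an illustrative sketch (not part of the commit), the new events could be enabled from user space through the tracing filesystem; the paths assume debugfs is mounted at /sys/kernel/debug and that the events land under the "kvmmmu" and "kvm" trace systems defined by those headers.

/*
 * Sketch only: enable the tracepoints added by this patch by writing "1"
 * to their tracefs "enable" files.  Paths assume debugfs is mounted at
 * /sys/kernel/debug; the trace system names ("kvmmmu", "kvm") follow the
 * TRACE_SYSTEM definitions in mmutrace.h and trace.h.
 */
#include <stdio.h>

static int enable_event(const char *path)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;	/* tracefs not mounted or event not present */
	fputs("1\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	static const char *events[] = {
		"/sys/kernel/debug/tracing/events/kvmmmu/mark_mmio_spte/enable",
		"/sys/kernel/debug/tracing/events/kvmmmu/handle_mmio_page_fault/enable",
		"/sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_delay_free_pages/enable",
		"/sys/kernel/debug/tracing/events/kvm/vcpu_match_mmio/enable",
	};
	unsigned int i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		enable_event(events[i]);

	/* Records then appear in /sys/kernel/debug/tracing/trace_pipe. */
	return 0;
}

Once enabled, the records show up in trace_pipe in the formats given by the TP_printk() strings in the diff.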
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4e22df6f93ec..9335e1bf72ad 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -211,6 +211,7 @@ static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
 {
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
 
+	trace_mark_mmio_spte(sptep, gfn, access);
 	mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
 }
 
@@ -1940,6 +1941,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 		kvm_mmu_isolate_pages(invalid_list);
 		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
 		list_del_init(invalid_list);
+
+		trace_kvm_mmu_delay_free_pages(sp);
 		call_rcu(&sp->rcu, free_pages_rcu);
 		return;
 	}
@@ -2938,6 +2941,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
 	if (direct)
 		addr = 0;
+
+	trace_handle_mmio_page_fault(addr, gfn, access);
 	vcpu_cache_mmio_info(vcpu, addr, gfn, access);
 	return 1;
 }
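For context, mark_mmio_spte() in the first hunk above packs the gfn and access bits into the spte it installs (shadow_mmio_mask | access | gfn << PAGE_SHIFT), and the new tracepoint records the same gfn/access pair before encoding. The sketch below is illustrative only: the mask and shift constants are stand-ins, not the values KVM actually uses.

/*
 * Illustrative only: encode and decode a gfn/access pair the way
 * mark_mmio_spte() does above.  EXAMPLE_* constants are stand-ins;
 * the real masks live in the KVM MMU code.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12
#define EXAMPLE_MMIO_MASK	(3ULL << 62)	/* stand-in for shadow_mmio_mask */
#define EXAMPLE_ACCESS_MASK	0x7ULL		/* low bits carry the access flags */

int main(void)
{
	uint64_t gfn = 0x1234;
	unsigned access = 0x6;	/* e.g. write | user */
	uint64_t spte = EXAMPLE_MMIO_MASK | access | (gfn << EXAMPLE_PAGE_SHIFT);

	/* Decoding reverses the encoding used at the mark_mmio_spte() site. */
	printf("gfn=%llx access=%x\n",
	       (unsigned long long)((spte & ~EXAMPLE_MMIO_MASK) >> EXAMPLE_PAGE_SHIFT),
	       (unsigned)(spte & EXAMPLE_ACCESS_MASK));
	return 0;
}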
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index b60b4fdb3eda..eed67f34146d 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -196,6 +196,54 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
 	TP_ARGS(sp)
 );
 
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages,
+	TP_PROTO(struct kvm_mmu_page *sp),
+
+	TP_ARGS(sp)
+);
+
+TRACE_EVENT(
+	mark_mmio_spte,
+	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
+	TP_ARGS(sptep, gfn, access),
+
+	TP_STRUCT__entry(
+		__field(void *, sptep)
+		__field(gfn_t, gfn)
+		__field(unsigned, access)
+	),
+
+	TP_fast_assign(
+		__entry->sptep = sptep;
+		__entry->gfn = gfn;
+		__entry->access = access;
+	),
+
+	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
+		  __entry->access)
+);
+
+TRACE_EVENT(
+	handle_mmio_page_fault,
+	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
+	TP_ARGS(addr, gfn, access),
+
+	TP_STRUCT__entry(
+		__field(u64, addr)
+		__field(gfn_t, gfn)
+		__field(unsigned, access)
+	),
+
+	TP_fast_assign(
+		__entry->addr = addr;
+		__entry->gfn = gfn;
+		__entry->access = access;
+	),
+
+	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
+		  __entry->access)
+);
+
 TRACE_EVENT(
 	kvm_mmu_audit,
 	TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 624f8cb46a6b..3ff898c104f7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -698,6 +698,29 @@ TRACE_EVENT(kvm_emulate_insn,
 #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
 #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
 
+TRACE_EVENT(
+	vcpu_match_mmio,
+	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
+	TP_ARGS(gva, gpa, write, gpa_match),
+
+	TP_STRUCT__entry(
+		__field(gva_t, gva)
+		__field(gpa_t, gpa)
+		__field(bool, write)
+		__field(bool, gpa_match)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gpa = gpa;
+		__entry->write = write;
+		__entry->gpa_match = gpa_match
+		),
+
+	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
+		  __entry->write ? "Write" : "Read",
+		  __entry->gpa_match ? "GPA" : "GVA")
+	);
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c9661f230a9..84a28ea45fa4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4021,6 +4021,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 				  vcpu->arch.access)) {
 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 			(gva & (PAGE_SIZE - 1));
+		trace_vcpu_match_mmio(gva, *gpa, write, false);
 		return 1;
 	}
 
@@ -4036,8 +4037,10 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		return 1;
 
-	if (vcpu_match_mmio_gpa(vcpu, *gpa))
+	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
+		trace_vcpu_match_mmio(gva, *gpa, write, true);
 		return 1;
+	}
 
 	return 0;
 }