author    Frederic Weisbecker <fweisbec@gmail.com>  2009-07-01 15:02:09 -0400
committer Ingo Molnar <mingo@elte.hu>               2009-07-01 16:37:23 -0400
commit    0406ca6d8e849d9dd027c8cb6791448e81411aef (patch)
tree      3de38f9c75f41ec28b5b15b211f4e6df7ea7f437 /arch/x86/kernel/cpu/perf_counter.c
parent    5da50258584469ddfee6545feb4eb2252a8d7e7b (diff)
perf_counter: Ignore the nmi call frames in the x86-64 backtraces
Almost every callchain recorded with perf record includes the internal perfcounter NMI frames:

    perf_callchain
    perf_counter_overflow
    intel_pmu_handle_irq
    perf_counter_nmi_handler
    notifier_call_chain
    atomic_notifier_call_chain
    notify_die
    do_nmi
    nmi

We want to ignore these frames as they are not interesting for instrumentation. To solve this, we simply ignore every frame from NMI context.

New example of "perf report -s sym -c" after this patch:

    9.59%  [k] search_by_key
               4.88%
                  search_by_key
                  reiserfs_read_locked_inode
                  reiserfs_iget
                  reiserfs_lookup
                  do_lookup
                  __link_path_walk
                  path_walk
                  do_path_lookup
                  user_path_at
                  vfs_fstatat
                  vfs_lstat
                  sys_newlstat
                  system_call_fastpath
                  __lxstat
                  0x406fb1

               3.19%
                  search_by_key
                  search_by_entry_key
                  reiserfs_find_entry
                  reiserfs_lookup
                  do_lookup
                  __link_path_walk
                  path_walk
                  do_path_lookup
                  user_path_at
                  vfs_fstatat
                  vfs_lstat
                  sys_newlstat
                  system_call_fastpath
                  __lxstat
                  0x406fb1
    [...]

For now, this patch only solves the problem on x86-64.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <1246474930-6088-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
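For context, reports like the one above come from recording with call-graphs enabled and then asking perf report for per-symbol callchains; roughly as follows (the workload name is a placeholder, and exact flag spellings may have differed slightly in this 2009 perf_counter era tree):

    perf record -g ./some_workload    # sample with call-graph data
    perf report -s sym -c             # sort by symbol, print callchains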
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d4cf4ce19aac..36c3dc7b8991 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1561,6 +1561,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 
 static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(int, in_nmi_frame);
 
 
 static void
@@ -1576,7 +1577,9 @@ static void backtrace_warning(void *data, char *msg)
 
 static int backtrace_stack(void *data, char *name)
 {
-	/* Process all stacks: */
+	per_cpu(in_nmi_frame, smp_processor_id()) =
+			x86_is_stack_id(NMI_STACK, name);
+
 	return 0;
 }
 
@@ -1584,6 +1587,9 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	struct perf_callchain_entry *entry = data;
 
+	if (per_cpu(in_nmi_frame, smp_processor_id()))
+		return;
+
 	if (reliable)
 		callchain_store(entry, addr);
 }
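The mechanism is small enough to show in isolation. Below is a minimal user-space sketch of the same filtering pattern, assuming a toy unwinder model; struct toy_frame, on_stack(), on_address() and the sample frame data are all invented for the illustration and are not kernel code. The walker fires a stack callback whenever it crosses onto a different stack and an address callback for each frame, and the address callback drops frames while the NMI-stack flag is raised, mirroring what backtrace_stack() and backtrace_address() now do with the per-CPU in_nmi_frame flag.

    #include <stdio.h>
    #include <string.h>

    struct toy_frame {                 /* hypothetical, for illustration only */
    	const char    *stack_name;     /* which stack this frame lives on */
    	unsigned long  addr;           /* return address of the frame     */
    };

    static int in_nmi_frame;           /* a per-CPU variable in the real code */

    /* Fired when the walker enters a new stack; mirrors backtrace_stack(). */
    static void on_stack(const char *name)
    {
    	in_nmi_frame = (strcmp(name, "NMI") == 0);
    }

    /* Fired once per frame; mirrors backtrace_address(). */
    static void on_address(unsigned long addr)
    {
    	if (in_nmi_frame)
    		return;                /* skip perf's own NMI frames */
    	printf("  %#lx\n", addr);
    }

    int main(void)
    {
    	/* Frames as an unwinder would see them, innermost first. */
    	static const struct toy_frame frames[] = {
    		{ "NMI",     0xf001 },   /* e.g. perf_callchain           */
    		{ "NMI",     0xf002 },   /* e.g. perf_counter_nmi_handler */
    		{ "process", 0xa001 },   /* e.g. search_by_key            */
    		{ "process", 0xa002 },   /* e.g. reiserfs_lookup          */
    	};
    	const char *cur = NULL;

    	for (size_t i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
    		if (!cur || strcmp(cur, frames[i].stack_name) != 0) {
    			cur = frames[i].stack_name;
    			on_stack(cur);
    		}
    		on_address(frames[i].addr);
    	}
    	return 0;
    }

Run, this prints only the two process-stack addresses; the frames attributed to the NMI stack are filtered out, which is exactly why the perf_counter_nmi_handler ... nmi tail disappears from the recorded callchains.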