author     Frederic Weisbecker <fweisbec@gmail.com>   2010-06-30 17:03:51 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>   2010-08-18 19:30:59 -0400
commit     56962b4449af34070bb1994621ef4f0265eed4d8 (patch)
tree       b4c5dfee35d272c71cba80e75a51cb3e7070e430 /arch/sh
parent     70791ce9ba68a5921c9905ef05d23f62a90bc10c (diff)
perf: Generalize some arch callchain code
- Most archs use one callchain buffer per cpu, except x86 that needs
  to deal with NMIs. Provide a default perf_callchain_buffer()
  implementation that x86 overrides.

- Centralize all the kernel/user regs handling and invoke new arch
  handlers from there: perf_callchain_user() / perf_callchain_kernel().
  That avoids all the user_mode(), current->mm checks and so on.

- Invert some parameters in perf_callchain_*() helpers: entry to the
  left, regs to the right, following the traditional (dst, src) order.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Borislav Petkov <bp@amd64.org>
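For context, the centralized dispatch this series introduces in the generic perf code looks roughly like the sketch below. The function names (perf_callchain_buffer(), perf_callchain_kernel(), perf_callchain_user()) come from the commit message; the per-cpu variable name and the exact kernel-to-user handoff are illustrative assumptions, not the actual kernel/perf_event.c change.

/*
 * Simplified sketch of the generic callchain dispatch described above.
 * Hook names follow the commit message; the per-cpu variable name and
 * the details of the user/kernel handoff are illustrative only.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain_entry);

/* Default one-buffer-per-cpu implementation; x86 overrides this to cope with NMIs. */
__weak struct perf_callchain_entry *perf_callchain_buffer(void)
{
        return &__get_cpu_var(callchain_entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry = perf_callchain_buffer();

        entry->nr = 0;

        if (!user_mode(regs)) {
                /* Kernel side first; note the new (entry, regs) argument order. */
                perf_callchain_kernel(entry, regs);
                /* Fall through to the user side only if a user context exists. */
                regs = current->mm ? task_pt_regs(current) : NULL;
        }

        if (regs)
                perf_callchain_user(entry, regs);

        return entry;
}

With the user_mode()/current->mm checks and the per-cpu buffer handled here, each arch only has to fill in its half of the walk, which is what the sh diff below reduces to.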
Diffstat (limited to 'arch/sh')
-rw-r--r--   arch/sh/kernel/perf_callchain.c   37
1 file changed, 2 insertions(+), 35 deletions(-)
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 00143f3dd196..ef076a91292a 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -44,44 +44,11 @@ static const struct stacktrace_ops callchain_ops = {
 	.address	= callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->pc);
 
 	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	/*
-	 * Only the kernel side is implemented for now.
-	 */
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-
-	perf_do_callchain(regs, entry);
-
-	return entry;
-}
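After the patch, the kernel-side hook left in arch/sh/kernel/perf_callchain.c reduces to the function below (read straight off the hunk above); the generic code is now responsible for the user_mode()/current->mm checks and for providing the per-cpu callchain buffer.

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
        perf_callchain_store(entry, regs->pc);

        unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}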