path: root/arch/arm/kernel/perf_event.c
author    Frederic Weisbecker <fweisbec@gmail.com>  2010-06-30 17:03:51 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>  2010-08-18 19:30:59 -0400
commit    56962b4449af34070bb1994621ef4f0265eed4d8 (patch)
tree      b4c5dfee35d272c71cba80e75a51cb3e7070e430 /arch/arm/kernel/perf_event.c
parent    70791ce9ba68a5921c9905ef05d23f62a90bc10c (diff)
perf: Generalize some arch callchain code
- Most archs use one callchain buffer per cpu, except x86 that needs
  to deal with NMIs. Provide a default perf_callchain_buffer()
  implementation that x86 overrides.

- Centralize all the kernel/user regs handling and invoke new arch
  handlers from there: perf_callchain_user() / perf_callchain_kernel().
  This avoids all the user_mode(), current->mm checks and so on.

- Invert some parameters in the perf_callchain_*() helpers: entry to the
  left, regs to the right, following the traditional (dst, src) order.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Borislav Petkov <bp@amd64.org>
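For orientation, below is a minimal sketch of the generic side this description implies. The actual kernel/perf_event.c hunks are not part of this page, so the weak-default wiring and the exact control flow shown here are assumptions reconstructed from the commit message, not a quote of the patch:

/*
 * Sketch only -- approximates the generalized core described above;
 * the real kernel/perf_event.c change is not shown on this page.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

/* Default per-cpu buffer; weak so an arch (x86) can provide its own. */
__weak struct perf_callchain_entry *perf_callchain_buffer(void)
{
        return &__get_cpu_var(perf_callchain_entry);
}

/* Arch hooks now take (entry, regs): destination left, source right. */
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
                                  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
                                struct pt_regs *regs)
{
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry = perf_callchain_buffer();

        entry->nr = 0;

        if (!user_mode(regs)) {
                /* Kernel-mode sample: walk the kernel chain first ... */
                perf_callchain_kernel(entry, regs);
                /* ... then pick up the interrupted task's user regs, if any. */
                regs = current->mm ? task_pt_regs(current) : NULL;
        }

        if (regs)
                perf_callchain_user(entry, regs);

        return entry;
}

With the user_mode()/current->mm checks hoisted into the core like this, the ARM hunks below only have to drop their local regs fixup and flip the argument order.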
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--  arch/arm/kernel/perf_event.c  |  43
1 file changed, 4 insertions(+), 39 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index a07c3b1955f0..0e3bbdb15927 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -3044,17 +3044,13 @@ user_backtrace(struct frame_tail *tail,
 	return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct frame_tail *tail;
 
 	perf_callchain_store(entry, PERF_CONTEXT_USER);
 
-	if (!user_mode(regs))
-		regs = task_pt_regs(current);
-
 	tail = (struct frame_tail *)regs->ARM_fp - 1;
 
 	while (tail && !((unsigned long)tail & 0x3))
@@ -3075,9 +3071,8 @@ callchain_trace(struct stackframe *fr,
 	return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
@@ -3088,33 +3083,3 @@ perf_callchain_kernel(struct pt_regs *regs,
 	fr.pc = regs->ARM_pc;
 	walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-
-	if (current->mm)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
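For readability, this is roughly how the two ARM entry points read with the patch applied. The hunks above do not show everything between them, so the PERF_CONTEXT_KERNEL store, the fr.fp/fr.sp/fr.lr setup and the user_backtrace() loop body are filled in from the unchanged surrounding file and should be read as a reconstruction rather than a quote:

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct frame_tail *tail;

        perf_callchain_store(entry, PERF_CONTEXT_USER);

        tail = (struct frame_tail *)regs->ARM_fp - 1;

        /* Walk the user frame-pointer chain; the loop body sits outside the hunk context above. */
        while (tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        /* Lines between the two hunks, reconstructed from the unchanged file. */
        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
}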