diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2010-06-30 17:03:51 -0400 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2010-08-18 19:30:59 -0400 |
commit | 56962b4449af34070bb1994621ef4f0265eed4d8 (patch) | |
tree | b4c5dfee35d272c71cba80e75a51cb3e7070e430 /kernel/perf_event.c | |
parent | 70791ce9ba68a5921c9905ef05d23f62a90bc10c (diff) |
perf: Generalize some arch callchain code
- Most archs use one callchain buffer per cpu, except x86, which needs
to deal with NMIs. Provide a default perf_callchain_buffer()
implementation that x86 overrides.
- Centralize all the kernel/user regs handling and invoke new arch
handlers from there: perf_callchain_user() / perf_callchain_kernel()
That avoids all the user_mode() and current->mm checks, and so on.
- Invert some parameters in perf_callchain_*() helpers: entry to the
left, regs to the right, following the traditional (dst, src).
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Borislav Petkov <bp@amd64.org>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- | kernel/perf_event.c | 40 |
1 files changed, 38 insertions, 2 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c772a3d4000d..02efde6c8798 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -2937,13 +2937,49 @@ void perf_event_do_pending(void) | |||
2937 | __perf_pending_run(); | 2937 | __perf_pending_run(); |
2938 | } | 2938 | } |
2939 | 2939 | ||
2940 | DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | ||
2941 | |||
2940 | /* | 2942 | /* |
2941 | * Callchain support -- arch specific | 2943 | * Callchain support -- arch specific |
2942 | */ | 2944 | */ |
2943 | 2945 | ||
2944 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | 2946 | __weak struct perf_callchain_entry *perf_callchain_buffer(void) |
2945 | { | 2947 | { |
2946 | return NULL; | 2948 | return &__get_cpu_var(perf_callchain_entry); |
2949 | } | ||
2950 | |||
2951 | __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
2952 | struct pt_regs *regs) | ||
2953 | { | ||
2954 | } | ||
2955 | |||
2956 | __weak void perf_callchain_user(struct perf_callchain_entry *entry, | ||
2957 | struct pt_regs *regs) | ||
2958 | { | ||
2959 | } | ||
2960 | |||
2961 | static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
2962 | { | ||
2963 | struct perf_callchain_entry *entry; | ||
2964 | |||
2965 | entry = perf_callchain_buffer(); | ||
2966 | if (!entry) | ||
2967 | return NULL; | ||
2968 | |||
2969 | entry->nr = 0; | ||
2970 | |||
2971 | if (!user_mode(regs)) { | ||
2972 | perf_callchain_kernel(entry, regs); | ||
2973 | if (current->mm) | ||
2974 | regs = task_pt_regs(current); | ||
2975 | else | ||
2976 | regs = NULL; | ||
2977 | } | ||
2978 | |||
2979 | if (regs) | ||
2980 | perf_callchain_user(entry, regs); | ||
2981 | |||
2982 | return entry; | ||
2947 | } | 2983 | } |
2948 | 2984 | ||
2949 | 2985 | ||