author    Frederic Weisbecker <fweisbec@gmail.com>  2010-06-30 17:03:51 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>  2010-08-18 19:30:59 -0400
commit    56962b4449af34070bb1994621ef4f0265eed4d8 (patch)
tree      b4c5dfee35d272c71cba80e75a51cb3e7070e430 /arch/sparc/kernel/perf_event.c
parent    70791ce9ba68a5921c9905ef05d23f62a90bc10c (diff)
perf: Generalize some arch callchain code
- Most archs use one callchain buffer per cpu, except x86, which needs
  to deal with NMIs. Provide a default perf_callchain_buffer()
  implementation that x86 overrides.

- Centralize all the kernel/user regs handling and invoke the new arch
  handlers from there: perf_callchain_user() / perf_callchain_kernel().
  This removes the duplicated user_mode() and current->mm checks from
  the arch code.

- Invert the parameters in the perf_callchain_*() helpers: entry on the
  left, regs on the right, following the traditional (dst, src) order.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Borislav Petkov <bp@amd64.org>
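For context, the centralized handling described above lives in the core perf
code, not in this sparc file. A minimal sketch of the generic side,
reconstructed from the description above and from the sparc code removed
below (names follow the commit message; the exact core implementation is not
part of this diff):

    /* Default callchain buffer: one per cpu. Declared __weak so that
     * x86, which must cope with NMIs, can override it. */
    static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

    __weak struct perf_callchain_entry *perf_callchain_buffer(void)
    {
            return &__get_cpu_var(perf_callchain_entry);
    }

    /* Generic dispatcher: does the user_mode()/current->mm handling
     * once, then invokes the new arch hooks with (dst, src) ordering. */
    struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
    {
            struct perf_callchain_entry *entry = perf_callchain_buffer();

            entry->nr = 0;

            if (!user_mode(regs)) {
                    perf_callchain_kernel(entry, regs);
                    if (current->mm)
                            regs = task_pt_regs(current);
                    else
                            regs = NULL;
            }

            if (regs)
                    perf_callchain_user(entry, regs);

            return entry;
    }

With this in place, each architecture only has to implement
perf_callchain_kernel() and perf_callchain_user(), as the sparc diff below
shows.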
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c | 46 +++++++++++++++-------------------------------
1 file changed, 15 insertions(+), 31 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2a95a9079862..460162d74aba 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1283,14 +1283,16 @@ void __init init_hw_perf_events(void)
 	register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-				  struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int graph = 0;
 #endif
 
+	stack_trace_flush();
+
 	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 	perf_callchain_store(entry, regs->tpc);
 
@@ -1330,8 +1332,8 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
@@ -1353,8 +1355,8 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-				   struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+				   struct pt_regs *regs)
 {
 	unsigned long ufp;
 
@@ -1376,30 +1378,12 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-	if (!user_mode(regs)) {
-		stack_trace_flush();
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs) {
-		flushw_user();
-		if (test_thread_flag(TIF_32BIT))
-			perf_callchain_user_32(regs, entry);
-		else
-			perf_callchain_user_64(regs, entry);
-	}
-	return entry;
-}
+	flushw_user();
+	if (test_thread_flag(TIF_32BIT))
+		perf_callchain_user_32(entry, regs);
+	else
+		perf_callchain_user_64(entry, regs);
+}
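The comment deleted in the last hunk explains why sparc can rely on a single
per-cpu buffer: as on powerpc, a PMU interrupt cannot arrive inside the PMU
handler. x86 has no such guarantee, which is why it overrides the default
buffer. A sketch of such an override, assuming an in_nmi() check selects a
separate per-cpu NMI buffer (the actual x86 code is not part of this diff
and the details here are illustrative):

    static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
    static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry_nmi);

    struct perf_callchain_entry *perf_callchain_buffer(void)
    {
            /* An NMI can interrupt the PMU handler mid-unwind; give it
             * its own buffer so the in-progress callchain isn't clobbered. */
            if (in_nmi())
                    return &__get_cpu_var(perf_callchain_entry_nmi);

            return &__get_cpu_var(perf_callchain_entry);
    }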