aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2010-06-29 13:34:05 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2010-08-18 19:30:11 -0400
commit70791ce9ba68a5921c9905ef05d23f62a90bc10c (patch)
tree9711ff02cb910e1d8709c09512dbe7e94224bdd8
parentc1a65932fd7216fdc9a0db8bbffe1d47842f862c (diff)
perf: Generalize callchain_store()
callchain_store() is the same on every arch; inline it in perf_event.h and rename it to perf_callchain_store() to avoid any collision. This removes repetitive code. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Acked-by: Paul Mackerras <paulus@samba.org> Tested-by: Will Deacon <will.deacon@arm.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Stephane Eranian <eranian@google.com> Cc: David Miller <davem@davemloft.net> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Borislav Petkov <bp@amd64.org>
-rw-r--r--arch/arm/kernel/perf_event.c15
-rw-r--r--arch/powerpc/kernel/perf_callchain.c40
-rw-r--r--arch/sh/kernel/perf_callchain.c11
-rw-r--r--arch/sparc/kernel/perf_event.c26
-rw-r--r--arch/x86/kernel/cpu/perf_event.c20
-rw-r--r--include/linux/perf_event.h7
6 files changed, 45 insertions, 74 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index fdcb0be47df1..a07c3b1955f0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -3001,13 +3001,6 @@ arch_initcall(init_hw_perf_events);
3001/* 3001/*
3002 * Callchain handling code. 3002 * Callchain handling code.
3003 */ 3003 */
3004static inline void
3005callchain_store(struct perf_callchain_entry *entry,
3006 u64 ip)
3007{
3008 if (entry->nr < PERF_MAX_STACK_DEPTH)
3009 entry->ip[entry->nr++] = ip;
3010}
3011 3004
3012/* 3005/*
3013 * The registers we're interested in are at the end of the variable 3006 * The registers we're interested in are at the end of the variable
@@ -3039,7 +3032,7 @@ user_backtrace(struct frame_tail *tail,
3039 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) 3032 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
3040 return NULL; 3033 return NULL;
3041 3034
3042 callchain_store(entry, buftail.lr); 3035 perf_callchain_store(entry, buftail.lr);
3043 3036
3044 /* 3037 /*
3045 * Frame pointers should strictly progress back up the stack 3038 * Frame pointers should strictly progress back up the stack
@@ -3057,7 +3050,7 @@ perf_callchain_user(struct pt_regs *regs,
3057{ 3050{
3058 struct frame_tail *tail; 3051 struct frame_tail *tail;
3059 3052
3060 callchain_store(entry, PERF_CONTEXT_USER); 3053 perf_callchain_store(entry, PERF_CONTEXT_USER);
3061 3054
3062 if (!user_mode(regs)) 3055 if (!user_mode(regs))
3063 regs = task_pt_regs(current); 3056 regs = task_pt_regs(current);
@@ -3078,7 +3071,7 @@ callchain_trace(struct stackframe *fr,
3078 void *data) 3071 void *data)
3079{ 3072{
3080 struct perf_callchain_entry *entry = data; 3073 struct perf_callchain_entry *entry = data;
3081 callchain_store(entry, fr->pc); 3074 perf_callchain_store(entry, fr->pc);
3082 return 0; 3075 return 0;
3083} 3076}
3084 3077
@@ -3088,7 +3081,7 @@ perf_callchain_kernel(struct pt_regs *regs,
3088{ 3081{
3089 struct stackframe fr; 3082 struct stackframe fr;
3090 3083
3091 callchain_store(entry, PERF_CONTEXT_KERNEL); 3084 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
3092 fr.fp = regs->ARM_fp; 3085 fr.fp = regs->ARM_fp;
3093 fr.sp = regs->ARM_sp; 3086 fr.sp = regs->ARM_sp;
3094 fr.lr = regs->ARM_lr; 3087 fr.lr = regs->ARM_lr;
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 95ad9dad298e..a286c2e5a3ea 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -23,18 +23,6 @@
23#include "ppc32.h" 23#include "ppc32.h"
24#endif 24#endif
25 25
26/*
27 * Store another value in a callchain_entry.
28 */
29static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
30{
31 unsigned int nr = entry->nr;
32
33 if (nr < PERF_MAX_STACK_DEPTH) {
34 entry->ip[nr] = ip;
35 entry->nr = nr + 1;
36 }
37}
38 26
39/* 27/*
40 * Is sp valid as the address of the next kernel stack frame after prev_sp? 28 * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -69,8 +57,8 @@ static void perf_callchain_kernel(struct pt_regs *regs,
69 57
70 lr = regs->link; 58 lr = regs->link;
71 sp = regs->gpr[1]; 59 sp = regs->gpr[1];
72 callchain_store(entry, PERF_CONTEXT_KERNEL); 60 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
73 callchain_store(entry, regs->nip); 61 perf_callchain_store(entry, regs->nip);
74 62
75 if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) 63 if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
76 return; 64 return;
@@ -89,7 +77,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
89 next_ip = regs->nip; 77 next_ip = regs->nip;
90 lr = regs->link; 78 lr = regs->link;
91 level = 0; 79 level = 0;
92 callchain_store(entry, PERF_CONTEXT_KERNEL); 80 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
93 81
94 } else { 82 } else {
95 if (level == 0) 83 if (level == 0)
@@ -111,7 +99,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
111 ++level; 99 ++level;
112 } 100 }
113 101
114 callchain_store(entry, next_ip); 102 perf_callchain_store(entry, next_ip);
115 if (!valid_next_sp(next_sp, sp)) 103 if (!valid_next_sp(next_sp, sp))
116 return; 104 return;
117 sp = next_sp; 105 sp = next_sp;
@@ -246,8 +234,8 @@ static void perf_callchain_user_64(struct pt_regs *regs,
246 next_ip = regs->nip; 234 next_ip = regs->nip;
247 lr = regs->link; 235 lr = regs->link;
248 sp = regs->gpr[1]; 236 sp = regs->gpr[1];
249 callchain_store(entry, PERF_CONTEXT_USER); 237 perf_callchain_store(entry, PERF_CONTEXT_USER);
250 callchain_store(entry, next_ip); 238 perf_callchain_store(entry, next_ip);
251 239
252 for (;;) { 240 for (;;) {
253 fp = (unsigned long __user *) sp; 241 fp = (unsigned long __user *) sp;
@@ -276,14 +264,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
276 read_user_stack_64(&uregs[PT_R1], &sp)) 264 read_user_stack_64(&uregs[PT_R1], &sp))
277 return; 265 return;
278 level = 0; 266 level = 0;
279 callchain_store(entry, PERF_CONTEXT_USER); 267 perf_callchain_store(entry, PERF_CONTEXT_USER);
280 callchain_store(entry, next_ip); 268 perf_callchain_store(entry, next_ip);
281 continue; 269 continue;
282 } 270 }
283 271
284 if (level == 0) 272 if (level == 0)
285 next_ip = lr; 273 next_ip = lr;
286 callchain_store(entry, next_ip); 274 perf_callchain_store(entry, next_ip);
287 ++level; 275 ++level;
288 sp = next_sp; 276 sp = next_sp;
289 } 277 }
@@ -447,8 +435,8 @@ static void perf_callchain_user_32(struct pt_regs *regs,
447 next_ip = regs->nip; 435 next_ip = regs->nip;
448 lr = regs->link; 436 lr = regs->link;
449 sp = regs->gpr[1]; 437 sp = regs->gpr[1];
450 callchain_store(entry, PERF_CONTEXT_USER); 438 perf_callchain_store(entry, PERF_CONTEXT_USER);
451 callchain_store(entry, next_ip); 439 perf_callchain_store(entry, next_ip);
452 440
453 while (entry->nr < PERF_MAX_STACK_DEPTH) { 441 while (entry->nr < PERF_MAX_STACK_DEPTH) {
454 fp = (unsigned int __user *) (unsigned long) sp; 442 fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,14 +458,14 @@ static void perf_callchain_user_32(struct pt_regs *regs,
470 read_user_stack_32(&uregs[PT_R1], &sp)) 458 read_user_stack_32(&uregs[PT_R1], &sp))
471 return; 459 return;
472 level = 0; 460 level = 0;
473 callchain_store(entry, PERF_CONTEXT_USER); 461 perf_callchain_store(entry, PERF_CONTEXT_USER);
474 callchain_store(entry, next_ip); 462 perf_callchain_store(entry, next_ip);
475 continue; 463 continue;
476 } 464 }
477 465
478 if (level == 0) 466 if (level == 0)
479 next_ip = lr; 467 next_ip = lr;
480 callchain_store(entry, next_ip); 468 perf_callchain_store(entry, next_ip);
481 ++level; 469 ++level;
482 sp = next_sp; 470 sp = next_sp;
483 } 471 }
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 1d6dbce7a3bc..00143f3dd196 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -14,11 +14,6 @@
14#include <asm/unwinder.h> 14#include <asm/unwinder.h>
15#include <asm/ptrace.h> 15#include <asm/ptrace.h>
16 16
17static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
18{
19 if (entry->nr < PERF_MAX_STACK_DEPTH)
20 entry->ip[entry->nr++] = ip;
21}
22 17
23static void callchain_warning(void *data, char *msg) 18static void callchain_warning(void *data, char *msg)
24{ 19{
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
39 struct perf_callchain_entry *entry = data; 34 struct perf_callchain_entry *entry = data;
40 35
41 if (reliable) 36 if (reliable)
42 callchain_store(entry, addr); 37 perf_callchain_store(entry, addr);
43} 38}
44 39
45static const struct stacktrace_ops callchain_ops = { 40static const struct stacktrace_ops callchain_ops = {
@@ -52,8 +47,8 @@ static const struct stacktrace_ops callchain_ops = {
52static void 47static void
53perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) 48perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
54{ 49{
55 callchain_store(entry, PERF_CONTEXT_KERNEL); 50 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
56 callchain_store(entry, regs->pc); 51 perf_callchain_store(entry, regs->pc);
57 52
58 unwind_stack(NULL, regs, NULL, &callchain_ops, entry); 53 unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
59} 54}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3c33ff..2a95a9079862 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1283,12 +1283,6 @@ void __init init_hw_perf_events(void)
1283 register_die_notifier(&perf_event_nmi_notifier); 1283 register_die_notifier(&perf_event_nmi_notifier);
1284} 1284}
1285 1285
1286static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1287{
1288 if (entry->nr < PERF_MAX_STACK_DEPTH)
1289 entry->ip[entry->nr++] = ip;
1290}
1291
1292static void perf_callchain_kernel(struct pt_regs *regs, 1286static void perf_callchain_kernel(struct pt_regs *regs,
1293 struct perf_callchain_entry *entry) 1287 struct perf_callchain_entry *entry)
1294{ 1288{
@@ -1297,8 +1291,8 @@ static void perf_callchain_kernel(struct pt_regs *regs,
1297 int graph = 0; 1291 int graph = 0;
1298#endif 1292#endif
1299 1293
1300 callchain_store(entry, PERF_CONTEXT_KERNEL); 1294 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
1301 callchain_store(entry, regs->tpc); 1295 perf_callchain_store(entry, regs->tpc);
1302 1296
1303 ksp = regs->u_regs[UREG_I6]; 1297 ksp = regs->u_regs[UREG_I6];
1304 fp = ksp + STACK_BIAS; 1298 fp = ksp + STACK_BIAS;
@@ -1322,13 +1316,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
1322 pc = sf->callers_pc; 1316 pc = sf->callers_pc;
1323 fp = (unsigned long)sf->fp + STACK_BIAS; 1317 fp = (unsigned long)sf->fp + STACK_BIAS;
1324 } 1318 }
1325 callchain_store(entry, pc); 1319 perf_callchain_store(entry, pc);
1326#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1320#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1327 if ((pc + 8UL) == (unsigned long) &return_to_handler) { 1321 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
1328 int index = current->curr_ret_stack; 1322 int index = current->curr_ret_stack;
1329 if (current->ret_stack && index >= graph) { 1323 if (current->ret_stack && index >= graph) {
1330 pc = current->ret_stack[index - graph].ret; 1324 pc = current->ret_stack[index - graph].ret;
1331 callchain_store(entry, pc); 1325 perf_callchain_store(entry, pc);
1332 graph++; 1326 graph++;
1333 } 1327 }
1334 } 1328 }
@@ -1341,8 +1335,8 @@ static void perf_callchain_user_64(struct pt_regs *regs,
1341{ 1335{
1342 unsigned long ufp; 1336 unsigned long ufp;
1343 1337
1344 callchain_store(entry, PERF_CONTEXT_USER); 1338 perf_callchain_store(entry, PERF_CONTEXT_USER);
1345 callchain_store(entry, regs->tpc); 1339 perf_callchain_store(entry, regs->tpc);
1346 1340
1347 ufp = regs->u_regs[UREG_I6] + STACK_BIAS; 1341 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1348 do { 1342 do {
@@ -1355,7 +1349,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
1355 1349
1356 pc = sf.callers_pc; 1350 pc = sf.callers_pc;
1357 ufp = (unsigned long)sf.fp + STACK_BIAS; 1351 ufp = (unsigned long)sf.fp + STACK_BIAS;
1358 callchain_store(entry, pc); 1352 perf_callchain_store(entry, pc);
1359 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1353 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1360} 1354}
1361 1355
@@ -1364,8 +1358,8 @@ static void perf_callchain_user_32(struct pt_regs *regs,
1364{ 1358{
1365 unsigned long ufp; 1359 unsigned long ufp;
1366 1360
1367 callchain_store(entry, PERF_CONTEXT_USER); 1361 perf_callchain_store(entry, PERF_CONTEXT_USER);
1368 callchain_store(entry, regs->tpc); 1362 perf_callchain_store(entry, regs->tpc);
1369 1363
1370 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; 1364 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
1371 do { 1365 do {
@@ -1378,7 +1372,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
1378 1372
1379 pc = sf.callers_pc; 1373 pc = sf.callers_pc;
1380 ufp = (unsigned long)sf.fp; 1374 ufp = (unsigned long)sf.fp;
1381 callchain_store(entry, pc); 1375 perf_callchain_store(entry, pc);
1382 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1376 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1383} 1377}
1384 1378
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4a4d191f9492..8af28caeafc1 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1571,12 +1571,6 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
1571 * callchain support 1571 * callchain support
1572 */ 1572 */
1573 1573
1574static inline
1575void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1576{
1577 if (entry->nr < PERF_MAX_STACK_DEPTH)
1578 entry->ip[entry->nr++] = ip;
1579}
1580 1574
1581static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); 1575static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1582static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); 1576static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
@@ -1602,7 +1596,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
1602{ 1596{
1603 struct perf_callchain_entry *entry = data; 1597 struct perf_callchain_entry *entry = data;
1604 1598
1605 callchain_store(entry, addr); 1599 perf_callchain_store(entry, addr);
1606} 1600}
1607 1601
1608static const struct stacktrace_ops backtrace_ops = { 1602static const struct stacktrace_ops backtrace_ops = {
@@ -1616,8 +1610,8 @@ static const struct stacktrace_ops backtrace_ops = {
1616static void 1610static void
1617perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) 1611perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1618{ 1612{
1619 callchain_store(entry, PERF_CONTEXT_KERNEL); 1613 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
1620 callchain_store(entry, regs->ip); 1614 perf_callchain_store(entry, regs->ip);
1621 1615
1622 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); 1616 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
1623} 1617}
@@ -1646,7 +1640,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1646 if (fp < compat_ptr(regs->sp)) 1640 if (fp < compat_ptr(regs->sp))
1647 break; 1641 break;
1648 1642
1649 callchain_store(entry, frame.return_address); 1643 perf_callchain_store(entry, frame.return_address);
1650 fp = compat_ptr(frame.next_frame); 1644 fp = compat_ptr(frame.next_frame);
1651 } 1645 }
1652 return 1; 1646 return 1;
@@ -1670,8 +1664,8 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1670 1664
1671 fp = (void __user *)regs->bp; 1665 fp = (void __user *)regs->bp;
1672 1666
1673 callchain_store(entry, PERF_CONTEXT_USER); 1667 perf_callchain_store(entry, PERF_CONTEXT_USER);
1674 callchain_store(entry, regs->ip); 1668 perf_callchain_store(entry, regs->ip);
1675 1669
1676 if (perf_callchain_user32(regs, entry)) 1670 if (perf_callchain_user32(regs, entry))
1677 return; 1671 return;
@@ -1688,7 +1682,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1688 if ((unsigned long)fp < regs->sp) 1682 if ((unsigned long)fp < regs->sp)
1689 break; 1683 break;
1690 1684
1691 callchain_store(entry, frame.return_address); 1685 perf_callchain_store(entry, frame.return_address);
1692 fp = frame.next_frame; 1686 fp = frame.next_frame;
1693 } 1687 }
1694} 1688}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 937495c25073..358880404b42 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -978,6 +978,13 @@ extern void perf_event_fork(struct task_struct *tsk);
978 978
979extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); 979extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
980 980
981static inline void
982perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
983{
984 if (entry->nr < PERF_MAX_STACK_DEPTH)
985 entry->ip[entry->nr++] = ip;
986}
987
981extern int sysctl_perf_event_paranoid; 988extern int sysctl_perf_event_paranoid;
982extern int sysctl_perf_event_mlock; 989extern int sysctl_perf_event_mlock;
983extern int sysctl_perf_event_sample_rate; 990extern int sysctl_perf_event_sample_rate;