author		Arnaldo Carvalho de Melo <acme@redhat.com>	2016-04-28 11:30:53 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2016-05-16 22:11:50 -0400
commit		cfbcf468454ab4b20f0b4b62da51920b99fdb19e (patch)
tree		08b7caa605bd1ef1babb8f58cb0771bc20e518c4
parent		a831100aeefbe6d9f3e47a3e2712f82c042f1f5c (diff)

perf core: Pass max stack as a perf_callchain_entry context

This makes perf_callchain_{user,kernel}() receive the max stack as
context for the perf_callchain_entry, instead of accessing the global
sysctl_perf_event_max_stack.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Zefan Li <lizefan@huawei.com>
Link: http://lkml.kernel.org/n/tip-kolmn1yo40p7jhswxwrc7rrd@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
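The shape of the change, condensed from the hunks below: a small context struct wraps the
perf_callchain_entry together with its per-call limit, and each arch walker bounds itself
with entry->max_stack instead of reading the global sysctl. A minimal sketch under those
assumptions follows; walk_one_frame() is a hypothetical stand-in for an arch-specific
unwinder step and is not part of this patch:

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry *entry;	/* where the ips are stored */
	u32 max_stack;				/* per-call depth limit */
};

/* Illustrative arch walker in the new style (not taken from the patch). */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* perf_callchain_store() also refuses entries past entry->max_stack. */
	while (pc && entry->entry->nr < entry->max_stack) {
		perf_callchain_store(entry, pc);
		pc = walk_one_frame(pc);	/* hypothetical unwinder step */
	}
}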
-rw-r--r--  arch/arc/kernel/perf_event.c        |  6
-rw-r--r--  arch/arm/kernel/perf_callchain.c    | 10
-rw-r--r--  arch/arm64/kernel/perf_callchain.c  | 14
-rw-r--r--  arch/metag/kernel/perf_callchain.c  | 10
-rw-r--r--  arch/mips/kernel/perf_event.c       | 12
-rw-r--r--  arch/powerpc/perf/callchain.c       | 14
-rw-r--r--  arch/s390/kernel/perf_event.c       |  4
-rw-r--r--  arch/sh/kernel/perf_callchain.c     |  4
-rw-r--r--  arch/sparc/kernel/perf_event.c      | 14
-rw-r--r--  arch/tile/kernel/perf_event.c       |  6
-rw-r--r--  arch/x86/events/core.c              | 14
-rw-r--r--  arch/xtensa/kernel/perf_event.c     | 10
-rw-r--r--  include/linux/perf_event.h          | 16
-rw-r--r--  kernel/bpf/stackmap.c               |  3
-rw-r--r--  kernel/events/callchain.c           | 20
15 files changed, 84 insertions(+), 73 deletions(-)
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8b134cfe5e1f..6fd48021324b 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
-	struct perf_callchain_entry *entry = ctrl->perf_stuff;
+	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 27563befa8a2..bc552e813e7b 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct frame_tail buftail;
 	unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct frame_tail __user *tail;
 
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-	while ((entry->nr < sysctl_perf_event_max_stack) &&
+	while ((entry->entry->nr < entry->max_stack) &&
 	       tail && !((unsigned long)tail & 0x3))
 		tail = user_backtrace(tail, entry);
 }
@@ -89,13 +89,13 @@ static int
 callchain_trace(struct stackframe *fr,
 		void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, fr->pc);
 	return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 32c3c6e70119..0d60150057cf 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct frame_tail buftail;
 	unsigned long err;
@@ -76,7 +76,7 @@ struct compat_frame_tail {
 
 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-		      struct perf_callchain_entry *entry)
+		      struct perf_callchain_entry_ctx *entry)
 {
 	struct compat_frame_tail buftail;
 	unsigned long err;
@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 }
 #endif /* CONFIG_COMPAT */
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
 		tail = (struct frame_tail __user *)regs->regs[29];
 
-		while (entry->nr < sysctl_perf_event_max_stack &&
+		while (entry->entry->nr < entry->max_stack &&
 		       tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
 		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-		while ((entry->nr < sysctl_perf_event_max_stack) &&
+		while ((entry->entry->nr < entry->max_stack) &&
 		       tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
 #endif
@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, frame->pc);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	struct stackframe frame;
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 252abc12a5a3..b3261a98b15b 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)
 
 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-	       struct perf_callchain_entry *entry)
+	       struct perf_callchain_entry_ctx *entry)
 {
 	struct metag_frame frame;
 	unsigned long calladdr;
@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp = regs->ctx.AX[0].U0;
 	struct metag_frame __user *frame;
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	--frame;
 
-	while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+	while ((entry->entry->nr < entry->max_stack) && frame)
 		frame = user_backtrace(frame, entry);
 }
 
@@ -78,13 +78,13 @@ static int
 callchain_trace(struct stackframe *fr,
 		void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 	perf_callchain_store(entry, fr->pc);
 	return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stackframe fr;
 
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 5021c546ad07..22395c7d7030 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
  * the user stack callchains, we will add it here.
  */
 
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
 	unsigned long reg29)
 {
 	unsigned long *sp = (unsigned long *)reg29;
 	unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
 			perf_callchain_store(entry, addr);
-			if (entry->nr >= sysctl_perf_event_max_stack)
+			if (entry->entry->nr >= entry->max_stack)
 				break;
 		}
 	}
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 	}
 	do {
 		perf_callchain_store(entry, pc);
-		if (entry->nr >= sysctl_perf_event_max_stack)
+		if (entry->entry->nr >= entry->max_stack)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 22d9015c1acc..c9260c1dfdbc 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
 		puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->entry->nr < entry->max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 					  struct pt_regs *regs)
 {
 }
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
 	return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->entry->nr < entry->max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (current_is_64bit())
 		perf_callchain_user_64(entry, regs);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c3e4099b60a5..87035fa58bbe 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);
 
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	perf_callchain_store(entry, address);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	if (user_mode(regs))
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index cc80b614b5fa..fa2c0cd23eaa 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)
 
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	if (reliable)
 		perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, regs->pc);
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index a4b8b5aed21c..bcc5376db74b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
 }
 pure_initcall(init_hw_perf_events);
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			}
 		}
 #endif
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->entry->nr < entry->max_stack);
 }
 
 static inline int
@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
 	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long ufp;
@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->entry->nr < entry->max_stack);
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long ufp;
@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 			ufp = (unsigned long)sf.fp;
 		}
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < sysctl_perf_event_max_stack);
+	} while (entry->entry->nr < entry->max_stack);
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	u64 saved_fault_address = current_thread_info()->fault_address;
 	u8 saved_fault_code = get_thread_fault_code();
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 8767060d70fb..6394c1ccb68e 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 	struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
 	}
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
 	perf_callchain(entry, regs);
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	perf_callchain(entry, regs);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 5e5e76a52f58..07f2b01cfb72 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
 	/* 32-bit process in 64-bit kernel. */
 	unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 
 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->entry->nr < entry->max_stack) {
 		unsigned long bytes;
 		frame.next_frame = 0;
 		frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
 	return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
 	const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		return;
 
 	pagefault_disable();
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->entry->nr < entry->max_stack) {
 		unsigned long bytes;
 		frame.next_frame = NULL;
 		frame.return_address = 0;
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index a6b00b3af429..ef90479e0397 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)
 
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	perf_callchain_store(entry, frame->pc);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
-	xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+	xtensa_backtrace_kernel(regs, entry->max_stack,
 				callchain_trace, NULL, entry);
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+	xtensa_backtrace_user(regs, entry->max_stack,
 			      callchain_trace, entry);
 }
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9e1c3ada91c4..dbd18246b36e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -61,6 +61,11 @@ struct perf_callchain_entry {
 	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
+struct perf_callchain_entry_ctx {
+	struct perf_callchain_entry *entry;
+	u32				max_stack;
+};
+
 struct perf_raw_record {
 	u32				size;
 	void				*data;
@@ -1063,19 +1068,20 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark);
+		   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
 
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-	if (entry->nr < sysctl_perf_event_max_stack) {
+	struct perf_callchain_entry *entry = ctx->entry;
+	if (entry->nr < ctx->max_stack) {
 		entry->ip[entry->nr++] = ip;
 		return 0;
 	} else {
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index f5a19548be12..a82d7605db3f 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -136,7 +136,8 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;
 
-	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 7fc89939ede9..af95ad92893a 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -32,12 +32,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 				struct pt_regs *regs)
 {
 }
@@ -176,14 +176,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return NULL;
 
-	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
+	struct perf_callchain_entry_ctx ctx;
 	int rctx;
 
 	entry = get_callchain_entry(&rctx);
@@ -193,12 +194,15 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 	if (!entry)
 		goto exit_put;
 
+	ctx.entry = entry;
+	ctx.max_stack = max_stack;
+
 	entry->nr = init_nr;
 
 	if (kernel && !user_mode(regs)) {
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
+			perf_callchain_store(&ctx, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(&ctx, regs);
 	}
 
 	if (user) {
@@ -214,8 +218,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 			goto exit_put;
 
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
+			perf_callchain_store(&ctx, PERF_CONTEXT_USER);
+		perf_callchain_user(&ctx, regs);
 		}
 	}
 