Diffstat (limited to 'kernel/events/callchain.c')
 -rw-r--r--  kernel/events/callchain.c  36
 1 file changed, 24 insertions, 12 deletions
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9325e7dcba1..179ef4640964 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -19,11 +19,13 @@ struct callchain_cpus_entries {
 };
 
 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
 
 static inline size_t perf_callchain_entry__sizeof(void)
 {
 	return (sizeof(struct perf_callchain_entry) +
-		sizeof(__u64) * sysctl_perf_event_max_stack);
+		sizeof(__u64) * (sysctl_perf_event_max_stack +
+				 sysctl_perf_event_max_contexts_per_stack));
 }
 
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
@@ -32,12 +34,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 				struct pt_regs *regs)
 {
 }
@@ -176,14 +178,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return NULL;
 
-	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
+	struct perf_callchain_entry_ctx ctx;
 	int rctx;
 
 	entry = get_callchain_entry(&rctx);
@@ -193,12 +196,16 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 	if (!entry)
 		goto exit_put;
 
-	entry->nr = init_nr;
+	ctx.entry = entry;
+	ctx.max_stack = max_stack;
+	ctx.nr = entry->nr = init_nr;
+	ctx.contexts = 0;
+	ctx.contexts_maxed = false;
 
 	if (kernel && !user_mode(regs)) {
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(&ctx, regs);
 	}
 
 	if (user) {
@@ -214,8 +221,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 				goto exit_put;
 
 			if (add_mark)
-				perf_callchain_store(entry, PERF_CONTEXT_USER);
-			perf_callchain_user(entry, regs);
+				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+			perf_callchain_user(&ctx, regs);
 		}
 	}
 
@@ -225,10 +232,15 @@ exit_put:
 	return entry;
 }
 
+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int new_value = sysctl_perf_event_max_stack, ret;
+	int *value = table->data;
+	int new_value = *value, ret;
 	struct ctl_table new_table = *table;
 
 	new_table.data = &new_value;
@@ -240,7 +252,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
 	if (atomic_read(&nr_callchain_events))
 		ret = -EBUSY;
 	else
-		sysctl_perf_event_max_stack = new_value;
+		*value = new_value;
 
 	mutex_unlock(&callchain_mutex);
 
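
Note: the diff above depends on a new struct and two helpers that live in include/linux/perf_event.h, outside this diffstat-limited view. The sketch below is an approximation of how that split accounting plausibly works, not the verbatim kernel source: the field names follow the ctx.* assignments visible in get_perf_callchain() above, while the helper bodies are reconstructed assumptions.

/*
 * Sketch only: approximate shape of the context/address split. The real
 * definitions are in include/linux/perf_event.h and may differ in detail.
 */
struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;	/* budget for real addresses */
	u32				nr;		/* real addresses stored so far */
	short				contexts;	/* context markers stored so far */
	bool				contexts_maxed;
};

/* Context markers (PERF_CONTEXT_KERNEL, PERF_CONTEXT_USER, ...) are charged
 * against sysctl_perf_event_max_contexts_per_stack, not against max_stack. */
static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	}
	ctx->contexts_maxed = true;
	return -1;	/* no more room, tell the arch unwinder to stop */
}

/* Real return addresses are charged against ctx->max_stack, which
 * get_perf_callchain() above seeds from sysctl_perf_event_max_stack. */
static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	}
	return -1;	/* no more room, tell the arch unwinder to stop */
}

Because perf_event_max_stack_handler() now writes through table->data, the same handler can back both knobs; assuming the sysctl names documented for this series, that would be kernel.perf_event_max_stack and kernel.perf_event_max_contexts_per_stack.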