Diffstat (limited to 'kernel')
-rw-r--r--	kernel/module.c      | 12
-rw-r--r--	kernel/trace/trace.c | 19
2 files changed, 31 insertions, 0 deletions
diff --git a/kernel/module.c b/kernel/module.c
index 437a74a7524a..d190664f25ff 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
 			   mod->num_trace_events, GFP_KERNEL);
 #endif
+#ifdef CONFIG_TRACING
+	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
+					 sizeof(*mod->trace_bprintk_fmt_start),
+					 &mod->num_trace_bprintk_fmt);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_bprintk_fmt_start,
+			   sizeof(*mod->trace_bprintk_fmt_start) *
+			   mod->num_trace_bprintk_fmt, GFP_KERNEL);
+#endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
 	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
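Note: kmemleak only treats memory it actually scans as holding references, so a section that stores pointers but is never scanned makes the pointed-to objects look leaked. The hunk above avoids that by registering the module's __trace_printk_fmt section as a scan area. A minimal sketch of the same idiom follows; the structure and function names are made up for illustration and are not part of this patch, only kmemleak_scan_area() is the real API used above.

#include <linux/kmemleak.h>
#include <linux/slab.h>

/* Hypothetical example: a table of pointers kept in memory that kmemleak
 * would not otherwise scan. Without the scan-area registration, the
 * objects the table points to could be reported as false-positive leaks. */
struct fmt_table {
	const char **fmts;	/* points to kmalloc'ed strings */
	unsigned int nr;
};

static void register_fmt_table(struct fmt_table *t)
{
	/* Ask kmemleak to scan the pointer array itself, so the strings
	 * it references are considered reachable. */
	kmemleak_scan_area(t->fmts, sizeof(*t->fmts) * t->nr, GFP_KERNEL);
}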
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..ee6a7339cf0e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
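The per-CPU counter added above acts as a recursion guard: capturing a user stack can itself generate trace events, and without the guard the nested call would try to record another user stack on the same CPU. preempt_disable() keeps the task on one CPU so the __this_cpu_*() accesses stay paired with the same counter. A stripped-down sketch of the pattern is below; guard_count and do_traced_work are illustrative names, not identifiers from this patch.

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, guard_count);	/* illustrative name */

static void do_traced_work(void)		/* illustrative name */
{
	preempt_disable();		/* stay on one CPU for the check/inc/dec */
	if (__this_cpu_read(guard_count))
		goto out;		/* already active on this CPU: recursing */

	__this_cpu_inc(guard_count);

	/* ... work that may indirectly re-enter this function ... */

	__this_cpu_dec(guard_count);
 out:
	preempt_enable();
}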