about summary refs log tree commit diff stats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2010-11-10 06:56:12 -0500
committerSteven Rostedt <rostedt@goodmis.org>2010-11-12 21:20:08 -0500
commit91e86e560d0b3ce4c5fc64fd2bbb99f856a30a4e (patch)
tree26d7afb8373474a4d44d0eba4130499676c35bc7 /kernel/trace
parentb5908548537ccd3ada258ca5348df7ffc93e5a06 (diff)
tracing: Fix recursive user stack trace
The user stack trace can fault while examining the trace, which would call the do_page_fault handler, which would trace again, which would do the user stack trace, which would fault and call do_page_fault again ... Thus this is causing a recursive bug. We need to have a recursion detector here. [ Resubmitted by Jiri Olsa ] [ Eric Dumazet recommended using __this_cpu_* instead of __get_cpu_* ] Cc: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: Jiri Olsa <jolsa@redhat.com> LKML-Reference: <1289390172-9730-3-git-send-email-jolsa@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace.c19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..ee6a7339cf0e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
1284 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); 1284 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
1285} 1285}
1286 1286
1287static DEFINE_PER_CPU(int, user_stack_count);
1288
1287void 1289void
1288ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 1290ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1289{ 1291{
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1302 if (unlikely(in_nmi())) 1304 if (unlikely(in_nmi()))
1303 return; 1305 return;
1304 1306
1307 /*
1308 * prevent recursion, since the user stack tracing may
1309 * trigger other kernel events.
1310 */
1311 preempt_disable();
1312 if (__this_cpu_read(user_stack_count))
1313 goto out;
1314
1315 __this_cpu_inc(user_stack_count);
1316
1317
1318
1305 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 1319 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1306 sizeof(*entry), flags, pc); 1320 sizeof(*entry), flags, pc);
1307 if (!event) 1321 if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1319 save_stack_trace_user(&trace); 1333 save_stack_trace_user(&trace);
1320 if (!filter_check_discard(call, entry, buffer, event)) 1334 if (!filter_check_discard(call, entry, buffer, event))
1321 ring_buffer_unlock_commit(buffer, event); 1335 ring_buffer_unlock_commit(buffer, event);
1336
1337 __this_cpu_dec(user_stack_count);
1338
1339 out:
1340 preempt_enable();
1322} 1341}
1323 1342
1324#ifdef UNUSED 1343#ifdef UNUSED