path: root/kernel/trace/trace.c
author		Frederic Weisbecker <fweisbec@gmail.com>	2009-03-22 18:10:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-23 04:22:14 -0400
commit		07edf7121374609709ef1b0889f6e7b8d6a62ec1 (patch)
tree		ad1649c9546dc3ce23bb2f8609a7459a7ca2006e /kernel/trace/trace.c
parent		9bd7d099ab3f10dd666da399c064999bae427cd9 (diff)
tracing/events: don't use wake up for events
Impact: fix hard-lockup with sched switch events

Some ftrace events, such as sched wakeup, can be traced while the
runqueue lock is held. Since they use trace_current_buffer_unlock_commit(),
they call wake_up(), which can try to grab the runqueue lock too,
resulting in a deadlock.

Now, for all events, we call a new helper:
trace_nowake_buffer_unlock_commit(), which does much the same as
trace_current_buffer_unlock_commit() except that it doesn't call
trace_wake_up().

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1237759847-21025-4-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
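For illustration, a probe that commits an event from a context holding the runqueue lock (such as the sched wakeup event) would now finish with the nowake helper instead of trace_current_buffer_unlock_commit(). The following is a minimal sketch only; the probe function, TRACE_SAMPLE type, and struct sample_entry are hypothetical and not part of this patch:

/* Hypothetical event probe, shown only to illustrate the new helper.
 * TRACE_SAMPLE and struct sample_entry are illustrative names. */
static void probe_sample_event(void)
{
	struct ring_buffer_event *event;
	struct sample_entry *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	local_save_flags(irq_flags);
	event = trace_current_buffer_lock_reserve(TRACE_SAMPLE,
						  sizeof(*entry),
						  irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	/* ... fill in *entry ... */

	/* Commit without calling trace_wake_up(), so this is safe
	 * even while the runqueue lock is held. */
	trace_nowake_buffer_unlock_commit(event, irq_flags, pc);
}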
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e6fac0ffe6f0..6bad12819eb6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -860,15 +860,25 @@ static void ftrace_trace_stack(struct trace_array *tr,
 static void ftrace_trace_userstack(struct trace_array *tr,
 				   unsigned long flags, int pc);
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
-			struct ring_buffer_event *event,
-			unsigned long flags, int pc)
+static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+			struct ring_buffer_event *event,
+			unsigned long flags, int pc,
+			int wake)
 {
 	ring_buffer_unlock_commit(tr->buffer, event);
 
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
-	trace_wake_up();
+
+	if (wake)
+		trace_wake_up();
+}
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+			struct ring_buffer_event *event,
+			unsigned long flags, int pc)
+{
+	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
@@ -882,7 +892,13 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
 void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+}
+
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc)
+{
+	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
 }
 
 void