author		Frederic Weisbecker <fweisbec@gmail.com>	2008-10-31 08:08:28 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-11-04 11:14:04 -0500
commit		07695fa04e8a3384b0c855398ce1f7885bd7dc3b (patch)
tree		5a70702087ea1f603b69281aaa270b117f37a63f
parent		71566a0d161edec70361b7f90f6e54af6a6d5d05 (diff)
tracing/ftrace: fix a race condition in sched_switch tracer
Impact: fix race condition in sched_switch tracer

This patch fixes a race condition in the sched_switch tracer. If several
tasks (i.e. concurrent initcalls) are playing with
tracing_start_cmdline_record() and tracing_stop_cmdline_record(), the
following situation can happen:

_ Tasks A and B use the same tracepoint probe. Task A holds it, task B is
  sleeping and does not hold it.

_ Task A frees the sched tracer, so sched_ref is decremented to 0.

_ Task A is preempted before it has unregistered its tracepoint probe, and
  B runs.

_ B increments sched_ref, sees it is 1 and concludes it has to register its
  probe. But task A has not yet unregistered that probe.

_ A lot of bad things can happen after that...

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
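For illustration, here is a minimal userspace sketch of the locking pattern the
patch below introduces. The names register_probe()/unregister_probe() are
hypothetical stand-ins for tracing_sched_register()/tracing_sched_unregister(),
and a pthread mutex stands in for the kernel's tracepoint_mutex; this is a
sketch of the idea under those assumptions, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for tracing_sched_register()/_unregister(). */
static void register_probe(void)   { puts("probe registered"); }
static void unregister_probe(void) { puts("probe unregistered"); }

static atomic_long sched_ref;
static pthread_mutex_t tracepoint_mutex = PTHREAD_MUTEX_INITIALIZER;

static void start_sched_switch(void)
{
	pthread_mutex_lock(&tracepoint_mutex);
	/* 0 -> 1 transition: we are the first user, register the probe. */
	if (atomic_fetch_add(&sched_ref, 1) == 0)
		register_probe();
	pthread_mutex_unlock(&tracepoint_mutex);
}

static void stop_sched_switch(void)
{
	pthread_mutex_lock(&tracepoint_mutex);
	/*
	 * 1 -> 0 transition: last user, unregister the probe.  Holding the
	 * mutex across the check and the unregister is the point of the
	 * patch: without it, another task could see sched_ref go back to 1
	 * and re-register a probe that has not been unregistered yet.
	 */
	if (atomic_fetch_sub(&sched_ref, 1) == 1)
		unregister_probe();
	pthread_mutex_unlock(&tracepoint_mutex);
}

int main(void)
{
	start_sched_switch();
	stop_sched_switch();
	return 0;
}

The counter itself presumably stays atomic so that fast-path readers can still
check it without taking the mutex; the mutex only serializes the register and
unregister transitions, which is where the race lives.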
-rw-r--r--	kernel/trace/trace_sched_switch.c	5
1 file changed, 5 insertions, 0 deletions
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index b8f56beb1a62..59de5141207c 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -17,6 +17,7 @@
 static struct trace_array	*ctx_trace;
 static int __read_mostly	tracer_enabled;
 static atomic_t		sched_ref;
+static DEFINE_MUTEX(tracepoint_mutex);
 
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -125,18 +126,22 @@ static void tracing_start_sched_switch(void)
 {
 	long ref;
 
+	mutex_lock(&tracepoint_mutex);
 	ref = atomic_inc_return(&sched_ref);
 	if (ref == 1)
 		tracing_sched_register();
+	mutex_unlock(&tracepoint_mutex);
 }
 
 static void tracing_stop_sched_switch(void)
 {
 	long ref;
 
+	mutex_lock(&tracepoint_mutex);
 	ref = atomic_dec_and_test(&sched_ref);
 	if (ref)
 		tracing_sched_unregister();
+	mutex_unlock(&tracepoint_mutex);
 }
 
 void tracing_start_cmdline_record(void)