diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2008-10-31 08:08:28 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-04 11:14:04 -0500 |
commit | 07695fa04e8a3384b0c855398ce1f7885bd7dc3b (patch) | |
tree | 5a70702087ea1f603b69281aaa270b117f37a63f /kernel/trace/trace_sched_switch.c | |
parent | 71566a0d161edec70361b7f90f6e54af6a6d5d05 (diff) |
tracing/ftrace: fix a race condition in sched_switch tracer
Impact: fix race condition in sched_switch tracer
This patch fixes a race condition in the sched_switch tracer. If
several tasks (i.e., concurrent initcalls) are playing with
tracing_start_cmdline_record() and tracing_stop_cmdline_record(), the
following situation could happen:
_ Task A and B are using the same tracepoint probe. Task A holds it.
Task B is sleeping and doesn't hold it.
_ Task A frees the sched tracer, then sched_ref is decremented to 0.
_ Task A is preempted before it has unregistered its tracepoint
probe, then B runs.
_ B increments sched_ref, sees it is 1, and then guesses it has to
register its probe. But the probe has not yet been unregistered by task A.
_ A lot of bad things can happen after that...
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r-- | kernel/trace/trace_sched_switch.c | 5 |
1 files changed, 5 insertions, 0 deletions
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index b8f56beb1a62..59de5141207c 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -17,6 +17,7 @@ | |||
17 | static struct trace_array *ctx_trace; | 17 | static struct trace_array *ctx_trace; |
18 | static int __read_mostly tracer_enabled; | 18 | static int __read_mostly tracer_enabled; |
19 | static atomic_t sched_ref; | 19 | static atomic_t sched_ref; |
20 | static DEFINE_MUTEX(tracepoint_mutex); | ||
20 | 21 | ||
21 | static void | 22 | static void |
22 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 23 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
@@ -125,18 +126,22 @@ static void tracing_start_sched_switch(void) | |||
125 | { | 126 | { |
126 | long ref; | 127 | long ref; |
127 | 128 | ||
129 | mutex_lock(&tracepoint_mutex); | ||
128 | ref = atomic_inc_return(&sched_ref); | 130 | ref = atomic_inc_return(&sched_ref); |
129 | if (ref == 1) | 131 | if (ref == 1) |
130 | tracing_sched_register(); | 132 | tracing_sched_register(); |
133 | mutex_unlock(&tracepoint_mutex); | ||
131 | } | 134 | } |
132 | 135 | ||
133 | static void tracing_stop_sched_switch(void) | 136 | static void tracing_stop_sched_switch(void) |
134 | { | 137 | { |
135 | long ref; | 138 | long ref; |
136 | 139 | ||
140 | mutex_lock(&tracepoint_mutex); | ||
137 | ref = atomic_dec_and_test(&sched_ref); | 141 | ref = atomic_dec_and_test(&sched_ref); |
138 | if (ref) | 142 | if (ref) |
139 | tracing_sched_unregister(); | 143 | tracing_sched_unregister(); |
144 | mutex_unlock(&tracepoint_mutex); | ||
140 | } | 145 | } |
141 | 146 | ||
142 | void tracing_start_cmdline_record(void) | 147 | void tracing_start_cmdline_record(void) |