author		Andrew Morton <akpm@linux-foundation.org>	2009-09-19 14:55:44 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-09-20 14:00:32 -0400
commit		89f19f04dc72363d912fd007a399cb10310eff6e (patch)
tree		daa64e5de98a668eed4a2ceeb6ca95c478be4053
parent		3f04e8cd5b24727a2500f8ab8f3de730ba47b02c (diff)
sched: Fix raciness in runqueue_is_locked()
runqueue_is_locked() is unavoidably racy due to a poor interface design.
It does:

	cpu = get_cpu();
	ret = some_percpu_thing(cpu);
	put_cpu();
	return ret;

As soon as put_cpu() re-enables preemption the caller can be migrated to
another CPU, so the answer may no longer describe the runqueue the caller
is actually running on.  Its return value is unreliable.

Fix it by passing the cpu in from the caller, which can keep preemption
disabled for as long as it needs the answer to stay valid.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <200909191855.n8JItiko022148@imap1.linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
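For context, a minimal sketch (not part of the patch; example_wake_up() is an
illustrative caller name, and its body mirrors the trace.c change below) of why
the old interface was racy and how a caller is expected to use the new one:

	/*
	 * Old interface: the result could go stale the moment put_cpu()
	 * re-enabled preemption inside runqueue_is_locked(), because the
	 * task could migrate to another CPU before acting on the answer.
	 *
	 * New interface: the caller pins itself to a CPU, asks about that
	 * CPU's runqueue explicitly, and acts on the answer before dropping
	 * the pin.
	 */
	static void example_wake_up(void)		/* illustrative caller */
	{
		int cpu = get_cpu();		/* disable preemption, note this CPU */

		if (!runqueue_is_locked(cpu))	/* answer valid while preemption is off */
			wake_up(&trace_wait);	/* as trace_wake_up() does below */

		put_cpu();			/* re-enable preemption */
	}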
-rw-r--r--	include/linux/sched.h	2
-rw-r--r--	kernel/sched.c		10
-rw-r--r--	kernel/trace/trace.c	8
3 files changed, 10 insertions, 10 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8af3d249170e..cc37a3fa5065 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -257,7 +257,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
-extern int runqueue_is_locked(void);
+extern int runqueue_is_locked(int cpu);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbff..575fb0139038 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -681,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-	int cpu = get_cpu();
-	struct rq *rq = cpu_rq(cpu);
-	int ret;
-
-	ret = spin_is_locked(&rq->lock);
-	put_cpu();
-	return ret;
+	return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd172..420232a1fbba 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -275,12 +275,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)