author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-24 13:34:10 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-04-02 14:12:03 -0400
commit     0017d735092844118bef006696a750a0e4ef6ebd
tree       8ed1540aaeb63da726f93da12950a8eaa0e0a3e0 /kernel/sched_fair.c
parent     9084bb8246ea935b98320554229e2f371f7f52fa
sched: Fix TASK_WAKING vs fork deadlock
Oleg noticed a few races with the TASK_WAKING usage on fork.
- since TASK_WAKING is basically a spinlock, it should be IRQ safe (see
  the sketch below)
- since we set TASK_WAKING (*) without holding rq->lock, there may still
  be an rq->lock holder, so it does not actually provide full
  serialization.
(*) in fact we clear PF_STARTING, which in effect enables TASK_WAKING.
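For context, a minimal sketch of why TASK_WAKING behaves like a spinlock
(kernel/sched.c side of the tree at the time, not part of this patch);
names such as task_is_waking() are recalled from the surrounding code of
that era and are illustrative rather than an exact quote. Note how
clearing PF_STARTING is what arms the check, per the (*) note above:

  /*
   * Illustrative sketch only, not part of this patch: waiters busy-wait
   * on TASK_WAKING, which is why it acts like a spinlock and why the
   * holder side must be IRQ safe.
   */
  static inline int task_is_waking(struct task_struct *p)
  {
          return unlikely((p->state & TASK_WAKING) && !(p->flags & PF_STARTING));
  }

  static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
  {
          struct rq *rq;

          for (;;) {
                  /* spin until TASK_WAKING clears */
                  while (task_is_waking(p))
                          cpu_relax();
                  local_irq_save(*flags);
                  rq = task_rq(p);
                  raw_spin_lock(&rq->lock);
                  if (likely(rq == task_rq(p) && !task_is_waking(p)))
                          return rq;
                  raw_spin_unlock_irqrestore(&rq->lock, *flags);
          }
  }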
Cure the second issue by not setting TASK_WAKING in sched_fork(), but
only temporarily in wake_up_new_task() while calling select_task_rq().
Cure the first by holding rq->lock around the select_task_rq() call;
this disables IRQs, but it requires pushing the rq->lock release down
into select_task_rq_fair()'s cgroup code.
Because select_task_rq_fair() still needs to drop the rq->lock, we
cannot fully get rid of TASK_WAKING.
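Taken together, the fork wakeup path ends up with roughly the following
shape (kernel/sched.c side, which is outside the kernel/sched_fair.c
diffstat shown here); this is a simplified sketch of the intent, with
details elided, not the actual hunk:

  void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  {
          unsigned long flags;
          struct rq *rq;
          int cpu;

          rq = task_rq_lock(p, &flags);   /* rq->lock held, IRQs disabled */

          /*
           * TASK_WAKING is set only here, and only while select_task_rq()
           * runs; it lets select_task_rq_fair() drop and re-take rq->lock
           * around its cgroup/update_shares() work.
           */
          p->state = TASK_WAKING;
          cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
          set_task_cpu(p, cpu);
          p->state = TASK_RUNNING;

          activate_task(rq, p, 0);
          check_preempt_curr(rq, p, WF_FORK);
          task_rq_unlock(rq, &flags);
  }

The kernel/sched_fair.c half of that contract is the hunk below, which
releases and re-acquires rq->lock around update_shares().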
Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 8
1 file changed, 6 insertions, 2 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 49ad99378f82..8a5e7632d09b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1423,7 +1423,8 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
 {
         struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
         int cpu = smp_processor_id();
@@ -1521,8 +1522,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
                           cpumask_weight(sched_domain_span(sd))))
                         tmp = affine_sd;
 
-                if (tmp)
+                if (tmp) {
+                        raw_spin_unlock(&rq->lock);
                         update_shares(tmp);
+                        raw_spin_lock(&rq->lock);
+                }
         }
 #endif
 