diff options
author | Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> | 2011-12-15 00:36:55 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-12-21 04:34:49 -0500 |
commit | 4fc420c91f53e0a9f95665c6b14a1983716081e7 (patch) | |
tree | e6bfad34810d368ed893780b9ddc05c5dbcb6923 /kernel/sched/fair.c | |
parent | 11534ec5b6cea13ae38d31799d2a5290c5d724af (diff) |
sched: Fix cgroup movement of forking process
There is a small race between task_fork_fair() and sched_move_task(),
which is trying to move the parent.
task_fork_fair() sched_move_task()
--------------------------------+---------------------------------
cfs_rq = task_cfs_rq(current)
-> cfs_rq is the "old" one.
curr = cfs_rq->curr
-> curr is set to the parent.
task_rq_lock()
dequeue_task()
->parent.se.vruntime -= (old)cfs_rq->min_vruntime
enqueue_task()
->parent.se.vruntime += (new)cfs_rq->min_vruntime
task_rq_unlock()
raw_spin_lock_irqsave(rq->lock)
se->vruntime = curr->vruntime
-> vruntime of the child is set to that of the parent
which has already been updated by sched_move_task().
se->vruntime -= (old)cfs_rq->min_vruntime.
raw_spin_unlock_irqrestore(rq->lock)
As a result, vruntime of the child becomes far bigger than expected,
if (new)cfs_rq->min_vruntime >> (old)cfs_rq->min_vruntime.
This patch fixes the problem by reading "cfs_rq" and "curr" only after
acquiring rq->lock.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20111215143655.662676b0.nishimura@mxp.nes.nec.co.jp
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 7 |
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cea2fa85327..525d69e5fb7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -5190,8 +5190,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | |||
5190 | */ | 5190 | */ |
5191 | static void task_fork_fair(struct task_struct *p) | 5191 | static void task_fork_fair(struct task_struct *p) |
5192 | { | 5192 | { |
5193 | struct cfs_rq *cfs_rq = task_cfs_rq(current); | 5193 | struct cfs_rq *cfs_rq; |
5194 | struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | 5194 | struct sched_entity *se = &p->se, *curr; |
5195 | int this_cpu = smp_processor_id(); | 5195 | int this_cpu = smp_processor_id(); |
5196 | struct rq *rq = this_rq(); | 5196 | struct rq *rq = this_rq(); |
5197 | unsigned long flags; | 5197 | unsigned long flags; |
@@ -5200,6 +5200,9 @@ static void task_fork_fair(struct task_struct *p) | |||
5200 | 5200 | ||
5201 | update_rq_clock(rq); | 5201 | update_rq_clock(rq); |
5202 | 5202 | ||
5203 | cfs_rq = task_cfs_rq(current); | ||
5204 | curr = cfs_rq->curr; | ||
5205 | |||
5203 | if (unlikely(task_cpu(p) != this_cpu)) { | 5206 | if (unlikely(task_cpu(p) != this_cpu)) { |
5204 | rcu_read_lock(); | 5207 | rcu_read_lock(); |
5205 | __set_task_cpu(p, this_cpu); | 5208 | __set_task_cpu(p, this_cpu); |