author     Thomas Gleixner <tglx@linutronix.de>    2009-12-09 03:32:03 -0500
committer  Ingo Molnar <mingo@elte.hu>             2009-12-09 04:01:07 -0500
commit     dba091b9e3522b9d32fc9975e48d3b69633b45f0
tree       91549e3921971b6d0074fd34c29223b593381c8a /kernel/sched_fair.c
parent     3160568371da441b7f2fb57f2f1225404207e8f2
sched: Protect sched_rr_get_param() access to task->sched_class
sched_rr_get_param() calls task->sched_class->get_rr_interval(task)
without protection against a concurrent sched_setscheduler() call,
which modifies task->sched_class.
Serialize the access with task_rq_lock(task) and hand the rq
pointer into get_rr_interval() as it's needed at least in the
sched_fair implementation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <alpine.LFD.2.00.0912090930120.3089@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
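
The locking half of the fix lands on the caller side in kernel/sched.c, which
the diffstat filter on this page hides. Below is a minimal sketch of the
pattern the commit message describes, assuming the task_rq_lock()/
task_rq_unlock() API of this kernel generation; the wrapper name
read_rr_interval() is illustrative, not taken from the commit:

/*
 * Sketch of the serialized caller. The real code sits in
 * sys_sched_rr_get_interval(), which this page does not show.
 */
static unsigned int read_rr_interval(struct task_struct *p)
{
	struct rq *rq;
	unsigned long flags;
	unsigned int time_slice;

	/*
	 * task_rq_lock() takes the runqueue lock for p, so a concurrent
	 * sched_setscheduler() cannot switch p->sched_class underneath
	 * us while we dereference it.
	 */
	rq = task_rq_lock(p, &flags);
	time_slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, &flags);

	return time_slice;
}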
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f61837ad336d..613c1c749677 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2014,21 +2014,17 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
-unsigned int get_rr_interval_fair(struct task_struct *task)
+unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
 {
 	struct sched_entity *se = &task->se;
-	unsigned long flags;
-	struct rq *rq;
 	unsigned int rr_interval = 0;
 
 	/*
 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
 	 * idle runqueue:
 	 */
-	rq = task_rq_lock(task, &flags);
 	if (rq->cfs.load.weight)
 		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-	task_rq_unlock(rq, &flags);
 
 	return rr_interval;
 }
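
Threading rq through get_rr_interval() implies a matching change to the
method pointer in struct sched_class and to the other scheduling classes
(sched_rt, sched_idletask), all outside this diffstat. A sketch of the
resulting declaration, assuming the include/linux/sched.h layout of this era:

/* Sketch: the callback now receives the already-locked runqueue. */
struct sched_class {
	/* ... */
	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);
	/* ... */
};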