author     Konstantin Khlebnikov <khlebnikov@yandex-team.ru>  2016-06-16 08:57:01 -0400
committer  Ingo Molnar <mingo@kernel.org>                     2016-06-24 02:26:44 -0400
commit     094f469172e00d6ab0a3130b0e01c83b3cf3a98d (patch)
tree       a46b2d8e6e72d37af00388d20bdfb900f895f2c0
parent     8974189222159154c55f24ddad33e3613960521a (diff)
sched/fair: Initialize throttle_count for new task-groups lazily
A cgroup created inside a throttled group must inherit the current
throttle_count. A stale throttle_count allows throttled entities to be
nominated as the next buddy, which later leads to a NULL pointer
dereference in pick_next_task_fair().

This patch initializes cfs_rq->throttle_count at the first enqueue:
laziness makes it possible to avoid locking every rq at group creation.
The lazy approach would also allow skipping the full sub-tree scan when
throttling a hierarchy (not done in this patch).
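For illustration, here is a minimal, self-contained userspace sketch of
the same inheritance walk; the names (node, lazy_sync) are illustrative
only, not kernel identifiers:

	#include <stdio.h>

	struct node {
		struct node *parent;
		int throttle_count;
		int uptodate;
	};

	static void lazy_sync(struct node *n)
	{
		struct node *p;

		if (n->uptodate)
			return;
		n->uptodate = 1;

		/* Find the closest up-to-date ancestor; leaves go first. */
		for (p = n->parent; p; p = p->parent)
			if (p->uptodate)
				break;
		if (p)
			n->throttle_count = p->throttle_count;
	}

	int main(void)
	{
		/* root was throttled once; mid and child were created later. */
		struct node root  = { .parent = NULL, .throttle_count = 1, .uptodate = 1 };
		struct node mid   = { .parent = &root };
		struct node child = { .parent = &mid };

		lazy_sync(&child);	/* skips stale mid, inherits from root */
		printf("child throttle_count = %d\n", child.throttle_count);	/* prints 1 */
		return 0;
	}

Note that child inherits from root even though its immediate parent mid
has never been synchronized: this is the "closest up-to-date node" walk
that the patch performs on first enqueue.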
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Link: http://lkml.kernel.org/r/146608182119.21870.8439834428248129633.stgit@buzz
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/sched/fair.c  | 20 ++++++++++++++++++++
 kernel/sched/sched.h |  2 +-
 2 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2ae68f0e3bf5..8c5d8c0c8827 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4202,6 +4202,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
+	/* Synchronize hierarchical throttle counter: */
+	if (unlikely(!cfs_rq->throttle_uptodate)) {
+		struct rq *rq = rq_of(cfs_rq);
+		struct cfs_rq *pcfs_rq;
+		struct task_group *tg;
+
+		cfs_rq->throttle_uptodate = 1;
+
+		/* Get closest up-to-date node, because leaves go first: */
+		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+			if (pcfs_rq->throttle_uptodate)
+				break;
+		}
+		if (tg) {
+			cfs_rq->throttle_count = pcfs_rq->throttle_count;
+			cfs_rq->throttled_clock_task = rq_clock_task(rq);
+		}
+	}
+
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 72f1f3087b04..7cbeb92a1cb9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -437,7 +437,7 @@ struct cfs_rq {
 
 	u64 throttled_clock, throttled_clock_task;
 	u64 throttled_clock_task_time;
-	int throttled, throttle_count;
+	int throttled, throttle_count, throttle_uptodate;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
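For contrast with the lazy scheme above, an eager variant would have to
visit and lock every runqueue as soon as the group is created. A
hypothetical sketch (eager_sync_throttle_count is not a real kernel
function; shown only to illustrate the cost the patch avoids):

	static void eager_sync_throttle_count(struct task_group *tg)
	{
		unsigned long flags;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct rq *rq = cpu_rq(cpu);

			/* One lock round-trip per CPU: the cost lazy init avoids. */
			raw_spin_lock_irqsave(&rq->lock, flags);
			tg->cfs_rq[cpu]->throttle_count =
				tg->parent->cfs_rq[cpu]->throttle_count;
			raw_spin_unlock_irqrestore(&rq->lock, flags);
		}
	}

The lazy approach instead pays a single check on the first enqueue and
touches only the path from the new cfs_rq up to its nearest
already-synchronized ancestor.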