aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorPaul Turner <pjt@google.com>2011-07-21 12:43:38 -0400
committerIngo Molnar <mingo@elte.hu>2011-08-14 06:03:44 -0400
commit8cb120d3e41a0464a559d639d519cef563717a4e (patch)
tree4d17ae0d3d9a18cb502e5d098bd48b7436620754 /kernel
parent5238cdd3873e67a98b28c1161d65d2a615c320a3 (diff)
sched: Migrate throttled tasks on HOTPLUG
Throttled tasks are invisible to cpu-offline since they are not eligible for selection by pick_next_task(). The regular 'escape' path for a thread that is blocked at offline is via ttwu->select_task_rq, however this will not handle a throttled group since there are no individual thread wakeups on an unthrottle. Resolve this by unthrottling offline cpus so that threads can be migrated. Signed-off-by: Paul Turner <pjt@google.com> Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/20110721184757.989000590@google.com Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c27
1 files changed, 27 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 5db05f6fb470..397317248ddd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6335,6 +6335,30 @@ static void calc_global_load_remove(struct rq *rq)
6335 rq->calc_load_active = 0;
6336}
6337
6338#ifdef CONFIG_CFS_BANDWIDTH
6339static void unthrottle_offline_cfs_rqs(struct rq *rq)
6340{
6341 struct cfs_rq *cfs_rq;
6342
6343 for_each_leaf_cfs_rq(rq, cfs_rq) {
6344 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6345
6346 if (!cfs_rq->runtime_enabled)
6347 continue;
6348
6349 /*
6350 * clock_task is not advancing so we just need to make sure
6351 * there's some valid quota amount
6352 */
6353 cfs_rq->runtime_remaining = cfs_b->quota;
6354 if (cfs_rq_throttled(cfs_rq))
6355 unthrottle_cfs_rq(cfs_rq);
6356 }
6357}
6358#else
6359static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
6360#endif
6361
6362/*
6363 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6364 * try_to_wake_up()->select_task_rq().
@@ -6360,6 +6384,9 @@ static void migrate_tasks(unsigned int dead_cpu)
6384 */
6385 rq->stop = NULL;
6386
6387 /* Ensure any throttled groups are reachable by pick_next_task */
6388 unthrottle_offline_cfs_rqs(rq);
6389
6390 for ( ; ; ) {
6391 /*
6392 * There's this thread running, bail when that's the only