path: root/kernel/sched_fair.c
author		Paul Turner <pjt@google.com>	2011-07-21 12:43:27 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-08-14 06:01:13 -0400
commit		953bfcd10e6f3697233e8e5128c611d275da39c1 (patch)
tree		a3ca8136bb9e992bb40945c5eee2a8dcc0fd0b57 /kernel/sched_fair.c
parent		5710f15b52664ae0bfa60a66d75464769d297b2b (diff)
sched: Implement hierarchical task accounting for SCHED_OTHER
Introduce hierarchical task accounting for the group scheduling case in CFS, as well as promoting the responsibility for maintaining rq->nr_running to the scheduling classes.

The primary motivation for this is that with scheduling classes supporting bandwidth throttling it is possible for entities participating in throttled sub-trees to not have root visible changes in rq->nr_running across activate and de-activate operations. This in turn leads to incorrect idle and weight-per-task load balance decisions.

This also allows us to make a small fixlet to the fastpath in pick_next_task() under group scheduling.

Note: this issue also exists with the existing sched_rt throttling mechanism. This patch does not address that.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184756.878333391@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
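The pick_next_task() fastpath fixlet mentioned above lives in kernel/sched.c and is therefore outside the diffstat below. As a rough sketch of the idea, assuming the hierarchical count is exposed at the root as rq->cfs.h_nr_running (the field placement here is an assumption, not shown in this diff), the fastpath can compare it against rq->nr_running before falling back to the full class walk:

/*
 * Illustrative sketch only -- not the upstream hunk. If every runnable
 * task on this rq belongs to the fair class, i.e. the hierarchical CFS
 * count matches the rq-wide count, we can ask the fair class directly
 * instead of walking all scheduling classes.
 */
static inline struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG(); /* the idle class should always have a runnable task */
}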
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f4b732a3552b..f86b0cb5eb29 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1310,16 +1310,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			break;
 		cfs_rq = cfs_rq_of(se);
 		enqueue_entity(cfs_rq, se, flags);
+		cfs_rq->h_nr_running++;
 		flags = ENQUEUE_WAKEUP;
 	}
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running++;
 
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
+	inc_nr_running(rq);
 	hrtick_update(rq);
 }
 
@@ -1339,6 +1342,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
+		cfs_rq->h_nr_running--;
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -1358,11 +1362,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running--;
 
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
+	dec_nr_running(rq);
 	hrtick_update(rq);
 }
 
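For context on the inc_nr_running()/dec_nr_running() calls added above: maintaining rq->nr_running is now driven from the scheduling class rather than from the core activate/deactivate paths, so a throttled sub-tree can keep its per-cfs_rq h_nr_running counts without the root rq count moving. At the time of this patch the helpers lived in kernel/sched.c (outside this diffstat) as thin wrappers along these lines; treat this as a sketch rather than the exact source:

/* kernel/sched.c -- sketch, not part of the diff above */
static void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;	/* one more runnable task visible at the root */
}

static void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;	/* one less runnable task visible at the root */
}

Each cfs_rq's h_nr_running counts the tasks runnable anywhere in the hierarchy below it, which is what a throttling implementation can use to keep the root-visible count accurate when an entire sub-tree is dequeued or requeued at once.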