commit    b87f17242da6b2ac6db2d179b2f93fb84cff2fbe
tree      7c90ccd5b3a307670f939b944a2bf32d523a3b15
parent    57fdc26d4a734a3e00c6b2fc0e1e40ff8da4dc31
author    Bharata B Rao <bharata@linux.vnet.ibm.com>  2008-09-25 00:23:54 -0400
committer Ingo Molnar <mingo@elte.hu>                 2008-09-25 05:24:11 -0400
sched: maintain only task entities in cfs_rq->tasks list
The cfs_rq->tasks list is used by the load balancer to iterate
over all tasks. Currently it holds all entities, both task and
group entities, so the load balancer has to explicitly check for
and skip group entities. This patch changes the cfs_rq->tasks
list to hold task entities only.
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
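
The idea is easy to see in isolation. Below is a minimal userspace
sketch, not kernel code: list_head, container_of and the is_task flag
are simplified stand-ins for the kernel's struct list_head,
container_of() and entity_is_task(). Because group entities are never
linked into the list, the walker needs no filtering.

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified stand-in for the kernel's intrusive list. */
	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void list_init(struct list_head *head)
	{
		head->next = head->prev = head;
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	/* Stand-in for sched_entity; is_task mimics entity_is_task(). */
	struct sched_entity {
		int is_task;
		struct list_head group_node;
	};

	int main(void)
	{
		struct list_head tasks;
		struct sched_entity task_se = { .is_task = 1 };
		struct sched_entity group_se = { .is_task = 0 };
		struct list_head *pos;

		list_init(&tasks);
		list_init(&task_se.group_node);
		list_init(&group_se.group_node);

		/* Post-patch rule: only task entities go on the list. */
		list_add(&task_se.group_node, &tasks);
		/* group_se is deliberately never added. */

		/* The walker may now assume every node is a task. */
		for (pos = tasks.next; pos != &tasks; pos = pos->next) {
			struct sched_entity *se =
				container_of(pos, struct sched_entity, group_node);
			printf("is_task = %d\n", se->is_task);
		}
		return 0;
	}
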
Diffstat (limited to 'kernel')
 kernel/sched_fair.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e3f3c10f7033..95c1295ad26d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -528,11 +528,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se))
 		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
-	if (entity_is_task(se))
+	if (entity_is_task(se)) {
 		add_cfs_task_weight(cfs_rq, se->load.weight);
+		list_add(&se->group_node, &cfs_rq->tasks);
+	}
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
-	list_add(&se->group_node, &cfs_rq->tasks);
 }
 
 static void
@@ -541,11 +542,12 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se))
 		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
-	if (entity_is_task(se))
+	if (entity_is_task(se)) {
 		add_cfs_task_weight(cfs_rq, -se->load.weight);
+		list_del_init(&se->group_node);
+	}
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
-	list_del_init(&se->group_node);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1335,19 +1337,9 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 	if (next == &cfs_rq->tasks)
 		return NULL;
 
-	/* Skip over entities that are not tasks */
-	do {
-		se = list_entry(next, struct sched_entity, group_node);
-		next = next->next;
-	} while (next != &cfs_rq->tasks && !entity_is_task(se));
-
-	if (next == &cfs_rq->tasks && !entity_is_task(se))
-		return NULL;
-
-	cfs_rq->balance_iterator = next;
-
-	if (entity_is_task(se))
-		p = task_of(se);
+	se = list_entry(next, struct sched_entity, group_node);
+	p = task_of(se);
+	cfs_rq->balance_iterator = next->next;
 
 	return p;
 }
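
For reference, the third hunk's result can be extracted into a
standalone, compilable sketch. The types below are simplified
stand-ins, not the real cfs_rq or sched_entity, and task_of() is
modelled as a plain pointer field. It mirrors the post-patch body of
__load_balance_iterator(): the skip loop, the extra termination check
and the entity_is_task() test are all gone, because the list now
contains tasks only.

	#include <stddef.h>
	#include <assert.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct task_struct { int pid; };	/* toy stand-in */

	struct sched_entity {
		struct task_struct *task;	/* stand-in for task_of(se) */
		struct list_head group_node;
	};

	struct cfs_rq_sketch {
		struct list_head tasks;
		struct list_head *balance_iterator;
	};

	/* Mirrors the post-patch iterator: a plain list walk. */
	static struct task_struct *
	iterator_next(struct cfs_rq_sketch *cfs_rq, struct list_head *next)
	{
		struct sched_entity *se;

		if (next == &cfs_rq->tasks)
			return NULL;

		se = container_of(next, struct sched_entity, group_node);
		cfs_rq->balance_iterator = next->next;
		return se->task;
	}

	int main(void)
	{
		struct cfs_rq_sketch rq;

		rq.tasks.next = rq.tasks.prev = &rq.tasks;	/* empty list */
		rq.balance_iterator = rq.tasks.next;

		/* Empty list: the iterator terminates immediately. */
		assert(iterator_next(&rq, rq.balance_iterator) == NULL);
		return 0;
	}

The trade-off is visible in the first two hunks: the list must now be
maintained conditionally on enqueue/dequeue, which is why list_add()
and list_del_init() moved under the entity_is_task() branch.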