path: root/kernel
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:35 -0400
commit	a25b5aca8740ea99d5e18dfc71235a52b685dcf7 (patch)
tree	834bcfd9a51f840cc10693348ecffc99ce69550d /kernel
parent	d3f40dbab954d83383b6a516582d5c09cc216dcc (diff)
sched: no need to aggregate task_weight
We only need to know the task_weight of the busiest rq - nothing to do if
there are no tasks there.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	16
-rw-r--r--	kernel/sched_fair.c	2
2 files changed, 2 insertions, 16 deletions
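For readers skimming the diff, here is a minimal, self-contained userspace sketch of the idea (the cfs_rq_stub type and the aggregate_task_weight()/busiest_rq_is_empty() helpers below are illustrative stand-ins, not kernel APIs): the removed code summed task_weight over every cfs_rq in the sched-domain span, whereas the new check only inspects the busiest CPU's cfs_rq, since there is nothing to move if that runqueue carries no task weight.

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-in for the per-cpu cfs_rq fields used by this patch. */
struct cfs_rq_stub {
	unsigned long load_weight;	/* stands in for cfs_rq->load.weight */
	unsigned long task_weight;	/* weight contributed by tasks on this rq */
};

/* Old scheme: walk the whole span and aggregate task_weight. */
static unsigned long aggregate_task_weight(const struct cfs_rq_stub *rqs, int nr)
{
	unsigned long task_weight = 0;
	int i;

	for (i = 0; i < nr; i++)
		task_weight += rqs[i].task_weight;

	return task_weight;
}

/* New scheme: only the busiest rq matters - nothing to pull if it is empty. */
static int busiest_rq_is_empty(const struct cfs_rq_stub *rqs, int busiest_cpu)
{
	return rqs[busiest_cpu].task_weight == 0;
}

int main(void)
{
	struct cfs_rq_stub rqs[NR_CPUS] = {
		{ .load_weight = 1024, .task_weight = 1024 },
		{ .load_weight = 2048, .task_weight = 0 },	/* busiest, but its weight comes from groups, not tasks */
		{ .load_weight = 1024, .task_weight = 1024 },
		{ .load_weight = 0,    .task_weight = 0 },
	};
	int busiest_cpu = 1;

	printf("aggregated task_weight over the span: %lu\n",
	       aggregate_task_weight(rqs, NR_CPUS));
	printf("skip this group (busiest rq has no tasks): %s\n",
	       busiest_rq_is_empty(rqs, busiest_cpu) ? "yes" : "no");
	return 0;
}

In this example the aggregated sum is positive even though the busiest rq itself has nothing movable, which is the case the per-rq check handles directly - and more cheaply, since the per-span summation in aggregate_group_weight() can be dropped, as the diff below does.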
diff --git a/kernel/sched.c b/kernel/sched.c
index 28229c5d4983..716cfc8e099e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -433,12 +433,6 @@ struct cfs_rq {
 		 * The sum of all runqueue weights within this span.
 		 */
 		unsigned long rq_weight;
-
-		/*
-		 * Weight contributed by tasks; this is the part we can
-		 * influence by moving tasks around.
-		 */
-		unsigned long task_weight;
 	} aggregate;
 #endif
 #endif
@@ -1473,10 +1467,6 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
  * rq_weight:
  *    Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
  *    B would get 2.
- *
- * task_weight:
- *    Part of the rq_weight contributed by tasks; all groups except B would
- *    get 1, B gets 2.
  */
 
 static inline struct aggregate_struct *
@@ -1524,16 +1514,12 @@ static void
 aggregate_group_weight(struct task_group *tg, int cpu, struct sched_domain *sd)
 {
 	unsigned long rq_weight = 0;
-	unsigned long task_weight = 0;
 	int i;
 
-	for_each_cpu_mask(i, sd->span) {
+	for_each_cpu_mask(i, sd->span)
 		rq_weight += tg->cfs_rq[i]->load.weight;
-		task_weight += tg->cfs_rq[i]->task_weight;
-	}
 
 	aggregate(tg, cpu)->rq_weight = rq_weight;
-	aggregate(tg, cpu)->task_weight = task_weight;
 }
 
 /*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b10c0d61a2a9..03b9fbd9d648 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1427,7 +1427,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		/*
 		 * empty group
 		 */
-		if (!aggregate(tg, this_cpu)->task_weight)
+		if (!tg->cfs_rq[busiest_cpu]->task_weight)
 			continue;
 
 		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;