author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-09-10 08:40:57 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-09-15 10:01:06 -0400
commit     ae154be1f34a674e6cbb43ccf6e442f56acd7a70 (patch)
tree       e1feba2c818216c97e11801ee7a6e3c2592b10af /kernel
parent     c88d5910890ad35af283344417891344604f0438 (diff)
sched: Weaken SD_POWERSAVINGS_BALANCE
One of the problems of power-saving balancing is that under certain
scenarios it is too slow and allows tons of real work to pile up.

Avoid this by ignoring the powersave stuff when there's real work to
be done.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       40
-rw-r--r--  kernel/sched_fair.c  21
2 files changed, 38 insertions(+), 23 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6c819f338b11..f0ccb8b926c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1538,6 +1538,26 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
+static struct sched_group *group_of(int cpu)
+{
+	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+	if (!sd)
+		return NULL;
+
+	return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+	struct sched_group *group = group_of(cpu);
+
+	if (!group)
+		return SCHED_LOAD_SCALE;
+
+	return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
@@ -3982,26 +4002,6 @@ ret:
 	return NULL;
 }
 
-static struct sched_group *group_of(int cpu)
-{
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
-	if (!sd)
-		return NULL;
-
-	return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
-	struct sched_group *group = group_of(cpu);
-
-	if (!group)
-		return SCHED_LOAD_SCALE;
-
-	return group->cpu_power;
-}
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 09d19f77eb3a..eaa00014b499 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1333,10 +1333,25 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 
 	for_each_domain(cpu, tmp) {
 		/*
-		 * If power savings logic is enabled for a domain, stop there.
+		 * If power savings logic is enabled for a domain, see if we
+		 * are not overloaded, if so, don't balance wider.
 		 */
-		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-			break;
+		if (tmp->flags & SD_POWERSAVINGS_BALANCE) {
+			unsigned long power = 0;
+			unsigned long nr_running = 0;
+			unsigned long capacity;
+			int i;
+
+			for_each_cpu(i, sched_domain_span(tmp)) {
+				power += power_of(i);
+				nr_running += cpu_rq(i)->cfs.nr_running;
+			}
+
+			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
+
+			if (nr_running/2 < capacity)
+				break;
+		}
 
 		switch (flag) {
 		case SD_BALANCE_WAKE:
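
As a rough illustration of the check added in the hunk above: the domain's aggregate cpu_power is scaled down to a whole-CPU "capacity", and the powersave "stop here" break is only kept while there are fewer than two runnable tasks per unit of capacity. The following standalone sketch restates that heuristic outside the kernel; the helper name domain_is_underloaded(), the plain arrays, and main() are illustrative only and are not part of the patch, which iterates sched_domain_span() with power_of() and cfs.nr_running as shown above.

	#include <stdbool.h>
	#include <stdio.h>

	#define SCHED_LOAD_SCALE	1024UL	/* nominal power of one CPU, as in the kernel */

	/* Round-to-nearest division for unsigned values, mirroring DIV_ROUND_CLOSEST(). */
	#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

	/*
	 * Hypothetical helper (not part of the patch): given per-CPU power and
	 * runnable-task counts for the CPUs spanned by a domain, decide whether
	 * the powersave break should still be taken, i.e. whether the domain is
	 * not yet overloaded.
	 */
	static bool domain_is_underloaded(const unsigned long *cpu_power,
					  const unsigned long *nr_running, int nr_cpus)
	{
		unsigned long power = 0, running = 0, capacity;
		int i;

		for (i = 0; i < nr_cpus; i++) {
			power += cpu_power[i];
			running += nr_running[i];
		}

		/* capacity ~= number of full CPUs' worth of compute in the domain */
		capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

		/* underloaded: fewer than two runnable tasks per unit of capacity */
		return running / 2 < capacity;
	}

	int main(void)
	{
		/* two full-power CPUs -> capacity 2 */
		unsigned long power[2] = { SCHED_LOAD_SCALE, SCHED_LOAD_SCALE };
		unsigned long light[2] = { 2, 1 };	/* 3 runnable tasks in total */
		unsigned long heavy[2] = { 2, 2 };	/* 4 runnable tasks in total */

		printf("3 tasks -> underloaded: %d\n", domain_is_underloaded(power, light, 2));
		printf("4 tasks -> underloaded: %d\n", domain_is_underloaded(power, heavy, 2));
		return 0;
	}

For example, a two-CPU domain at full power (capacity 2) still takes the powersave break with up to 3 runnable tasks, but with 4 or more nr_running/2 < capacity no longer holds and select_task_rq_fair() keeps walking wider domains, which is exactly the "ignore the powersave stuff when there's real work to be done" behaviour described in the commit message. The old code took the break unconditionally.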