author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-09-10 08:40:57 -0400
committer: Ingo Molnar <mingo@elte.hu>              2009-09-15 10:01:06 -0400
commit:    ae154be1f34a674e6cbb43ccf6e442f56acd7a70
tree:      e1feba2c818216c97e11801ee7a6e3c2592b10af /kernel/sched.c
parent:    c88d5910890ad35af283344417891344604f0438
sched: Weaken SD_POWERSAVINGS_BALANCE
One of the problems of power-saving balancing is that under certain
scenarios it is too slow and allows tons of real work to pile up.

Avoid this by ignoring the powersave stuff when there's real work to
be done.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6c819f338b11..f0ccb8b926c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1538,6 +1538,26 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
+static struct sched_group *group_of(int cpu)
+{
+	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+	if (!sd)
+		return NULL;
+
+	return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+	struct sched_group *group = group_of(cpu);
+
+	if (!group)
+		return SCHED_LOAD_SCALE;
+
+	return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
@@ -3982,26 +4002,6 @@ ret:
 	return NULL;
 }
 
-static struct sched_group *group_of(int cpu)
-{
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
-	if (!sd)
-		return NULL;
-
-	return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
-	struct sched_group *group = group_of(cpu);
-
-	if (!group)
-		return SCHED_LOAD_SCALE;
-
-	return group->cpu_power;
-}
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
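
Note on the hunks above: they are pure code movement, hence the symmetric
20 insertions / 20 deletions. group_of() fetches the lowest sched_domain's
group for a CPU under rcu_dereference(), and power_of() reports that
group's cpu_power, falling back to SCHED_LOAD_SCALE when no domain is
attached. Hoisting the pair up next to target_load() and
cpu_avg_load_per_task() makes them usable from the wakeup-balancing code
earlier in the file; the behavioral change described in the log message
lives outside this diffstat-limited view. As a minimal sketch (not code
from this patch; prefer_cpu() is a hypothetical helper), capacity-aware
load comparisons typically cross-multiply by power_of() so no integer
division is needed:

/*
 * Hypothetical illustration, not from this patch: decide which of two
 * CPUs is relatively less loaded, scaling by per-CPU power so a CPU
 * with cpu_power below SCHED_LOAD_SCALE (1024) counts as "smaller".
 *
 *	load_a / power_of(a) < load_b / power_of(b)
 * <=>	load_a * power_of(b) < load_b * power_of(a)
 */
static int prefer_cpu(int a, int b)
{
	unsigned long load_a = cpu_avg_load_per_task(a);
	unsigned long load_b = cpu_avg_load_per_task(b);

	/* true if 'a' carries relatively less load than 'b' */
	return load_a * power_of(b) < load_b * power_of(a);
}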