author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-01 04:34:38 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-09-04 04:09:55 -0400
commit		bdb94aa5dbd8b55e75f5a50b61312fe589e2c2d1 (patch)
tree		388da3774a3c69f4a0a74285478226311ec41c90	/kernel/sched.c
parent		e9e9250bc78e7f6342517214c0178a529807964b (diff)
sched: Try to deal with low capacity
When the capacity drops low, we want to migrate load away. Allow the
load-balancer to remove all tasks when we hit rock bottom.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083826.342231003@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
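To make the capacity change concrete, here is a small standalone sketch of the arithmetic; the cpu_power values are invented for illustration, and the DIV_ROUND_CLOSEST macro is reduced to its unsigned form (SCHED_LOAD_SCALE is 1024 in this kernel). Plain truncating division reports capacity 0 for anything below SCHED_LOAD_SCALE, while rounding keeps a moderately reduced CPU at capacity 1 and reserves 0 for the rock-bottom case the patch wants the balancer to be able to empty.

/*
 * Illustrative sketch only; the power values are invented and the
 * DIV_ROUND_CLOSEST macro is simplified to its unsigned form.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long powers[] = { 1024, 650, 200 };	/* nominal, reduced, rock bottom */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long old_cap = powers[i] / SCHED_LOAD_SCALE;	/* truncates */
		unsigned long new_cap = DIV_ROUND_CLOSEST(powers[i], SCHED_LOAD_SCALE);

		/* new_cap == 0 now marks a group the balancer is allowed to empty */
		printf("power %4lu: old capacity %lu, new capacity %lu\n",
		       powers[i], old_cap, new_cap);
	}
	return 0;
}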
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ab532b5de40e..5f5b359b01b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3908,8 +3908,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
+	sgs->group_capacity =
+		DIV_ROUND_CLOSEST(group->__cpu_power, SCHED_LOAD_SCALE);
 }
 
 /**
@@ -3959,7 +3959,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		 * and move all the excess tasks away.
 		 */
 		if (prefer_sibling)
-			sgs.group_capacity = 1;
+			sgs.group_capacity = min(sgs.group_capacity, 1UL);
 
 		if (local_group) {
 			sds->this_load = sgs.avg_load;
@@ -4191,6 +4191,26 @@ ret:
 	return NULL;
 }
 
+static struct sched_group *group_of(int cpu)
+{
+	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+	if (!sd)
+		return NULL;
+
+	return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+	struct sched_group *group = group_of(cpu);
+
+	if (!group)
+		return SCHED_LOAD_SCALE;
+
+	return group->__cpu_power;
+}
+
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
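The group_of()/power_of() helpers added above give the balancing path a CPU's current __cpu_power, falling back to SCHED_LOAD_SCALE when no sched_domain is attached. The sketch below, again standalone and with invented numbers, shows why the next hunk rescales weighted_cpuload() by SCHED_LOAD_SCALE / power: it makes run-queue loads comparable between CPUs whose effective power differs, so the same task on a half-power CPU counts as twice the load.

/*
 * Illustrative sketch only: mocks one CPU's weighted load and power to show
 * the normalisation applied in the patched find_busiest_queue() below.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL

/* wl * SCHED_LOAD_SCALE / power, mirroring the patched code path */
static unsigned long scaled_load(unsigned long wl, unsigned long power)
{
	return wl * SCHED_LOAD_SCALE / power;
}

int main(void)
{
	unsigned long wl = 1024;	/* roughly one nice-0 task's weight */

	printf("full power CPU : %lu\n", scaled_load(wl, 1024));	/* 1024 */
	printf("half power CPU : %lu\n", scaled_load(wl, 512));	/* 2048 */
	return 0;
}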
@@ -4203,15 +4223,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	int i;
 
 	for_each_cpu(i, sched_group_cpus(group)) {
+		unsigned long power = power_of(i);
+		unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
 		unsigned long wl;
 
 		if (!cpumask_test_cpu(i, cpus))
 			continue;
 
 		rq = cpu_rq(i);
-		wl = weighted_cpuload(i);
+		wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
+		wl /= power;
 
-		if (rq->nr_running == 1 && wl > imbalance)
+		if (capacity && rq->nr_running == 1 && wl > imbalance)
 			continue;
 
 		if (wl > max_load) {
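Putting the pieces together, here is a standalone walk-through of the new skip condition with invented numbers. Previously a runqueue with a single task whose load already exceeded the requested imbalance was always skipped, so a CPU squeezed down to almost no capacity could never hand off its last task; with the added capacity term, capacity == 0 disables that skip and the task can be pulled away.

/*
 * Illustrative sketch only: invented numbers, simplified macro. Shows how
 * capacity == 0 disables the "single task already above the imbalance" skip.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long power      = 200;	/* almost all capacity consumed elsewhere */
	unsigned long capacity   = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);	/* 0 */
	unsigned long wl         = 1024 * SCHED_LOAD_SCALE / power;		/* ~5242 */
	unsigned long imbalance  = 512;
	unsigned int  nr_running = 1;

	/* pre-patch rule: a lone task above the imbalance is never touched */
	if (nr_running == 1 && wl > imbalance)
		printf("old rule: skipped\n");

	/* patched rule: the skip only applies while the CPU still has capacity */
	if (capacity && nr_running == 1 && wl > imbalance)
		printf("new rule: skipped\n");
	else
		printf("new rule: considered for migration\n");

	return 0;
}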