author    Nikhil Rao <ncrao@google.com>  2010-10-13 15:09:36 -0400
committer Ingo Molnar <mingo@elte.hu>   2010-10-18 14:52:17 -0400
commit    2582f0eba54066b5e98ff2b27ef0cfa833b59f54 (patch)
tree      c5a41c2b1458a813b40f20f529a3b86a93c24932 /kernel
parent    ef8002f6848236de5adc613063ebeabddea8a6fb (diff)
sched: Set group_imb only if a task can be pulled from the busiest cpu
When cycling through sched groups to determine the busiest group, set
group_imb only if the busiest cpu has more than one runnable task. This
patch fixes the case where two cpus in a group have one runnable task
each, but there is a large weight differential between these two tasks.
The load balancer is unable to migrate any task from this group, and
hence should not consider this group to be imbalanced.

Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286996978-7007-3-git-send-email-ncrao@google.com>
[ small code readability edits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
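To make the gate concrete, here is a minimal user-space sketch of the
check (illustrative only, not kernel code). The struct, the per-cpu
loads, and the task counts are hypothetical, chosen so that the old
weight-differential test fires while the busiest cpu still has only one
runnable task, i.e. there is nothing useful to pull from it:

	#include <stdio.h>

	struct cpu_stat {
		unsigned long load;		/* weighted load on this cpu */
		unsigned int nr_running;	/* runnable tasks on this cpu */
	};

	int main(void)
	{
		struct cpu_stat cpus[] = {
			{ 1024, 4 },	/* cpu0: four light tasks */
			{ 8192, 1 },	/* cpu1: one heavy task */
		};
		unsigned long max_cpu_load = 0, min_cpu_load = ~0UL;
		unsigned long sum_weighted_load = 0, max_nr_running = 0;
		unsigned long sum_nr_running = 0, avg_load_per_task = 0;
		int i;

		for (i = 0; i < 2; i++) {
			sum_weighted_load += cpus[i].load;
			sum_nr_running += cpus[i].nr_running;
			if (cpus[i].load > max_cpu_load) {
				max_cpu_load = cpus[i].load;
				max_nr_running = cpus[i].nr_running;
			}
			if (min_cpu_load > cpus[i].load)
				min_cpu_load = cpus[i].load;
		}

		if (sum_nr_running)
			avg_load_per_task = sum_weighted_load / sum_nr_running;

		/* Old rule: a large weight differential alone marks the group. */
		int old_imb = (max_cpu_load - min_cpu_load) > 2 * avg_load_per_task;
		/* New rule: also require >1 runnable task on the busiest cpu. */
		int new_imb = old_imb && max_nr_running > 1;

		printf("old group_imb=%d, new group_imb=%d\n", old_imb, new_imb);
		return 0;
	}

With these numbers the sketch prints "old group_imb=1, new group_imb=0":
the busiest cpu holds a single task, so pulling it would only move the
imbalance elsewhere, and the new condition correctly declines to mark
the group.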
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bf87192e97fe..3656480e0f79 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2378,7 +2378,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
 {
-	unsigned long load, max_cpu_load, min_cpu_load;
+	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
 	int i;
 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
 	unsigned long avg_load_per_task = 0;
@@ -2389,6 +2389,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	/* Tally up the load of all CPUs in the group */
 	max_cpu_load = 0;
 	min_cpu_load = ~0UL;
+	max_nr_running = 0;
 
 	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
 		struct rq *rq = cpu_rq(i);
@@ -2406,8 +2407,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 			load = target_load(i, load_idx);
 		} else {
 			load = source_load(i, load_idx);
-			if (load > max_cpu_load)
+			if (load > max_cpu_load) {
 				max_cpu_load = load;
+				max_nr_running = rq->nr_running;
+			}
 			if (min_cpu_load > load)
 				min_cpu_load = load;
 		}
@@ -2447,11 +2450,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	if (sgs->sum_nr_running)
 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
 		sgs->group_imb = 1;
 
-	sgs->group_capacity =
-		DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
 }
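A side note on the readability hunk at the end: group_capacity is
cpu_power rounded to the nearest multiple of SCHED_LOAD_SCALE. Below is
a hedged user-space sketch of that rounding, assuming SCHED_LOAD_SCALE
is 1024 as in kernels of this era; the cpu_power values are hypothetical
and the DIV_ROUND_CLOSEST macro mirrors the kernel's semantics for
unsigned operands:

	#include <stdio.h>

	#define SCHED_LOAD_SCALE 1024UL
	#define DIV_ROUND_CLOSEST(x, divisor) \
		(((x) + ((divisor) / 2)) / (divisor))

	int main(void)
	{
		/* hypothetical cpu_power values for a sched group */
		unsigned long powers[] = { 2048, 1178, 589, 400 };

		for (int i = 0; i < 4; i++)
			printf("cpu_power=%4lu -> group_capacity=%lu\n",
			       powers[i],
			       DIV_ROUND_CLOSEST(powers[i], SCHED_LOAD_SCALE));
		return 0;
	}

Values at or above half a scale unit round up to a capacity of 1, while
a power of 400 rounds to 0; as the last two lines of the diff show, a
zero capacity falls through to fix_small_capacity().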