author		Peter Zijlstra <peterz@infradead.org>	2013-08-15 14:37:48 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-09-02 02:27:37 -0400
commit		3ae11c90fd055ba1b1b03a014f851b395bdd26ff (patch)
tree		6e2f571ab0ca90206bffacab9bf7aaaa91e25df5 /kernel
parent		38d0f7708543bcfa03d5ee55e8346f801b4a59c9 (diff)
sched/fair: Make group power more consistent
For easier access, fewer dereferences and a more consistent value, store
the group power in update_sg_lb_stats() and use it thereafter. The actual
value in sched_group::sched_group_power::power can change throughout the
load-balance pass if we're unlucky.

Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-739xxqkyvftrhnh9ncudutc7@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
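The idea is a per-pass snapshot: read the shared sched_group_power::power
field once into the local sg_lb_stats, and let every later computation use
that cached copy so they all agree, even if another CPU rewrites the shared
field mid-pass. A minimal self-contained sketch of the pattern, with
illustrative names and a stand-in scale constant rather than the kernel's
actual types:

#define POWER_SCALE 1024		/* stand-in for SCHED_POWER_SCALE */

/* Shared per-group value; another CPU may rewrite it at any time. */
struct shared_group_power {
	unsigned long power;
};

/* Per-pass statistics; group_power holds the one-time snapshot. */
struct group_stats {
	unsigned long group_power;
	unsigned long group_load;
	unsigned long avg_load;
	unsigned long capacity;
};

static void update_group_stats(struct group_stats *sgs,
			       const struct shared_group_power *sgp)
{
	sgs->group_power = sgp->power;	/* the single read of the shared field */

	/*
	 * All later math uses the snapshot, never sgp->power again, so the
	 * derived values stay mutually consistent within this pass. The
	 * capacity line approximates DIV_ROUND_CLOSEST().
	 */
	sgs->avg_load = (sgs->group_load * POWER_SCALE) / sgs->group_power;
	sgs->capacity = (sgs->group_power + POWER_SCALE / 2) / POWER_SCALE;
}

Besides consistency, the snapshot also replaces the repeated
sds->busiest->sgp->power pointer chasing in the hot load-balance path with a
single local load, which is the "easier access, fewer dereferences" half of
the rationale.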
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 57952198b01e..ccf20e76b6b2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4239,6 +4239,7 @@ struct sg_lb_stats {
 	unsigned long group_load; /* Total load over the CPUs of the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long load_per_task;
+	unsigned long group_power;
 	unsigned int sum_nr_running; /* Nr tasks running in the group */
 	unsigned int group_capacity;
 	unsigned int idle_cpus;
@@ -4518,7 +4519,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		update_group_power(env->sd, env->dst_cpu);
 
 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
+	sgs->group_power = group->sgp->power;
+	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
 
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -4537,7 +4539,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->group_imb = 1;
 
 	sgs->group_capacity =
-		DIV_ROUND_CLOSEST(group->sgp->power, SCHED_POWER_SCALE);
+		DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE);
 
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(env->sd, group);
@@ -4637,7 +4639,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
 	/* Now, start updating sd_lb_stats */
 	sds->total_load += sgs->group_load;
-	sds->total_pwr += sg->sgp->power;
+	sds->total_pwr += sgs->group_power;
 
 	if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
 		sds->busiest = sg;
@@ -4685,8 +4687,9 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 	if (env->dst_cpu > busiest_cpu)
 		return 0;
 
-	env->imbalance = DIV_ROUND_CLOSEST(sds->busiest_stat.avg_load *
-				sds->busiest->sgp->power, SCHED_POWER_SCALE);
+	env->imbalance = DIV_ROUND_CLOSEST(
+		sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
+		SCHED_POWER_SCALE);
 
 	return 1;
 }
@@ -4716,7 +4719,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 
 	scaled_busy_load_per_task =
 		(busiest->load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->sgp->power;
+		busiest->group_power;
 
 	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
 	    (scaled_busy_load_per_task * imbn)) {
@@ -4730,32 +4733,32 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	 * moving them.
 	 */
 
-	pwr_now += sds->busiest->sgp->power *
+	pwr_now += busiest->group_power *
 			min(busiest->load_per_task, busiest->avg_load);
-	pwr_now += sds->local->sgp->power *
+	pwr_now += local->group_power *
 			min(local->load_per_task, local->avg_load);
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
 	tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->sgp->power;
+		busiest->group_power;
 	if (busiest->avg_load > tmp) {
-		pwr_move += sds->busiest->sgp->power *
+		pwr_move += busiest->group_power *
 			    min(busiest->load_per_task,
 				busiest->avg_load - tmp);
 	}
 
 	/* Amount of load we'd add */
-	if (busiest->avg_load * sds->busiest->sgp->power <
+	if (busiest->avg_load * busiest->group_power <
 	    busiest->load_per_task * SCHED_POWER_SCALE) {
-		tmp = (busiest->avg_load * sds->busiest->sgp->power) /
-			sds->local->sgp->power;
+		tmp = (busiest->avg_load * busiest->group_power) /
+			local->group_power;
 	} else {
 		tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-			sds->local->sgp->power;
+			local->group_power;
 	}
-	pwr_move += sds->local->sgp->power *
+	pwr_move += local->group_power *
 		    min(local->load_per_task, local->avg_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;
 
 	/* Move if we gain throughput */
@@ -4800,7 +4803,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		(busiest->sum_nr_running - busiest->group_capacity);
 
 	load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-	load_above_capacity /= sds->busiest->sgp->power;
+	load_above_capacity /= busiest->group_power;
 	}
 
 	/*
@@ -4818,8 +4821,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 
 	/* How much load to actually move to equalise the imbalance */
 	env->imbalance = min(
-		max_pull * sds->busiest->sgp->power,
-		(sds->avg_load - local->avg_load) * sds->local->sgp->power
+		max_pull * busiest->group_power,
+		(sds->avg_load - local->avg_load) * local->group_power
 	) / SCHED_POWER_SCALE;
 
 	/*