path: root/kernel
author    Michael Wang <wangyun@linux.vnet.ibm.com>  2012-07-12 04:10:13 -0400
committer Ingo Molnar <mingo@kernel.org>  2012-07-31 11:00:16 -0400
commit    b9403130a5350fca59a50ed11c198cb8c7e54119 (patch)
tree      68a4a4c6c270c14e7bbfa4c928e0e2a864ac4127 /kernel
parent    a7e4786b937a3ae918a7520cfdba557a80915fa7 (diff)
sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'
With this patch, struct lb_env carries a pointer to the load-balancing
cpumask, so we no longer need to pass a cpumask around.

Signed-off-by: Michael Wang <wangyun@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/4FFE8665.3080705@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
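The change is a textbook parameter-object cleanup: state that every helper on
the load-balancing path needs (here, the candidate cpumask) is stored once in
the per-invocation environment struct, so the helpers' signatures shrink and
the mask cannot drift out of sync between callers. A minimal user-space sketch
of the pattern, under simplifying assumptions: cpu_allowed() and
pick_last_allowed() are illustrative helpers, not the kernel's API, and the
mask is a plain unsigned long rather than a real struct cpumask.

#include <stdio.h>

/* Simplified stand-in for the kernel's per-balance-attempt lb_env. */
struct lb_env {
	int dst_cpu;
	long imbalance;
	/* Stored once at initialization; helpers read it from here
	 * instead of taking it as an extra parameter. */
	const unsigned long *cpus;
};

/* Illustrative helper: test a CPU against the stored mask. */
static int cpu_allowed(const struct lb_env *env, int cpu)
{
	return (int)((*env->cpus >> cpu) & 1UL);
}

/* Illustrative helper: only env is passed down, mirroring how
 * find_busiest_queue() loses its cpumask parameter in this patch. */
static int pick_last_allowed(const struct lb_env *env, int nr_cpus)
{
	int i, pick = -1;

	for (i = 0; i < nr_cpus; i++)
		if (cpu_allowed(env, i))
			pick = i;
	return pick;
}

int main(void)
{
	unsigned long mask = 0xbUL;	/* CPUs 0, 1 and 3 */
	struct lb_env env = {
		.dst_cpu	= 0,
		.imbalance	= 0,
		.cpus		= &mask,	/* set once, used everywhere */
	};

	printf("picked cpu: %d\n", pick_last_allowed(&env, 4)); /* prints 3 */
	return 0;
}

This mirrors the shape of the real call sites after the patch:
find_busiest_group(&env, balance) and find_busiest_queue(&env, group) no
longer mention the mask at all.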
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952..d0cc03b3e70b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-					const struct cpumask *cpus,
-					int *balance, struct sd_lb_stats *sds)
+					int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  *   to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  *		put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_grpmask    = sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;