Diffstat (limited to 'kernel/sched/fair.c')

 kernel/sched/fair.c | 40 +++++++++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952..c219bf8d704c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
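The new field lets the load-balance cpumask travel inside struct lb_env instead of being passed as an extra parameter to every helper; the hunks below drop the `cpus` argument from update_sg_lb_stats(), update_sd_lb_stats(), find_busiest_group() and find_busiest_queue() accordingly. A minimal sketch of the pattern, with hypothetical names standing in for the kernel types:

    /*
     * Illustrative only, not kernel code: the caller stashes the mask
     * in the env once and helpers read env->cpus. All names here are
     * hypothetical stand-ins.
     */
    struct mask {
            unsigned long bits;
    };

    struct env {
            long imbalance;
            struct mask *cpus;      /* set of CPUs under consideration */
    };

    static int helper(struct env *env)
    {
            /* was helper(env, cpus); the mask now rides along in env */
            return env->cpus->bits != 0;
    }

    static void balance(struct mask *cpus)
    {
            struct env env = { .imbalance = 0, .cpus = cpus };

            helper(&env);           /* no separate cpus argument */
    }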
@@ -3384,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 
 static void update_h_load(long cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long now = jiffies;
+
+	if (rq->h_load_throttle == now)
+		return;
+
+	rq->h_load_throttle = now;
+
 	rcu_read_lock();
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 	rcu_read_unlock();
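This early return throttles update_h_load() to at most once per jiffy per runqueue: walk_tg_tree() traverses the whole task-group hierarchy, which is expensive on large cgroup trees, and the rq->h_load_throttle stamp lets repeat callers within the same tick reuse the previous result. A userspace sketch of the same once-per-tick guard, with hypothetical names (`ticks` standing in for jiffies, recompute() for the tree walk):

    struct cached {
            unsigned long stamp;    /* tick of the last recompute */
            long value;
    };

    static unsigned long ticks;     /* advanced by a timer elsewhere */

    static long recompute(void)
    {
            return 42;              /* placeholder for the expensive walk */
    }

    static long get_value(struct cached *c)
    {
            unsigned long now = ticks;

            if (c->stamp == now)    /* already refreshed this tick */
                    return c->value;

            c->stamp = now;
            c->value = recompute();
            return c->value;
    }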
@@ -3653,8 +3664,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3681,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3810,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-				      const struct cpumask *cpus,
-				      int *balance, struct sd_lb_stats *sds)
+				      int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3827,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4063,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4072,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4082,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4162,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4177,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4258,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_grpmask	= sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
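With `.cpus = cpus` in the initializer, the mask that load_balance() fills from cpu_active_mask rides along inside env, which is what allows find_busiest_group() and find_busiest_queue() to shed their separate cpumask argument in the hunks that follow (see the sketch after the first hunk).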
@@ -4260,7 +4267,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4277,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -4294,11 +4301,10 @@ redo:
 	env.src_rq = busiest;
 	env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
+	update_h_load(env.src_cpu);
 more_balance:
 	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
-	if (!env.loop)
-		update_h_load(env.src_cpu);
 
 	/*
 	 * cur_ld_moved - load moved in current iteration
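The final hunk hoists update_h_load() out of the irq-disabled, double-rq-locked region: it now runs once before the more_balance retry loop rather than on the loop's first pass under rq->lock (the old `if (!env.loop)` guard), which shortens the rq->lock hold time. A sketch of the hoisting pattern, using pthreads purely for illustration:

    /*
     * Illustrative only: the expensive precomputation runs once before
     * the retry loop instead of under the lock on the first iteration.
     */
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void expensive_precompute(void) { }  /* cf. update_h_load() */
    static int move_work(void) { return 0; }    /* cf. migrating tasks */

    static void balance_loop(void)
    {
            int more;

            expensive_precompute(); /* once, outside the locked region */

            do {
                    pthread_mutex_lock(&lock);
                    /* was: if (first pass) expensive_precompute(); */
                    more = move_work();
                    pthread_mutex_unlock(&lock);
            } while (more);
    }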