 kernel/sched/fair.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 124e6b6999a7..0b42f4487329 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3775,7 +3775,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
 {
-	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
+	unsigned long nr_running, max_nr_running, min_nr_running;
+	unsigned long load, max_cpu_load, min_cpu_load;
 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
 	unsigned long avg_load_per_task = 0;
 	int i;
@@ -3787,10 +3788,13 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_cpu_load = 0;
 	min_cpu_load = ~0UL;
 	max_nr_running = 0;
+	min_nr_running = ~0UL;
 
 	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		nr_running = rq->nr_running;
+
 		/* Bias balancing toward cpus of our domain */
 		if (local_group) {
 			if (idle_cpu(i) && !first_idle_cpu) {
@@ -3801,16 +3805,19 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			load = target_load(i, load_idx);
 		} else {
 			load = source_load(i, load_idx);
-			if (load > max_cpu_load) {
+			if (load > max_cpu_load)
 				max_cpu_load = load;
-				max_nr_running = rq->nr_running;
-			}
 			if (min_cpu_load > load)
 				min_cpu_load = load;
+
+			if (nr_running > max_nr_running)
+				max_nr_running = nr_running;
+			if (min_nr_running > nr_running)
+				min_nr_running = nr_running;
 		}
 
 		sgs->group_load += load;
-		sgs->sum_nr_running += rq->nr_running;
+		sgs->sum_nr_running += nr_running;
 		sgs->sum_weighted_load += weighted_cpuload(i);
 		if (idle_cpu(i))
 			sgs->idle_cpus++;
@@ -3848,7 +3855,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	if (sgs->sum_nr_running)
 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
+	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
+	    (max_nr_running - min_nr_running) > 1)
 		sgs->group_imb = 1;
 
 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
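For reference, a minimal user-space sketch of the reworked group_imb test, kept separate from the patch above. The struct cpu_stat type, the group_is_imbalanced() helper and the sample numbers are invented for illustration and are not kernel code; it only mirrors the new condition, which flags a group as imbalanced when the per-cpu load spread is at least one average task's worth of load and the runnable-task counts also differ by more than one.

#include <stdio.h>

struct cpu_stat {
	unsigned long load;		/* illustrative per-cpu weighted load */
	unsigned long nr_running;	/* illustrative per-cpu runnable count */
};

static int group_is_imbalanced(const struct cpu_stat *cpus, int n)
{
	unsigned long max_cpu_load = 0, min_cpu_load = ~0UL;
	unsigned long max_nr_running = 0, min_nr_running = ~0UL;
	unsigned long sum_load = 0, sum_nr_running = 0;
	unsigned long avg_load_per_task = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned long load = cpus[i].load;
		unsigned long nr_running = cpus[i].nr_running;

		/* Track load and task-count extremes independently. */
		if (load > max_cpu_load)
			max_cpu_load = load;
		if (min_cpu_load > load)
			min_cpu_load = load;

		if (nr_running > max_nr_running)
			max_nr_running = nr_running;
		if (min_nr_running > nr_running)
			min_nr_running = nr_running;

		sum_load += load;
		sum_nr_running += nr_running;
	}

	if (sum_nr_running)
		avg_load_per_task = sum_load / sum_nr_running;

	/*
	 * Imbalance requires both a large load spread and a spread of
	 * more than one runnable task across the group's cpus.
	 */
	return (max_cpu_load - min_cpu_load) >= avg_load_per_task &&
	       (max_nr_running - min_nr_running) > 1;
}

int main(void)
{
	/* One cpu loaded with several tasks, the others nearly idle. */
	struct cpu_stat cpus[] = {
		{ .load = 3072, .nr_running = 3 },
		{ .load = 1024, .nr_running = 1 },
		{ .load = 0,    .nr_running = 0 },
	};

	printf("group_imb = %d\n", group_is_imbalanced(cpus, 3));
	return 0;
}

With these sample values the helper reports an imbalance (load spread 3072 against an average per-task load of 1024, task-count spread of 3), whereas a group whose cpus each run one task would not be flagged even if their loads differed.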