-rw-r--r--	kernel/sched.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1dc29dec38a9..dbd4490afec1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1910,6 +1910,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+	unsigned long max_pull;
 	int load_idx;
 
 	max_load = this_load = total_load = total_pwr = 0;
@@ -1959,7 +1960,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		group = group->next;
 	} while (group != sd->groups);
 
-	if (!busiest || this_load >= max_load)
+	if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
 		goto out_balanced;
 
 	avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -1979,8 +1980,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * by pulling tasks to us. Be careful of negative numbers as they'll
 	 * appear as very large values with unsigned longs.
 	 */
+
+	/* Don't want to pull so many tasks that a group would go idle */
+	max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min((max_load - avg_load) * busiest->cpu_power,
+	*imbalance = min(max_pull * busiest->cpu_power,
 			(avg_load - this_load) * this->cpu_power)
 			/ SCHED_LOAD_SCALE;
 
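For context, a minimal standalone sketch of the arithmetic this patch changes, with made-up per-group loads. The names max_load, avg_load, this_load, cpu_power and SCHED_LOAD_SCALE mirror the kernel code above; the value 128 for SCHED_LOAD_SCALE, the sample loads, and the min_ul() helper (standing in for the kernel's min() macro) are illustrative assumptions, not taken from the patch.

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL	/* illustrative stand-in; the real constant is in include/linux/sched.h */

/* stand-in for the kernel's min() macro */
static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* hypothetical per-group loads, already scaled by SCHED_LOAD_SCALE */
	unsigned long max_load  = 160;	/* busiest group: just over one task's worth */
	unsigned long this_load = 64;	/* our group */
	unsigned long avg_load  = 112;	/* domain-wide average */
	unsigned long cpu_power = SCHED_LOAD_SCALE;	/* single-CPU groups */
	unsigned long max_pull, old_imbalance, new_imbalance;

	/* old behaviour: pull the busiest group all the way down to the mean */
	old_imbalance = min_ul((max_load - avg_load) * cpu_power,
			       (avg_load - this_load) * cpu_power)
			/ SCHED_LOAD_SCALE;

	/* new behaviour: never pull it below one full CPU's worth of load */
	max_pull = min_ul(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
	new_imbalance = min_ul(max_pull * cpu_power,
			       (avg_load - this_load) * cpu_power)
			/ SCHED_LOAD_SCALE;

	printf("old imbalance: %lu\n", old_imbalance);	/* prints 48 */
	printf("new imbalance: %lu\n", new_imbalance);	/* prints 32 */
	printf("busiest group left at: %lu (== SCHED_LOAD_SCALE)\n",
	       max_load - new_imbalance);
	return 0;
}

With these numbers the old formula pulls the busiest group down by 48, to the domain average of 112, which is below one CPU's worth of load; the new max_pull cap limits the pull to 32, leaving the busiest group at exactly SCHED_LOAD_SCALE so it is not balanced into idleness. The new early exit (max_load <= SCHED_LOAD_SCALE) complements this: it bails out to out_balanced before max_load - SCHED_LOAD_SCALE is ever computed with max_load at or below SCHED_LOAD_SCALE, where the unsigned subtraction would wrap around — the same hazard the existing comment about negative numbers and unsigned longs warns about.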