author		Siddha, Suresh B <suresh.b.siddha@intel.com>	2005-09-10 03:26:21 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 13:06:24 -0400
commit		0c117f1b4d14380baeed9c883f765ee023da8761 (patch)
tree		8bd81914e49493bdae4b04db307a48dcfc0b6316 /kernel
parent		fa3b6ddc3f4a8eadba52234134cdb59c28b5332d (diff)
[PATCH] sched: allow the load to grow upto its cpu_power
Don't pull tasks from a group if that would cause the group's total load to
drop below its total cpu_power (i.e. cause the group to start going idle).
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
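
The arithmetic can be seen in isolation with a minimal user-space C sketch of
the clamp this commit adds to find_busiest_group(). This is illustrative, not
kernel code: SCHED_LOAD_SCALE is hard-coded to 128 (the 2.6-era value), the
load figures are made up, and min_ul() stands in for the kernel's min() macro.

#include <stdio.h>

/* Illustrative value; 2.6-era kernels used 128 (1 << 7). */
#define SCHED_LOAD_SCALE 128UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical loads for the busiest group and the domain average. */
	unsigned long max_load = 180;
	unsigned long avg_load = 100;

	/*
	 * Guard from the patch: if the busiest group's load does not even
	 * exceed one CPU's worth of capacity, treat the domain as balanced.
	 * This also keeps the unsigned subtraction below from wrapping.
	 */
	if (max_load <= SCHED_LOAD_SCALE) {
		puts("balanced: busiest group is not over capacity");
		return 0;
	}

	/* Old behaviour: pull enough load to reach the domain average. */
	unsigned long old_pull = max_load - avg_load;

	/*
	 * New behaviour: additionally cap the pull so the group keeps at
	 * least SCHED_LOAD_SCALE of load, i.e. it never goes idle.
	 */
	unsigned long max_pull = min_ul(max_load - avg_load,
					max_load - SCHED_LOAD_SCALE);

	printf("old pull: %lu, clamped pull: %lu\n", old_pull, max_pull);
	return 0;
}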
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1dc29dec38a9..dbd4490afec1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1910,6 +1910,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+	unsigned long max_pull;
 	int load_idx;
 
 	max_load = this_load = total_load = total_pwr = 0;
@@ -1959,7 +1960,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		group = group->next;
 	} while (group != sd->groups);
 
-	if (!busiest || this_load >= max_load)
+	if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
 		goto out_balanced;
 
 	avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -1979,8 +1980,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * by pulling tasks to us. Be careful of negative numbers as they'll
 	 * appear as very large values with unsigned longs.
 	 */
+
+	/* Don't want to pull so many tasks that a group would go idle */
+	max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+
 	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min((max_load - avg_load) * busiest->cpu_power,
+	*imbalance = min(max_pull * busiest->cpu_power,
 			 (avg_load - this_load) * this->cpu_power)
 			/ SCHED_LOAD_SCALE;
 
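
To see the effect numerically (values assumed for illustration, not from the
patch): with SCHED_LOAD_SCALE = 128 and, say, max_load = 180, avg_load = 100,
this_load = 40, and both groups at cpu_power = 128, the old code would move
min(80, 60) = 60 units of load; the new code first clamps the pull to
max_pull = min(80, 180 - 128) = 52, so only 52 units move, leaving the busiest
group exactly its cpu_power worth of load (128) rather than pulling it toward
idle. Note also that the new max_load <= SCHED_LOAD_SCALE test in the
out_balanced check guarantees max_load - SCHED_LOAD_SCALE cannot wrap around
as an unsigned value, consistent with the existing comment about negative
numbers appearing as very large unsigned longs.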