author		Peter Williams <pwil3058@bigpond.net.au>	2006-06-27 05:54:36 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-27 20:32:44 -0400
commit		50ddd96917e4548b3813bfb5dd6f97f052b652bd
tree		f8e62672b35ebdefee048e042a54e8ceaeab0cf0
parent		2dd73a4f09beacadde827a032cf15fd8b1fa3d48
[PATCH] sched: modify move_tasks() to improve load balancing outcomes
Problem:
The move_tasks() function is designed to move UP TO the amount of load it
is asked to move and, in doing this, it skips over tasks looking for ones
whose load weights are less than or equal to the remaining load to be
moved.  This is (in general) a good thing, but it has the unfortunate result
of breaking one of the original load balancer's good points: namely, that
(within the limits imposed by the active/expired array model and the fact
that the expired array is processed first) it moves high priority tasks
before low priority ones.  That means there is a good chance (see the
active/expired problem for why it is only a chance) that the highest
priority task on the queue which is not actually running on the CPU will be
moved to the other CPU, where (as a high priority task) it may preempt the
current task.
Solution:
Modify move_tasks() so that high priority tasks are not skipped when moving
them will make them the highest priority task on their new run queue.
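To make the effect concrete, here is a minimal, self-contained C sketch (not
the kernel source: the struct, the field names and the skip_old()/skip_new()
helpers are invented stand-ins for the runqueue/prio_array machinery)
contrasting the old skip test with the new one.  this_min_prio plays the same
role as in the patch below: the smallest prio value known to be on the
destination runqueue, initialised from the destination CPU's current task.

/*
 * Illustrative only -- simplified stand-ins for the kernel's
 * runqueue/prio_array machinery.  Lower prio values mean higher
 * priority, as in the kernel.
 */
#include <stdio.h>

struct task {
	int prio;		/* 0 = highest priority */
	long load_weight;	/* weighted contribution to CPU load */
};

/* Old rule: skip any task heavier than the load still to be moved. */
static int skip_old(const struct task *t, long rem_load_move)
{
	return t->load_weight > rem_load_move;
}

/*
 * New rule: a task that would become the highest priority task on the
 * destination runqueue (prio below this_min_prio) is never skipped on
 * load weight alone.
 */
static int skip_new(const struct task *t, long rem_load_move, int this_min_prio)
{
	return t->prio >= this_min_prio && t->load_weight > rem_load_move;
}

int main(void)
{
	struct task heavy_low  = { .prio = 100, .load_weight = 2048 };
	struct task heavy_high = { .prio = 10,  .load_weight = 2048 };
	long rem_load_move = 1024;	/* less than either task's weight */
	int this_min_prio = 50;		/* prio of the destination CPU's curr */

	printf("old rule: low-prio skipped=%d, high-prio skipped=%d\n",
	       skip_old(&heavy_low, rem_load_move),
	       skip_old(&heavy_high, rem_load_move));
	printf("new rule: low-prio skipped=%d, high-prio skipped=%d\n",
	       skip_new(&heavy_low, rem_load_move, this_min_prio),
	       skip_new(&heavy_high, rem_load_move, this_min_prio));
	return 0;
}

In the patch itself the comparison is made against idx, the priority-array
slot currently being scanned (which equals the candidate task's prio), and
this_min_prio is lowered each time a task is successfully pulled, so only
tasks that would become the top priority task on their new runqueue bypass
the load-weight check.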
Signed-off-by: Peter Williams <pwil3058@bigpond.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 kernel/sched.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1847a4456a2d..b4dab63c6dbd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1955,7 +1955,7 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 {
 	prio_array_t *array, *dst_array;
 	struct list_head *head, *curr;
-	int idx, pulled = 0, pinned = 0;
+	int idx, pulled = 0, pinned = 0, this_min_prio;
 	long rem_load_move;
 	task_t *tmp;
 
@@ -1964,6 +1964,7 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 
 	rem_load_move = max_load_move;
 	pinned = 1;
+	this_min_prio = this_rq->curr->prio;
 
 	/*
 	 * We first consider expired tasks. Those will likely not be
@@ -2003,7 +2004,12 @@ skip_queue:
 
 	curr = curr->prev;
 
-	if (tmp->load_weight > rem_load_move ||
+	/*
+	 * To help distribute high priority tasks accross CPUs we don't
+	 * skip a task if it will be the highest priority task (i.e. smallest
+	 * prio value) on its new queue regardless of its load weight
+	 */
+	if ((idx >= this_min_prio && tmp->load_weight > rem_load_move) ||
 	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
 		if (curr != head)
 			goto skip_queue;
@@ -2025,6 +2031,8 @@ skip_queue:
 	 * and the prescribed amount of weighted load.
 	 */
 	if (pulled < max_nr_move && rem_load_move > 0) {
+		if (idx < this_min_prio)
+			this_min_prio = idx;
 		if (curr != head)
 			goto skip_queue;
 		idx++;