about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Williams <pwil3058@bigpond.net.au>2006-06-27 05:54:37 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-27 20:32:44 -0400
commit615052dc3bf96278a843a64d3d1eea03532028c3 (patch)
treee7d5c61bd244e5fbde4ada9ae2ef1ebe7923fb53
parent50ddd96917e4548b3813bfb5dd6f97f052b652bd (diff)
[PATCH] sched: Avoid unnecessarily moving highest priority task in move_tasks()
Problem:

To help distribute high priority tasks evenly across the available CPUs move_tasks() does not, under some circumstances, skip tasks whose load weight is bigger than the designated amount.  Because the highest priority task on the busiest queue may be on the expired array it may be moved as a result of this mechanism.  Apart from not being the most desirable way to redistribute the high priority tasks (we'd rather move the second highest priority task), there is a risk that this could set up a loop with this task bouncing backwards and forwards between the two queues.  (This latter possibility can be demonstrated by running a nice==-20 CPU bound task on an otherwise quiet 2 CPU system.)

Solution:

Modify the mechanism so that it does not override skip for the highest priority task on the CPU.  Of course, if there is more than one task at the highest priority then it will allow the override for one of them as this is a desirable redistribution of high priority tasks.

Signed-off-by: Peter Williams <pwil3058@bigpond.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- kernel/sched.c | 26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b4dab63c6dbd..0ec84f57695d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1941,6 +1941,7 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 	return 1;
 }
 
+#define rq_best_prio(rq)	min((rq)->curr->prio, (rq)->best_expired_prio)
 /*
  * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
  * load from busiest to this_rq, as part of a balancing operation within
@@ -1955,7 +1956,9 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 {
 	prio_array_t *array, *dst_array;
 	struct list_head *head, *curr;
-	int idx, pulled = 0, pinned = 0, this_min_prio;
+	int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio;
+	int busiest_best_prio_seen;
+	int skip_for_load;	/* skip the task based on weighted load issues */
 	long rem_load_move;
 	task_t *tmp;
 
@@ -1964,7 +1967,16 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
 
 	rem_load_move = max_load_move;
 	pinned = 1;
-	this_min_prio = this_rq->curr->prio;
+	this_best_prio = rq_best_prio(this_rq);
+	busiest_best_prio = rq_best_prio(busiest);
+	/*
+	 * Enable handling of the case where there is more than one task
+	 * with the best priority.  If the current running task is one
+	 * of those with prio==busiest_best_prio we know it won't be moved
+	 * and therefore it's safe to override the skip (based on load) of
+	 * any task we find with that prio.
+	 */
+	busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio;
 
 	/*
 	 * We first consider expired tasks. Those will likely not be
@@ -2009,8 +2021,12 @@ skip_queue:
 	 * skip a task if it will be the highest priority task (i.e. smallest
 	 * prio value) on its new queue regardless of its load weight
 	 */
-	if ((idx >= this_min_prio && tmp->load_weight > rem_load_move) ||
+	skip_for_load = tmp->load_weight > rem_load_move;
+	if (skip_for_load && idx < this_best_prio)
+		skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio;
+	if (skip_for_load ||
 	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+		busiest_best_prio_seen |= idx == busiest_best_prio;
 		if (curr != head)
 			goto skip_queue;
 		idx++;
@@ -2031,8 +2047,8 @@ skip_queue:
 	 * and the prescribed amount of weighted load.
 	 */
 	if (pulled < max_nr_move && rem_load_move > 0) {
-		if (idx < this_min_prio)
-			this_min_prio = idx;
+		if (idx < this_best_prio)
+			this_best_prio = idx;
 		if (curr != head)
 			goto skip_queue;
 		idx++;