| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-06-27 07:41:31 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-06-27 08:31:42 -0400 |
| commit | 051c67640e771fd6ad1b676fc0c16c379b3c6f80 (patch) | |
| tree | b56d9841d0ddb00a715489e83969051f7c4849e0 /kernel/sched.c | |
| parent | 4be9daaa1b33701f011f4117f22dc1e45a3e6e34 (diff) | |
sched: remove prio preference from balance decisions
Priority loses much of its meaning in a hierarchical context. So don't
use it in balance decisions.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 12 |
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e2aa394a812..10d43f5bf0fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2896,7 +2896,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct rq_iterator *iterator)
 {
-	int loops = 0, pulled = 0, pinned = 0, skip_for_load;
+	int loops = 0, pulled = 0, pinned = 0;
 	struct task_struct *p;
 	long rem_load_move = max_load_move;
 
@@ -2912,14 +2912,8 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 next:
 	if (!p || loops++ > sysctl_sched_nr_migrate)
 		goto out;
-	/*
-	 * To help distribute high priority tasks across CPUs we don't
-	 * skip a task if it will be the highest priority task (i.e. smallest
-	 * prio value) on its new queue regardless of its load weight
-	 */
-	skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
-							 SCHED_LOAD_SCALE_FUZZ;
-	if ((skip_for_load && p->prio >= *this_best_prio) ||
+
+	if ((p->se.load.weight >> 1) > rem_load_move ||
 	    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
 		p = iterator->next(iterator->arg);
 		goto next;
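
For context, a minimal standalone sketch (not kernel code) of the skip test balance_tasks() is left with after this patch: a candidate task is now passed over purely on load, with no SCHED_LOAD_SCALE_FUZZ slack and no exception for high-priority tasks. The function name and reduced parameter types below are illustrative only; "weight" stands in for p->se.load.weight and "rem_load_move" for the load the balancer still wants to pull.

```c
#include <stdbool.h>

/* Illustrative sketch of the post-patch skip condition. */
static bool skip_for_load(unsigned long weight, long rem_load_move)
{
	/*
	 * Skip the task when more than half of its weight exceeds the
	 * remaining load budget. The old code softened this test with
	 * SCHED_LOAD_SCALE_FUZZ and waived it for tasks that would be
	 * the highest-priority task on the destination queue; both
	 * escape hatches are gone, so the decision is load-only.
	 */
	return (long)(weight >> 1) > rem_load_move;
}
```

For example, a task with weight 2048 is skipped once rem_load_move drops below 1024, regardless of its priority.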