author	Con Kolivas <kernel@kolivas.org>	2005-11-09 00:38:58 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-09 10:56:32 -0500
commit	3b0bd9bc6f3b8a47853d1b1de4520de3878e8941
tree	5900a1fbf46d3c83aca09fca0ba664af40151082 /kernel
parent	dad1c65c8000f4485d8602e1875ded77e0d72133
[PATCH] sched: smp nice bias busy queues on idle rebalance
To intensify the 'nice' support across physical cpus on SMP we can bias the
loads on idle rebalancing. To prevent idle rebalance from trying to pull
tasks from queues that appear heavily loaded we only bias the load if there
is more than one task running.

Add some minor micro-optimisations and have only one return from the
__source_load and __target_load functions.

Fix the fact that target_load was not biased by priority when type == 0.

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
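For illustration, here is a minimal standalone sketch of the biasing rule the
patch introduces. The struct, enum, and values below are simplified stand-ins,
not the kernel's real definitions; SCHED_LOAD_SCALE is the scheduler's
fixed-point load scale (128 in 2.6-era kernels), and prio_bias = 4 is a
hypothetical weighting picked just for the demonstration.

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL	/* fixed-point load scale, 128 in this era */

enum idle_type { SCHED_IDLE, NOT_IDLE };	/* simplified; the kernel has more states */

/* Hypothetical, stripped-down runqueue: only the fields this patch touches. */
struct runqueue {
	unsigned long nr_running;	/* number of runnable tasks on the queue */
	unsigned long cpu_load[3];	/* decaying load averages, one per index */
	unsigned long prio_bias;	/* priority weighting of the queued tasks */
};

/*
 * Post-patch __source_load rule: always bias when busy rebalancing; on an
 * idle rebalance, bias only if more than one task is running, so a queue
 * holding a single (possibly high-priority) task does not look pullable.
 */
static unsigned long source_load(const struct runqueue *rq, int type,
				 enum idle_type idle)
{
	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
	unsigned long load;

	if (type == 0)
		load = load_now;
	else
		load = rq->cpu_load[type - 1] < load_now ?
		       rq->cpu_load[type - 1] : load_now;

	if (idle == NOT_IDLE || rq->nr_running > 1)
		load *= rq->prio_bias;

	return load;
}

int main(void)
{
	struct runqueue one = { 1, { 128, 128, 128 }, 4 };
	struct runqueue two = { 2, { 256, 256, 256 }, 4 };

	/* Idle rebalance: the single-task queue stays unbiased (128), */
	/* while the two-task queue is biased (256 * 4 = 1024).        */
	printf("%lu %lu\n", source_load(&one, 1, SCHED_IDLE),
			    source_load(&two, 1, SCHED_IDLE));
	return 0;
}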
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c | 41
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d9dbf8ee6ca4..ec9ea9119b98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -972,22 +972,26 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	if (idle == NOT_IDLE) {
+	if (type == 0)
+		source_load = load_now;
+	else
+		source_load = min(cpu_load, load_now);
+
+	if (idle == NOT_IDLE || rq->nr_running > 1)
 		/*
-		 * If we are balancing busy runqueues the load is biased by
-		 * priority to create 'nice' support across cpus.
+		 * If we are busy rebalancing the load is biased by
+		 * priority to create 'nice' support across cpus. When
+		 * idle rebalancing we should only bias the source_load if
+		 * there is more than one task running on that queue to
+		 * prevent idle rebalance from trying to pull tasks from a
+		 * queue with only one running task.
 		 */
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
+		source_load *= rq->prio_bias;
 
-	if (type == 0)
-		return load_now;
-
-	return min(cpu_load, load_now);
+	return source_load;
 }
 
 static inline unsigned long source_load(int cpu, int type)
@@ -1001,17 +1005,18 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
-		return load_now;
+		target_load = load_now;
+	else
+		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE) {
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
-	return max(cpu_load, load_now);
+	if (idle == NOT_IDLE || rq->nr_running > 1)
+		target_load *= rq->prio_bias;
+
+	return target_load;
 }
 
 static inline unsigned long target_load(int cpu, int type)
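The type == 0 fix the changelog mentions is visible in this second hunk: the
old __target_load returned load_now before any biasing, so an instantaneous
(type == 0) target load was never scaled by prio_bias, while the rewrite
funnels every path through the single biased return. A quick check, reusing
the simplified types and hypothetical values from the sketch above (the
cpu_load access is moved after the type check only to keep the sketch
well-defined):

/* Pre-patch __target_load, reconstructed from the '-' lines above. */
static unsigned long target_load_old(const struct runqueue *rq, int type,
				     enum idle_type idle)
{
	unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
	unsigned long cpu_load;

	if (type == 0)
		return load_now;	/* bug: prio_bias never applied here */

	cpu_load = rq->cpu_load[type - 1];
	if (idle == NOT_IDLE) {
		cpu_load *= rq->prio_bias;
		load_now *= rq->prio_bias;
	}
	return cpu_load > load_now ? cpu_load : load_now;
}

/* With nr_running = 2, prio_bias = 4, type == 0, idle == NOT_IDLE:
 *   old: 2 * 128     =  256  (unbiased despite a busy rebalance)
 *   new: 2 * 128 * 4 = 1024  (biased, consistent with the other paths)
 */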