aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--kernel/sched.c14
1 file changed, 8 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ec9ea9119b98..502d47c883b6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -972,15 +972,16 @@ void kick_process(task_t *p)
972static inline unsigned long __source_load(int cpu, int type, enum idle_type idle) 972static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
973{ 973{
974 runqueue_t *rq = cpu_rq(cpu); 974 runqueue_t *rq = cpu_rq(cpu);
975 unsigned long running = rq->nr_running;
975 unsigned long source_load, cpu_load = rq->cpu_load[type-1], 976 unsigned long source_load, cpu_load = rq->cpu_load[type-1],
976 load_now = rq->nr_running * SCHED_LOAD_SCALE; 977 load_now = running * SCHED_LOAD_SCALE;
977 978
978 if (type == 0) 979 if (type == 0)
979 source_load = load_now; 980 source_load = load_now;
980 else 981 else
981 source_load = min(cpu_load, load_now); 982 source_load = min(cpu_load, load_now);
982 983
983 if (idle == NOT_IDLE || rq->nr_running > 1) 984 if (running > 1 || (idle == NOT_IDLE && running))
984 /* 985 /*
985 * If we are busy rebalancing the load is biased by 986 * If we are busy rebalancing the load is biased by
986 * priority to create 'nice' support across cpus. When 987 * priority to create 'nice' support across cpus. When
@@ -989,7 +990,7 @@ static inline unsigned long __source_load(int cpu, int type, enum idle_type idle
989 * prevent idle rebalance from trying to pull tasks from a 990 * prevent idle rebalance from trying to pull tasks from a
990 * queue with only one running task. 991 * queue with only one running task.
991 */ 992 */
992 source_load *= rq->prio_bias; 993 source_load = source_load * rq->prio_bias / running;
993 994
994 return source_load; 995 return source_load;
995} 996}
@@ -1005,16 +1006,17 @@ static inline unsigned long source_load(int cpu, int type)
1005static inline unsigned long __target_load(int cpu, int type, enum idle_type idle) 1006static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
1006{ 1007{
1007 runqueue_t *rq = cpu_rq(cpu); 1008 runqueue_t *rq = cpu_rq(cpu);
1009 unsigned long running = rq->nr_running;
1008 unsigned long target_load, cpu_load = rq->cpu_load[type-1], 1010 unsigned long target_load, cpu_load = rq->cpu_load[type-1],
1009 load_now = rq->nr_running * SCHED_LOAD_SCALE; 1011 load_now = running * SCHED_LOAD_SCALE;
1010 1012
1011 if (type == 0) 1013 if (type == 0)
1012 target_load = load_now; 1014 target_load = load_now;
1013 else 1015 else
1014 target_load = max(cpu_load, load_now); 1016 target_load = max(cpu_load, load_now);
1015 1017
1016 if (idle == NOT_IDLE || rq->nr_running > 1) 1018 if (running > 1 || (idle == NOT_IDLE && running))
1017 target_load *= rq->prio_bias; 1019 target_load = target_load * rq->prio_bias / running;
1018 1020
1019 return target_load; 1021 return target_load;
1020} 1022}