aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  41
1 files changed, 23 insertions, 18 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d9dbf8ee6ca4..ec9ea9119b98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -972,22 +972,26 @@ void kick_process(task_t *p)
 static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
-	if (idle == NOT_IDLE) {
+	if (type == 0)
+		source_load = load_now;
+	else
+		source_load = min(cpu_load, load_now);
+
+	if (idle == NOT_IDLE || rq->nr_running > 1)
 		/*
-		 * If we are balancing busy runqueues the load is biased by
-		 * priority to create 'nice' support across cpus.
+		 * If we are busy rebalancing the load is biased by
+		 * priority to create 'nice' support across cpus. When
+		 * idle rebalancing we should only bias the source_load if
+		 * there is more than one task running on that queue to
+		 * prevent idle rebalance from trying to pull tasks from a
+		 * queue with only one running task.
 		 */
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
+		source_load *= rq->prio_bias;
 
-	if (type == 0)
-		return load_now;
-
-	return min(cpu_load, load_now);
+	return source_load;
 }
 
 static inline unsigned long source_load(int cpu, int type)
@@ -1001,17 +1005,18 @@ static inline unsigned long source_load(int cpu, int type)
 static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
+	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
 		load_now = rq->nr_running * SCHED_LOAD_SCALE;
 
 	if (type == 0)
-		return load_now;
+		target_load = load_now;
+	else
+		target_load = max(cpu_load, load_now);
 
-	if (idle == NOT_IDLE) {
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
-	return max(cpu_load, load_now);
+	if (idle == NOT_IDLE || rq->nr_running > 1)
+		target_load *= rq->prio_bias;
+
+	return target_load;
 }
 
 static inline unsigned long target_load(int cpu, int type)