author		Rik van Riel <riel@redhat.com>	2014-05-14 13:22:21 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-05-22 05:16:38 -0400
commit		e63da03639cc9e6e83b62e7ef8ffdbb92421416a (patch)
tree		cc25bdae654b1f3e3318e9564763e1f69799c832 /kernel
parent		4027d080854d1be96ef134a1c3024d5276114db6 (diff)
sched/numa: Allow task switch if load imbalance improves
Currently the NUMA balancing code only allows moving tasks between NUMA
nodes when the load on both nodes is in balance. This breaks down when
the load was imbalanced to begin with.

Allow tasks to be moved between NUMA nodes if the imbalance is small,
or if the new imbalance is smaller than the original one.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: mgorman@suse.de
Cc: chegu_vinod@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: http://lkml.kernel.org/r/20140514132221.274b3463@annuminas.surriel.com
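To make the new acceptance rule easy to experiment with, here is a minimal user-space sketch of the same test. It is not the kernel code itself: IMBALANCE_PCT stands in for env->imbalance_pct (125 is an assumed, typical value) and plain longs stand in for the scheduler's weighted runqueue loads.

#include <stdbool.h>

/* Assumed stand-in for env->imbalance_pct (allowed imbalance, in percent). */
#define IMBALANCE_PCT	125

static void swap_long(long *a, long *b)
{
	long t = *a;
	*a = *b;
	*b = t;
}

/*
 * Reject a proposed task move only if the resulting imbalance is above
 * the threshold AND worse than the imbalance we started out with.
 */
static bool load_too_imbalanced(long orig_src_load, long orig_dst_load,
				long src_load, long dst_load)
{
	long imb, old_imb;

	/* Only the size of the gap matters, not its direction. */
	if (dst_load < src_load)
		swap_long(&dst_load, &src_load);

	/* Still within the allowed percentage? Accept the move. */
	imb = dst_load * 100 - src_load * IMBALANCE_PCT;
	if (imb <= 0)
		return false;

	/* Otherwise compare against the imbalance before the move. */
	if (orig_dst_load < orig_src_load)
		swap_long(&orig_dst_load, &orig_src_load);

	old_imb = orig_dst_load * 100 - orig_src_load * IMBALANCE_PCT;

	/* Reject only if the move would leave things worse than before. */
	return old_imb < imb;
}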
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	46
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f7cac2ba62ea..b899613f2bc6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1095,6 +1095,34 @@ static void task_numa_assign(struct task_numa_env *env,
 	env->best_cpu = env->dst_cpu;
 }
 
+static bool load_too_imbalanced(long orig_src_load, long orig_dst_load,
+				long src_load, long dst_load,
+				struct task_numa_env *env)
+{
+	long imb, old_imb;
+
+	/* We care about the slope of the imbalance, not the direction. */
+	if (dst_load < src_load)
+		swap(dst_load, src_load);
+
+	/* Is the difference below the threshold? */
+	imb = dst_load * 100 - src_load * env->imbalance_pct;
+	if (imb <= 0)
+		return false;
+
+	/*
+	 * The imbalance is above the allowed threshold.
+	 * Compare it with the old imbalance.
+	 */
+	if (orig_dst_load < orig_src_load)
+		swap(orig_dst_load, orig_src_load);
+
+	old_imb = orig_dst_load * 100 - orig_src_load * env->imbalance_pct;
+
+	/* Would this change make things worse? */
+	return (old_imb < imb);
+}
+
 /*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source tasks was migrated to the target dst_cpu taking
@@ -1107,7 +1135,8 @@ static void task_numa_compare(struct task_numa_env *env,
 	struct rq *src_rq = cpu_rq(env->src_cpu);
 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
 	struct task_struct *cur;
-	long dst_load, src_load;
+	long orig_src_load, src_load;
+	long orig_dst_load, dst_load;
 	long load;
 	long imp = (groupimp > 0) ? groupimp : taskimp;
 
@@ -1181,13 +1210,13 @@ static void task_numa_compare(struct task_numa_env *env,
 	 * In the overloaded case, try and keep the load balanced.
 	 */
 balance:
-	dst_load = env->dst_stats.load;
-	src_load = env->src_stats.load;
+	orig_dst_load = env->dst_stats.load;
+	orig_src_load = env->src_stats.load;
 
 	/* XXX missing power terms */
 	load = task_h_load(env->p);
-	dst_load += load;
-	src_load -= load;
+	dst_load = orig_dst_load + load;
+	src_load = orig_src_load - load;
 
 	if (cur) {
 		load = task_h_load(cur);
@@ -1195,11 +1224,8 @@ balance:
 		src_load += load;
 	}
 
-	/* make src_load the smaller */
-	if (dst_load < src_load)
-		swap(dst_load, src_load);
-
-	if (src_load * env->imbalance_pct < dst_load * 100)
+	if (load_too_imbalanced(orig_src_load, orig_dst_load,
+				src_load, dst_load, env))
 		goto unlock;
 
 assign:
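To see why replacing the plain threshold check with load_too_imbalanced() matters, here is a small self-contained program that evaluates both the old and the new acceptance test on the same move. The numbers are purely illustrative, imbalance_pct is again assumed to be 125, and there is no swap candidate (cur == NULL), so only the moving task's load is applied.

#include <stdbool.h>
#include <stdio.h>

#define IMBALANCE_PCT	125	/* assumed, as in the sketch above */

int main(void)
{
	/* Illustrative node loads: source heavily loaded, destination not. */
	long orig_src_load = 3000, orig_dst_load = 1000;
	long task_load = 500;

	/* Loads after the proposed move, as task_numa_compare() computes them. */
	long src_load = orig_src_load - task_load;	/* 2500 */
	long dst_load = orig_dst_load + task_load;	/* 1500 */

	long lo = src_load < dst_load ? src_load : dst_load;
	long hi = src_load < dst_load ? dst_load : src_load;
	long orig_lo = orig_src_load < orig_dst_load ? orig_src_load : orig_dst_load;
	long orig_hi = orig_src_load < orig_dst_load ? orig_dst_load : orig_src_load;

	/* Old test: reject whenever the post-move loads exceed the threshold. */
	bool old_rule_rejects = lo * IMBALANCE_PCT < hi * 100;

	/* New test: additionally compare with the imbalance before the move. */
	long imb = hi * 100 - lo * IMBALANCE_PCT;
	long old_imb = orig_hi * 100 - orig_lo * IMBALANCE_PCT;
	bool new_rule_rejects = imb > 0 && old_imb < imb;

	/* Prints "old rule rejects: 1, new rule rejects: 0". */
	printf("old rule rejects: %d, new rule rejects: %d\n",
	       old_rule_rejects, new_rule_rejects);
	return 0;
}

The gap between the nodes shrinks from 2000 to 1000, yet the old check still refuses the move because 1500 * 125 < 2500 * 100; the new check accepts it because the imbalance improved.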