diff options
author | Peter Zijlstra <peterz@infradead.org> | 2014-07-11 10:01:53 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2014-07-16 07:38:23 -0400 |
commit | e720fff6341fe4b95e5a93c939bd3c77fa55ced4 (patch) | |
tree | 31ccde4e945d6f51dbe0c788800f0a5f111bdcc2 /kernel/sched/fair.c | |
parent | 5cd08fbfdb6baa9fe98f530b76898fc5725a6289 (diff) |
sched/numa: Revert "Use effective_load() to balance NUMA loads"
Due to divergent trees, Rik finds that this patch is no longer
required.
Requested-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-u6odkgkw8wz3m7orgsjfo5pi@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 20 |
1 file changed, 6 insertions, 14 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f5f0cc91518c..45943b2fa82b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1151,7 +1151,6 @@ static void task_numa_compare(struct task_numa_env *env, | |||
1151 | struct rq *src_rq = cpu_rq(env->src_cpu); | 1151 | struct rq *src_rq = cpu_rq(env->src_cpu); |
1152 | struct rq *dst_rq = cpu_rq(env->dst_cpu); | 1152 | struct rq *dst_rq = cpu_rq(env->dst_cpu); |
1153 | struct task_struct *cur; | 1153 | struct task_struct *cur; |
1154 | struct task_group *tg; | ||
1155 | long src_load, dst_load; | 1154 | long src_load, dst_load; |
1156 | long load; | 1155 | long load; |
1157 | long imp = env->p->numa_group ? groupimp : taskimp; | 1156 | long imp = env->p->numa_group ? groupimp : taskimp; |
@@ -1223,14 +1222,9 @@ static void task_numa_compare(struct task_numa_env *env, | |||
1223 | * In the overloaded case, try and keep the load balanced. | 1222 | * In the overloaded case, try and keep the load balanced. |
1224 | */ | 1223 | */ |
1225 | balance: | 1224 | balance: |
1226 | src_load = env->src_stats.load; | 1225 | load = task_h_load(env->p); |
1227 | dst_load = env->dst_stats.load; | 1226 | dst_load = env->dst_stats.load + load; |
1228 | 1227 | src_load = env->src_stats.load - load; | |
1229 | /* Calculate the effect of moving env->p from src to dst. */ | ||
1230 | load = env->p->se.load.weight; | ||
1231 | tg = task_group(env->p); | ||
1232 | src_load += effective_load(tg, env->src_cpu, -load, -load); | ||
1233 | dst_load += effective_load(tg, env->dst_cpu, load, load); | ||
1234 | 1228 | ||
1235 | if (moveimp > imp && moveimp > env->best_imp) { | 1229 | if (moveimp > imp && moveimp > env->best_imp) { |
1236 | /* | 1230 | /* |
@@ -1250,11 +1244,9 @@ balance: | |||
1250 | goto unlock; | 1244 | goto unlock; |
1251 | 1245 | ||
1252 | if (cur) { | 1246 | if (cur) { |
1253 | /* Cur moves in the opposite direction. */ | 1247 | load = task_h_load(cur); |
1254 | load = cur->se.load.weight; | 1248 | dst_load -= load; |
1255 | tg = task_group(cur); | 1249 | src_load += load; |
1256 | src_load += effective_load(tg, env->src_cpu, load, load); | ||
1257 | dst_load += effective_load(tg, env->dst_cpu, -load, -load); | ||
1258 | } | 1250 | } |
1259 | 1251 | ||
1260 | if (load_too_imbalanced(src_load, dst_load, env)) | 1252 | if (load_too_imbalanced(src_load, dst_load, env)) |