about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorRik van Riel <riel@redhat.com>2014-09-04 16:35:30 -0400
committerIngo Molnar <mingo@kernel.org>2014-09-19 06:35:14 -0400
commitba7e5a279e72f4b246dc7a419ac707e1936ede3e (patch)
tree075a1aa73a66a8b798e15576a76eccb9fbbe8f51 /kernel
parent13924d2a983fc1557eb737ea59e2324adb538fa2 (diff)
sched/numa: Use select_idle_sibling() to select a destination for task_numa_move()
The code in task_numa_compare() will only examine at most one idle CPU per node, because they all have the same score. However, some idle CPUs are better candidates than others, due to busy or idle SMT siblings, etc... The scheduler has logic to find the best CPU within an LLC to place a task. The NUMA code should probably use it. This seems to reduce the standard deviation for single instance SPECjbb2005 with a low warehouse count on my 4 node test system. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: mgorman@suse.de Cc: Mike Galbraith <umgwanakikbuti@gmail.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20140904163530.189d410a@cuia.bos.redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c8
1 file changed, 8 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index be9e97b0d76f..96e7147044bb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -665,6 +665,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
665} 665}
666 666
667#ifdef CONFIG_SMP 667#ifdef CONFIG_SMP
668static int select_idle_sibling(struct task_struct *p, int cpu);
668static unsigned long task_h_load(struct task_struct *p); 669static unsigned long task_h_load(struct task_struct *p);
669 670
670static inline void __update_task_entity_contrib(struct sched_entity *se); 671static inline void __update_task_entity_contrib(struct sched_entity *se);
@@ -1257,6 +1258,13 @@ balance:
1257 if (load_too_imbalanced(src_load, dst_load, env)) 1258 if (load_too_imbalanced(src_load, dst_load, env))
1258 goto unlock; 1259 goto unlock;
1259 1260
1261 /*
1262 * One idle CPU per node is evaluated for a task numa move.
1263 * Call select_idle_sibling to maybe find a better one.
1264 */
1265 if (!cur)
1266 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1267
1260assign: 1268assign:
1261 task_numa_assign(env, cur, imp); 1269 task_numa_assign(env, cur, imp);
1262unlock: 1270unlock: