author    Mel Gorman <mgorman@suse.de>    2013-10-07 06:29:17 -0400
committer Ingo Molnar <mingo@kernel.org>    2013-10-09 08:47:25 -0400
commit    fb13c7ee0ed387bd6bec4b4024a4d49b1bd504f1 (patch)
tree      b5892db95bf0b47375cc43005291006aeb115772 /kernel/sched/core.c
parent    ac66f5477239ebd3c4e2cbf2f591ef387aa09884 (diff)
sched/numa: Use a system-wide search to find swap/migration candidates
This patch implements a system-wide search for swap/migration candidates
based on total NUMA hinting faults. It has a balance limit; however, it
doesn't properly consider total node balance.

In the old scheme a task selected a preferred node based on the highest
number of private faults recorded on the node. In this scheme, the
preferred node is based on the total number of faults. If the preferred
node for a task changes then task_numa_migrate() will search the whole
system looking for tasks to swap with that would improve both the overall
compute balance and minimise the expected number of remote NUMA hinting
faults.

Note that there is no guarantee that the node the source task is placed
on by task_numa_migrate() has any relationship to the newly selected
task->numa_preferred_nid due to compute overloading.

Signed-off-by: Mel Gorman <mgorman@suse.de>
[ Do not swap with tasks that cannot run on source cpu. ]
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
[ Fixed compiler warning on UP. ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-40-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
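For illustration, here is a minimal user-space model of the swap test the
message describes: a swap is attractive when the combined number of NUMA
hinting faults the two tasks would service locally after exchanging nodes
exceeds the number before. The struct task_model, NR_NODES, and
swap_improves_locality() names are illustrative only; the actual kernel
logic lives in task_numa_migrate() and the per-task NUMA fault statistics.

    #include <stdbool.h>

    #define NR_NODES 4    /* illustrative node count */

    struct task_model {
            long faults[NR_NODES];    /* NUMA hinting faults recorded per node */
    };

    /*
     * Would swapping @src (currently on @src_nid) with @dst (currently on
     * @dst_nid) reduce the expected number of remote NUMA hinting faults?
     */
    static bool swap_improves_locality(const struct task_model *src, int src_nid,
                                       const struct task_model *dst, int dst_nid)
    {
            long before = src->faults[src_nid] + dst->faults[dst_nid];
            long after  = src->faults[dst_nid] + dst->faults[src_nid];

            return after > before;
    }

The kernel's search additionally weighs overall compute balance, per the
commit message, which this locality-only sketch omits.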
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 4 ++++
 1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 32a2b29c2610..1fe59da280e3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5236,6 +5236,7 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -5252,6 +5253,9 @@ static void update_top_cache_domain(int cpu)
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
 	per_cpu(sd_llc_id, cpu) = id;
+
+	sd = lowest_flag_domain(cpu, SD_NUMA);
+	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
 }
 
 /*
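
Because the hunk above publishes sd_numa with rcu_assign_pointer(), any
reader must pair that with rcu_dereference() inside an RCU read-side
critical section. A minimal sketch of such a reader, assuming kernel
context; inspect_numa_domain() is an illustrative name and not part of
this patch:

    /*
     * Illustrative reader for the new sd_numa per-CPU pointer:
     * rcu_assign_pointer() on the update side pairs with
     * rcu_dereference() under rcu_read_lock() on the read side.
     */
    static void inspect_numa_domain(int cpu)
    {
            struct sched_domain *sd;

            rcu_read_lock();
            sd = rcu_dereference(per_cpu(sd_numa, cpu));
            if (sd) {
                    /* sd is the lowest domain above @cpu with SD_NUMA set */
            }
            rcu_read_unlock();
    }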