path: root/kernel/sched/sched.h
author		Mel Gorman <mgorman@suse.de>	2013-10-07 06:29:17 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-10-09 08:47:25 -0400
commit		fb13c7ee0ed387bd6bec4b4024a4d49b1bd504f1 (patch)
tree		b5892db95bf0b47375cc43005291006aeb115772 /kernel/sched/sched.h
parent		ac66f5477239ebd3c4e2cbf2f591ef387aa09884 (diff)
sched/numa: Use a system-wide search to find swap/migration candidates
This patch implements a system-wide search for swap/migration candidates
based on total NUMA hinting faults. It has a balance limit, however it
doesn't properly consider total node balance.

In the old scheme a task selected a preferred node based on the highest
number of private faults recorded on the node. In this scheme, the
preferred node is based on the total number of faults. If the preferred
node for a task changes then task_numa_migrate will search the whole
system looking for tasks to swap with that would improve both the overall
compute balance and minimise the expected number of remote NUMA hinting
faults.

Note there is no guarantee that the node the source task is placed on by
task_numa_migrate() has any relationship to the newly selected
task->numa_preferred_nid due to compute overloading.

Signed-off-by: Mel Gorman <mgorman@suse.de>
[ Do not swap with tasks that cannot run on source cpu ]
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
[ Fixed compiler warning on UP. ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-40-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
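The shape of the search described above can be illustrated with a small,
self-contained userspace sketch. This is not the kernel's task_numa_migrate(),
which also weighs load balance and CPU capacity; every name here (demo_task,
swap_gain(), find_best_swap(), NR_CPUS_DEMO) is invented for illustration.
The idea: for each CPU in the system, estimate how many NUMA hinting faults
would become local if the two tasks traded places, and remember the
best-scoring candidate.

	#include <stdio.h>

	#define NR_CPUS_DEMO	4
	#define NR_NODES_DEMO	2

	struct demo_task {
		int cpu;			/* cpu the task runs on */
		long faults[NR_NODES_DEMO];	/* NUMA hinting faults per node */
	};

	/* Toy topology: two cpus per node. */
	static int cpu_to_node(int cpu) { return cpu / 2; }

	/*
	 * Net gain in locally-serviced faults if @p moves to @q's node
	 * and @q moves to @p's node.
	 */
	static long swap_gain(const struct demo_task *p, const struct demo_task *q)
	{
		int src_nid = cpu_to_node(p->cpu);
		int dst_nid = cpu_to_node(q->cpu);
		long gain = p->faults[dst_nid] - p->faults[src_nid];

		gain += q->faults[src_nid] - q->faults[dst_nid];
		return gain;
	}

	/* Scan every cpu in the system; return the best swap candidate, if any. */
	static const struct demo_task *find_best_swap(const struct demo_task *p,
						      const struct demo_task tasks[])
	{
		const struct demo_task *best = NULL;
		long best_gain = 0;
		int cpu;

		for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
			const struct demo_task *q = &tasks[cpu];
			long gain;

			/* Swapping within a node cannot improve locality. */
			if (q == p || cpu_to_node(cpu) == cpu_to_node(p->cpu))
				continue;
			gain = swap_gain(p, q);
			if (gain > best_gain) {
				best_gain = gain;
				best = q;
			}
		}
		return best;
	}

	int main(void)
	{
		struct demo_task tasks[NR_CPUS_DEMO] = {
			{ .cpu = 0, .faults = { 10, 90 } },	/* prefers node 1 */
			{ .cpu = 1, .faults = { 80, 20 } },
			{ .cpu = 2, .faults = { 70, 30 } },	/* prefers node 0 */
			{ .cpu = 3, .faults = { 40, 60 } },
		};
		const struct demo_task *best = find_best_swap(&tasks[0], tasks);

		if (best)
			printf("swap with task on cpu %d\n", best->cpu);
		return 0;
	}

Running the sketch picks the task on cpu 2: swapping it with the task on cpu 0
converts the most remote faults to local ones on both sides. The real
implementation additionally rejects swaps that would leave either node
overloaded, which is why the chosen placement need not match
task->numa_preferred_nid.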
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4dc92d016aef..691e96964dcc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -610,9 +610,22 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 	return hsd;
 }
 
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd;
+
+	for_each_domain(cpu, sd) {
+		if (sd->flags & flag)
+			break;
+	}
+
+	return sd;
+}
+
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 
 struct sched_group_power {
 	atomic_t ref;
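For reference, the new lowest_flag_domain() is the bottom-up counterpart of
the existing highest_flag_domain(): for_each_domain() walks from the CPU's
base domain toward its parents, so the helper returns the first (lowest)
level with the given flag set, or NULL when no level has it. A hedged sketch
of how it presumably populates the new sd_numa per-cpu pointer when the
domain hierarchy is rebuilt, modelled on the existing sd_llc caching (the
function name below is illustrative and not part of this diff):

	/* Illustrative kernel-context sketch; the hook name is an assumption. */
	static void demo_cache_numa_domain(int cpu)
	{
		/* Lowest domain level above @cpu with SD_NUMA set, or NULL
		 * on a machine without NUMA scheduling domains. */
		struct sched_domain *sd = lowest_flag_domain(cpu, SD_NUMA);

		rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
	}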