author    Rik van Riel <riel@redhat.com>    2014-01-27 17:03:44 -0500
committer Ingo Molnar <mingo@kernel.org>    2014-01-28 07:17:07 -0500
commit    10f39042711ba21773763f267b4943a2c66c8bef (patch)
tree      394f8399c6f9b980f5673e1034b125b91844f662 /include/linux/sched.h
parent    20e07dea286a90f096a779706861472d296397c6 (diff)
sched/numa, mm: Use active_nodes nodemask to limit numa migrations
Use the active_nodes nodemask to make smarter decisions on NUMA migrations.

In order to maximize performance of workloads that do not fit in one
NUMA node, we want to satisfy the following criteria:

1) keep private memory local to each thread
2) avoid excessive NUMA migration of pages
3) distribute shared memory across the active nodes, to
   maximize memory bandwidth available to the workload

This patch accomplishes that by implementing the following policy for
NUMA migrations:

1) always migrate on a private fault
2) never migrate to a node that is not in the set of active nodes
   for the numa_group
3) always migrate from a node outside of the set of active nodes,
   to a node that is in that set
4) within the set of active nodes in the numa_group, only migrate
   from a node with more NUMA page faults, to a node with fewer
   NUMA page faults, with a 25% margin to avoid ping-ponging

This results in most pages of a workload ending up on the actively
used nodes, with reduced ping-ponging of pages between those nodes.

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-6-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
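As a minimal illustration of the four-rule policy above, here is a
user-space C sketch. The names (numa_group, active_nodes, faults[],
node_is_active, should_migrate) mirror the kernel's naming, but the
types and helpers are simplified stand-ins assumed for this sketch,
not the kernel implementation:

#include <stdbool.h>

#define MAX_NODES 8	/* small fixed node count, sketch only */

struct numa_group {
	unsigned long active_nodes;	/* bitmask of actively used NUMA nodes */
	unsigned long faults[MAX_NODES];/* NUMA hinting faults per node */
};

bool node_is_active(const struct numa_group *ng, int nid)
{
	return ng->active_nodes & (1UL << nid);
}

/*
 * Decide whether to migrate a page from src_nid toward dst_nid.
 * private_fault is true when the faulting task was also the last
 * task to touch the page.
 */
bool should_migrate(const struct numa_group *ng, bool private_fault,
		    int src_nid, int dst_nid)
{
	/* 1) Always migrate on a private fault. */
	if (private_fault)
		return true;

	/* 2) Never migrate to a node outside the active set. */
	if (!node_is_active(ng, dst_nid))
		return false;

	/* 3) Always pull pages from an inactive node into the active set. */
	if (!node_is_active(ng, src_nid))
		return true;

	/*
	 * 4) Within the active set, migrate only from a more heavily
	 * faulted node to a less heavily faulted one, with a 25%
	 * margin to avoid ping-ponging.
	 */
	return ng->faults[dst_nid] * 4 < ng->faults[src_nid] * 3;
}

Rule 4 is written as faults[dst] * 4 < faults[src] * 3, the integer
form of "the destination sees fewer than 75% of the source's faults",
which provides the 25% hysteresis margin described above.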
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5fb0cfb43ecf..5ab3b89fc33e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1589,6 +1589,8 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
 extern void task_numa_free(struct task_struct *p);
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+				       int src_nid, int dst_cpu);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
 				   int flags)
@@ -1604,6 +1606,11 @@ static inline void set_numabalancing_state(bool enabled)
 static inline void task_numa_free(struct task_struct *p)
 {
 }
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+					      struct page *page, int src_nid, int dst_cpu)
+{
+	return true;
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
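For context, a hedged sketch of how a NUMA hinting fault path might
consult the new predicate; the surrounding control flow and labels
here are illustrative assumptions, not taken from this patch:

	/* src_nid: node the page currently resides on;
	 * dst_cpu: the CPU that took the NUMA hinting fault. */
	if (!should_numa_migrate_memory(current, page, src_nid, dst_cpu))
		goto out;	/* leave the page where it is */

	/* ...otherwise queue the page for migration to dst_cpu's node... */

Note that the !CONFIG_NUMA_BALANCING stub in the hunk above returns
true, preserving an unconditional-migration default for any code path
reached when NUMA balancing is compiled out.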