author     Mel Gorman <mgorman@suse.de>  2012-11-02 14:19:13 -0400
committer  Mel Gorman <mgorman@suse.de>  2012-12-11 09:42:48 -0500
commit     5606e3877ad8baea42f3a71ebde0a03622bbb551 (patch)
tree       4fc481ccb482236ced5fca76ad19729ba083e8da /mm/mempolicy.c
parent     03c5a6e16322c997bf8f264851bfa3f532ad515f (diff)
mm: numa: Migrate on reference policy
This is the simplest possible policy that still does something of note.
When a NUMA hinting fault is taken on a pte_numa page, the page is
migrated immediately to the node of the referencing CPU. Any replacement
policy must at least do better than this, and in all likelihood this
policy regresses normal workloads.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
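In outline, the decision this patch adds is tiny: on a NUMA hinting fault, the target node for the page becomes whichever node the faulting CPU sits on. A minimal sketch of just that decision (hypothetical helper, not kernel code; the real logic is the MPOL_F_MORON branch in mpol_misplaced() below):

/* Sketch only: the migrate-on-reference decision in isolation.
 * In the kernel, page_nid comes from page_to_nid(page) and
 * cpu_nid from numa_node_id() on the faulting CPU. */
static int moron_target(int page_nid, int cpu_nid)
{
	if (page_nid != cpu_nid)
		return cpu_nid;	/* misplaced: migrate towards the referencing CPU */
	return -1;		/* already local: leave the page alone */
}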
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	38
1 file changed, 36 insertions, 2 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 516491fbfaa8..4c1c8d83ac6a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -118,6 +118,26 @@ static struct mempolicy default_policy = {
 	.flags = MPOL_F_LOCAL,
 };
 
+static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+
+static struct mempolicy *get_task_policy(struct task_struct *p)
+{
+	struct mempolicy *pol = p->mempolicy;
+	int node;
+
+	if (!pol) {
+		node = numa_node_id();
+		if (node != -1)
+			pol = &preferred_node_policy[node];
+
+		/* preferred_node_policy is not initialised early in boot */
+		if (!pol->mode)
+			pol = NULL;
+	}
+
+	return pol;
+}
+
 static const struct mempolicy_operations {
 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 	/*
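Two properties of get_task_policy() are worth noting: a task that set a policy via set_mempolicy() is unaffected, and early in boot, before numa_policy_init() below fills in preferred_node_policy[], the table entries still have .mode == 0, so the function returns NULL and callers fall back to default_policy exactly as before. A hedged sketch of what a policy-less task now observes (illustrative values, assuming the initialisation added later in this patch):

/* Sketch (not kernel code): a task that never called set_mempolicy(),
 * currently running on node 2, after numa_policy_init() has run. */
struct mempolicy *pol = get_task_policy(current);
/* pol->mode             == MPOL_PREFERRED
 * pol->flags            == MPOL_F_MOF | MPOL_F_MORON
 * pol->v.preferred_node == 2
 * Allocations prefer node 2, and MPOL_F_MORON marks the policy as one
 * that migrates pages towards the referencing node on hinting faults. */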
@@ -1598,7 +1618,7 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 struct mempolicy *get_vma_policy(struct task_struct *task,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct mempolicy *pol = task->mempolicy;
+	struct mempolicy *pol = get_task_policy(task);
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -2021,7 +2041,7 @@ retry_cpuset:
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
-	struct mempolicy *pol = current->mempolicy;
+	struct mempolicy *pol = get_task_policy(current);
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 
@@ -2295,6 +2315,11 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 	default:
 		BUG();
 	}
+
+	/* Migrate the page towards the node whose CPU is referencing it */
+	if (pol->flags & MPOL_F_MORON)
+		polnid = numa_node_id();
+
 	if (curnid != polnid)
 		ret = polnid;
 out:
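For context, mpol_misplaced() is consulted from the NUMA hinting fault path added elsewhere in this series: it returns -1 if the page is placed acceptably, or the node it should move to. A rough sketch of the consumer side (assumed shape only; migrate_misplaced_page() here stands in for the migration helper from the surrounding patches, and its signature is a guess, not code from this commit):

/* Sketch of a caller (not part of this commit): on a pte_numa fault,
 * ask the policy where the page belongs and migrate it if misplaced. */
static void numa_hinting_fault(struct page *page, struct vm_area_struct *vma,
			       unsigned long addr)
{
	int target = mpol_misplaced(page, vma, addr);

	if (target != -1)
		migrate_misplaced_page(page, target);	/* hypothetical signature */
}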
@@ -2483,6 +2508,15 @@ void __init numa_policy_init(void)
 				     sizeof(struct sp_node),
 				     0, SLAB_PANIC, NULL);
 
+	for_each_node(nid) {
+		preferred_node_policy[nid] = (struct mempolicy) {
+			.refcnt = ATOMIC_INIT(1),
+			.mode = MPOL_PREFERRED,
+			.flags = MPOL_F_MOF | MPOL_F_MORON,
+			.v = { .preferred_node = nid, },
+		};
+	}
+
 	/*
 	 * Set interleaving policy for system init. Interleaving is only
 	 * enabled across suitably sized nodes (default is >= 16MB), or