author	David Rientjes <rientjes@google.com>	2015-04-14 18:46:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:49:03 -0400
commit	5265047ac30191ea24b16503165000c225f54feb (patch)
tree	b51d1c3e3a42f19f05ef8e5f0460ee6417eed9e2
parent	4167e9b2cf10f8a4bcda0c713ddc8bb0a18e8187 (diff)
mm, thp: really limit transparent hugepage allocation to local node
Commit 077fcf116c8c ("mm/thp: allocate transparent hugepages on local node") restructured alloc_hugepage_vma() with the intent of only allocating transparent hugepages locally when there was not an effective interleave mempolicy.

alloc_pages_exact_node() does not limit the allocation to the single node, however, but rather prefers it.  This is because __GFP_THISNODE is not set, which would cause the node-local nodemask to be passed.  Without it, only a nodemask that prefers the local node is passed.

Fix this by passing __GFP_THISNODE and falling back to small pages when the allocation fails.

Commit 9f1b868a13ac ("mm: thp: khugepaged: add policy for finding target node") suffers from a similar problem for khugepaged, which is also fixed.

Fixes: 077fcf116c8c ("mm/thp: allocate transparent hugepages on local node")
Fixes: 9f1b868a13ac ("mm: thp: khugepaged: add policy for finding target node")
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pravin Shelar <pshelar@nicira.com>
Cc: Jarno Rajahalme <jrajahalme@nicira.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
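[Editor's illustration, not part of the commit: a minimal kernel-style C sketch of the behavioural difference the changelog describes.  Without __GFP_THISNODE the allocator merely prefers the requested node and may silently fall back to a remote one; with the flag, a successful allocation is guaranteed node-local.  The demo function name is hypothetical.]

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/*
	 * Illustrative sketch only -- not part of this patch.  Contrasts
	 * the "preferred" and "strict" node semantics of
	 * alloc_pages_exact_node().
	 */
	static void thisnode_semantics_demo(int node, gfp_t gfp,
					    unsigned int order)
	{
		struct page *preferred, *strict;

		/* Prefers 'node' but may return memory from any node. */
		preferred = alloc_pages_exact_node(node, gfp, order);

		/* Constrained to 'node': node-local memory or NULL. */
		strict = alloc_pages_exact_node(node, gfp | __GFP_THISNODE,
						order);
		/* On success, page_to_nid(strict) == node is guaranteed. */

		if (preferred)
			__free_pages(preferred, order);
		if (strict)
			__free_pages(strict, order);
	}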
-rw-r--r--	mm/huge_memory.c	9
-rw-r--r--	mm/mempolicy.c	3
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6352c1dfa898..3afb5cbe1312 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2328,8 +2328,14 @@ static struct page
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
+	gfp_t flags;
+
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
+	/* Only allocate from the target node */
+	flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+		__GFP_THISNODE;
+
 	/*
 	 * Before allocating the hugepage, release the mmap_sem read lock.
 	 * The allocation can take potentially a long time if it involves
@@ -2338,8 +2344,7 @@ static struct page
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
-		khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
+	*hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 69d05acfa18c..ede26291d4aa 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1986,7 +1986,8 @@ retry_cpuset:
 		nmask = policy_nodemask(gfp, pol);
 		if (!nmask || node_isset(node, *nmask)) {
 			mpol_cond_put(pol);
-			page = alloc_pages_exact_node(node, gfp, order);
+			page = alloc_pages_exact_node(node,
+						gfp | __GFP_THISNODE, order);
 			goto out;
 		}
 	}
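[Editor's illustration, not part of the commit: a hypothetical caller-side helper sketching the "fall back to small pages" behaviour the changelog promises once __GFP_THISNODE can make the huge allocation fail.  The helper name is invented for illustration.]

	#include <linux/gfp.h>
	#include <linux/huge_mm.h>

	/*
	 * Hypothetical helper, not from this patch: try a strictly
	 * node-local THP first; if that fails, fall back to a single
	 * small page rather than placing the hugepage on a remote node.
	 */
	static struct page *thp_or_small_page(int node, gfp_t gfp)
	{
		struct page *page;

		page = alloc_pages_exact_node(node, gfp | __GFP_THISNODE,
					      HPAGE_PMD_ORDER);
		if (!page)
			page = alloc_pages_exact_node(node, gfp, 0);
		return page;
	}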