summary refs log tree commit diff stats
path: root/mm/page_isolation.c
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2017-07-10 18:48:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-07-10 19:32:31 -0400
commit8b9132388964df2cfe151a88fd1dd8219dabf23c (patch)
tree4b4fb6c14c4cc44bcf5be7a4827ba2681f6f7a3e /mm/page_isolation.c
parent4db9b2efe94967be34e3b136a93251a3c1736dd5 (diff)
mm: unify new_node_page and alloc_migrate_target
Commit 394e31d2ceb4 ("mem-hotplug: alloc new page from a nearest neighbor node when mem-offline") has duplicated a large part of alloc_migrate_target with some hotplug specific special casing. To be more precise, it tried to enforce the allocation from a different node than the original page. As a result the two functions diverged in their shared logic, e.g. the hugetlb allocation strategy. Let's unify the two and express different NUMA requirements by the given nodemask. new_node_page will simply exclude the node it doesn't care about and alloc_migrate_target will use all the available nodes. alloc_migrate_target will then learn to migrate hugetlb pages more sanely and use preallocated pool when possible. Please note that alloc_migrate_target used to call alloc_page resp. alloc_pages_current so the memory policy of the current context which is quite strange when we consider that it is used in the context of alloc_contig_range which just tries to migrate pages which stand in the way. Link: http://lkml.kernel.org/r/20170608074553.22152-4-mhocko@kernel.org Signed-off-by: Michal Hocko <mhocko@suse.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Xishi Qiu <qiuxishi@huawei.com> Cc: zhong jiang <zhongjiang@huawei.com> Cc: Joonsoo Kim <js1304@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--mm/page_isolation.c18
1 files changed, 2 insertions, 16 deletions
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3606104893e0..757410d9f758 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
8#include <linux/memory.h> 8#include <linux/memory.h>
9#include <linux/hugetlb.h> 9#include <linux/hugetlb.h>
10#include <linux/page_owner.h> 10#include <linux/page_owner.h>
11#include <linux/migrate.h>
11#include "internal.h" 12#include "internal.h"
12 13
13#define CREATE_TRACE_POINTS 14#define CREATE_TRACE_POINTS
@@ -294,20 +295,5 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
294struct page *alloc_migrate_target(struct page *page, unsigned long private, 295struct page *alloc_migrate_target(struct page *page, unsigned long private,
295 int **resultp) 296 int **resultp)
296{ 297{
297 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; 298 return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
298
299 /*
300 * TODO: allocate a destination hugepage from a nearest neighbor node,
301 * accordance with memory policy of the user process if possible. For
302 * now as a simple work-around, we use the next node for destination.
303 */
304 if (PageHuge(page))
305 return alloc_huge_page_node(page_hstate(compound_head(page)),
306 next_node_in(page_to_nid(page),
307 node_online_map));
308
309 if (PageHighMem(page))
310 gfp_mask |= __GFP_HIGHMEM;
311
312 return alloc_page(gfp_mask);
313} 299}