Diffstat (limited to 'mm/page_isolation.c')
-rw-r--r--	mm/page_isolation.c	14
1 file changed, 14 insertions, 0 deletions
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 0cee10ffb98d..d1473b2e9481 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -6,6 +6,7 @@
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
 #include <linux/memory.h>
+#include <linux/hugetlb.h>
 #include "internal.h"
 
 int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
@@ -252,6 +253,19 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 {
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 
+	/*
+	 * TODO: allocate a destination hugepage from a nearest neighbor node,
+	 * in accordance with the memory policy of the user process if possible.
+	 * For now, as a simple work-around, we use the next node as the destination.
+	 */
+	if (PageHuge(page)) {
+		nodemask_t src = nodemask_of_node(page_to_nid(page));
+		nodemask_t dst;
+		nodes_complement(dst, src);
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    next_node(page_to_nid(page), dst));
+	}
+
 	if (PageHighMem(page))
 		gfp_mask |= __GFP_HIGHMEM;
 
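
For readers not familiar with the nodemask helpers used in the hunk above: the destination node is chosen by complementing the source node's mask (nodes_complement()) and then taking the first node id greater than the source (next_node()). The userspace C sketch below mirrors that selection with a plain bitmask; MAX_NODES, the unsigned mask and next_node_in_mask() are illustrative stand-ins of my own for the kernel's MAX_NUMNODES, nodemask_t and next_node(), not kernel APIs.

	/* Userspace illustration only -- not kernel code. */
	#include <stdio.h>

	#define MAX_NODES 8	/* stand-in for MAX_NUMNODES (assumed small) */

	/* return the first set bit strictly after 'node', or MAX_NODES if none */
	static int next_node_in_mask(int node, unsigned int mask)
	{
		for (int n = node + 1; n < MAX_NODES; n++)
			if (mask & (1u << n))
				return n;
		return MAX_NODES;
	}

	int main(void)
	{
		int src_nid = 1;			/* node holding the hugepage */
		unsigned int src = 1u << src_nid;	/* nodemask_of_node() equivalent */
		/* nodes_complement(): every node except the source */
		unsigned int dst = ~src & ((1u << MAX_NODES) - 1);
		int target = next_node_in_mask(src_nid, dst);

		printf("migrate hugepage from node %d to node %d\n", src_nid, target);
		return 0;
	}

Like the kernel's next_node(), the sketch does not wrap around: if the source were the highest-numbered node, the result would be MAX_NODES rather than a valid node id. The patch comment itself describes the next-node choice as a simple work-around pending mempolicy-aware placement.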