about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>2013-09-11 17:22:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 18:57:48 -0400
commit74060e4d78795c7c43805133cb717d82533d4e0d (patch)
tree923febdc5b4565fbbcf05387d7cc423c72648695
parente632a938d914d271bec26e570d36c755a1e35e4c (diff)
mm: mbind: add hugepage migration code to mbind()
Extend do_mbind() to handle vma with VM_HUGETLB set. We will be able to migrate hugepage with mbind(2) after applying the enablement patch which comes later in this series. Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Andi Kleen <ak@linux.intel.com> Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com> Acked-by: Hillf Danton <dhillf@gmail.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Rik van Riel <riel@redhat.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- include/linux/hugetlb.h | 3
-rw-r--r-- mm/hugetlb.c | 14
-rw-r--r-- mm/mempolicy.c | 4
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index bc8d8370cd0d..d1db00790a84 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -265,6 +265,8 @@ struct huge_bootmem_page {
 };
 
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -378,6 +380,7 @@ static inline pgoff_t basepage_index(struct page *page)
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e51723866fb1..d37b3b95c439 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1207,6 +1207,20 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	return page;
 }
 
+/*
+ * alloc_huge_page()'s wrapper which simply returns the page if allocation
+ * succeeds, otherwise NULL. This function is called from new_vma_page(),
+ * where no ERR_VALUE is expected to be returned.
+ */
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+				unsigned long addr, int avoid_reserve)
+{
+	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+	if (IS_ERR(page))
+		page = NULL;
+	return page;
+}
+
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4626be621e74..c7c359213ae1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1192,6 +1192,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
 		vma = vma->vm_next;
 	}
 
+	if (PageHuge(page))
+		return alloc_huge_page_noerr(vma, address, 1);
 	/*
 	 * if !vma, alloc_page_vma() will use task or system default policy
 	 */
@@ -1302,7 +1304,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 				(unsigned long)vma,
 				MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 		if (nr_failed)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	if (nr_failed && (flags & MPOL_MF_STRICT))