author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2013-09-11 17:22:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-11 18:57:48 -0400
commit	e2d8cf405525d83e6ca42969be460f94b0339798
tree	bf36312ef5b29802dcca7e0849596a128042b0db /mm/mempolicy.c
parent	b8ec1cee5a4375c1244b85709138a2eac2d89cb6
migrate: add hugepage migration code to migrate_pages()
Extend check_range() to handle vmas with VM_HUGETLB set. We will be able to migrate hugepages with migrate_pages(2) after applying the enablement patch that comes later in this series.

Note that larger hugepages (those covered by pud entries, e.g. 1GB on x86_64) are simply skipped for now.

Note that using pmd_huge()/pud_huge() assumes that hugepages are pointed to by pmds/puds. This is not true on some architectures, such as ia64, that implement hugepages with other mechanisms, but that is fine: pmd_huge()/pud_huge() simply return 0 on such architectures, and the page walker ignores those hugepages.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
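For context, a minimal userspace sketch of the migrate_pages(2) call that this series works toward enabling for hugetlb mappings. This is illustrative only and not part of the patch: it assumes a two-node NUMA system, moves the calling process's pages from node 0 to node 1, and invokes the syscall via syscall(2) since glibc does not provide a wrapper (libnuma's numaif.h declares one).

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* Assumed two-node system: move all pages from node 0 to node 1. */
	unsigned long old_nodes = 1UL << 0;
	unsigned long new_nodes = 1UL << 1;

	/* maxnode must cover the highest bit used in the node masks. */
	long ret = syscall(SYS_migrate_pages, getpid(),
			   sizeof(old_nodes) * 8, &old_nodes, &new_nodes);
	if (ret < 0)
		perror("migrate_pages");
	else
		/* On success, returns the number of pages that could not be moved. */
		printf("%ld pages could not be moved\n", ret);
	return 0;
}

Before the enablement patch later in this series, hugetlb-backed pages in the target process are skipped by this call; afterwards they are migrated like regular LRU pages.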
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	44
1 file changed, 39 insertions(+), 5 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 27022ca890f8..4626be621e74 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -515,6 +515,30 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	return addr != end;
 }
 
+static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
+				    const nodemask_t *nodes, unsigned long flags,
+				    void *private)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	int nid;
+	struct page *page;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	page = pte_page(huge_ptep_get((pte_t *)pmd));
+	nid = page_to_nid(page);
+	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+		goto unlock;
+	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+	if (flags & (MPOL_MF_MOVE_ALL) ||
+	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+		isolate_huge_page(page, private);
+unlock:
+	spin_unlock(&vma->vm_mm->page_table_lock);
+#else
+	BUG();
+#endif
+}
+
 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
@@ -526,6 +550,13 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (!pmd_present(*pmd))
+			continue;
+		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
+			check_hugetlb_pmd_range(vma, pmd, nodes,
+						flags, private);
+			continue;
+		}
 		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
@@ -547,6 +578,8 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
+		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
+			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
 		if (check_pmd_range(vma, pud, addr, next, nodes,
@@ -638,9 +671,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 			return ERR_PTR(-EFAULT);
 		}
 
-		if (is_vm_hugetlb_page(vma))
-			goto next;
-
 		if (flags & MPOL_MF_LAZY) {
 			change_prot_numa(vma, start, endvma);
 			goto next;
@@ -993,7 +1023,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+	if (PageHuge(page))
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					node);
+	else
+		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
@@ -1023,7 +1057,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 		err = migrate_pages(&pagelist, new_node_page, dest,
 					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	return err;