aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
authorNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>2013-09-11 17:21:59 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 18:57:46 -0400
commit31caf665e666b51fe36efd1e54031ed29e86c0b4 (patch)
treee17452c7c698aade9946cd5557e3d999663e3f76 /mm/hugetlb.c
parent07443a85ad90c7b62fbe11dcd3d6a1de1e10516f (diff)
mm: migrate: make core migration code aware of hugepage
Currently hugepage migration is available only for soft offlining, but it's also useful for some other users of page migration (clearly because users of hugepage can enjoy the benefit of mempolicy and memory hotplug.) So this patchset tries to extend such users to support hugepage migration. The target of this patchset is to enable hugepage migration for NUMA related system calls (migrate_pages(2), move_pages(2), and mbind(2)), and memory hotplug. This patchset does not add hugepage migration for memory compaction, because users of memory compaction mainly expect to construct thp by arranging raw pages, and there's little or no need to compact hugepages. CMA, another user of page migration, can have benefit from hugepage migration, but is not enabled to support it for now (just because of lack of testing and expertise in CMA.) Hugepage migration of non pmd-based hugepage (for example 1GB hugepage in x86_64, or hugepages in architectures like ia64) is not enabled for now (again, because of lack of testing.) As for how these are achived, I extended the API (migrate_pages()) to handle hugepage (with patch 1 and 2) and adjusted code of each caller to check and collect movable hugepages (with patch 3-7). Remaining 2 patches are kind of miscellaneous ones to avoid unexpected behavior. Patch 8 is about making sure that we only migrate pmd-based hugepages. And patch 9 is about choosing appropriate zone for hugepage allocation. My test is mainly functional one, simply kicking hugepage migration via each entry point and confirm that migration is done correctly. Test code is available here: git://github.com/Naoya-Horiguchi/test_hugepage_migration_extension.git And I always run libhugetlbfs test when changing hugetlbfs's code. With this patchset, no regression was found in the test. This patch (of 9): Before enabling each user of page migration to support hugepage, this patch enables the list of pages for migration to link not only LRU pages, but also hugepages. 
As a result, putback_movable_pages() and migrate_pages() can handle both of LRU pages and hugepages. Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Andi Kleen <ak@linux.intel.com> Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com> Acked-by: Hillf Danton <dhillf@gmail.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Rik van Riel <riel@redhat.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c23
1 file changed, 22 insertions, 1 deletion
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06315560bd23..e51723866fb1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -48,7 +48,8 @@ static unsigned long __initdata default_hstate_max_huge_pages;
48static unsigned long __initdata default_hstate_size; 48static unsigned long __initdata default_hstate_size;
49 49
50/* 50/*
51 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 51 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
52 * free_huge_pages, and surplus_huge_pages.
52 */ 53 */
53DEFINE_SPINLOCK(hugetlb_lock); 54DEFINE_SPINLOCK(hugetlb_lock);
54 55
@@ -3422,3 +3423,23 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
3422 return ret; 3423 return ret;
3423} 3424}
3424#endif 3425#endif
3426
3427bool isolate_huge_page(struct page *page, struct list_head *list)
3428{
3429 VM_BUG_ON(!PageHead(page));
3430 if (!get_page_unless_zero(page))
3431 return false;
3432 spin_lock(&hugetlb_lock);
3433 list_move_tail(&page->lru, list);
3434 spin_unlock(&hugetlb_lock);
3435 return true;
3436}
3437
3438void putback_active_hugepage(struct page *page)
3439{
3440 VM_BUG_ON(!PageHead(page));
3441 spin_lock(&hugetlb_lock);
3442 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3443 spin_unlock(&hugetlb_lock);
3444 put_page(page);
3445}