author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2013-09-11 17:22:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-11 18:57:48 -0400
commit		c8721bbbdd36382de51cd6b7a56322e0acca2414 (patch)
tree		8fb7b55974defcde9a4b07f571f0dd2dd1ad591f /include/linux
parent		71ea2efb1e936a127690a0a540b3a6162f95e48a (diff)
mm: memory-hotplug: enable memory hotplug to handle hugepage
Until now we cannot offline memory blocks that contain hugepages, because a hugepage is considered an unmovable page. With this patch series a hugepage becomes movable, so by using hugepage migration we can offline such memory blocks.

What's different from other users of hugepage migration is that we need to decompose all the hugepages inside the target memory block into free buddy pages after hugepage migration, because otherwise free hugepages remaining in the memory block interfere with memory offlining. For this reason we introduce the new functions dissolve_free_huge_page() and dissolve_free_huge_pages().

Other than that, this patch straightforwardly adds the hugepage migration code: hugepage handling in the functions that scan over PFNs and collect pages to be migrated, and a hugepage allocation path in alloc_migrate_target().

As for larger hugepages (1GB on x86_64), hot-removing them is not easy because they are larger than a memory block, so for now we simply leave such attempts to fail.

[yongjun_wei@trendmicro.com.cn: remove duplicated include]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
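For orientation, here is a minimal sketch of the dissolve step described above. The function names dissolve_free_huge_page() and dissolve_free_huge_pages() come from the commit message itself; the internals shown (hugetlb_lock, the free_huge_pages counters, update_and_free_page()) are assumptions modeled on mm/hugetlb.c of that era, not a verbatim excerpt of the patch:

	/*
	 * Sketch: after hugepage migration, walk the PFN range of the
	 * memory block and hand any remaining *free* hugepages back to
	 * the buddy allocator so offlining can complete.
	 */
	static void dissolve_free_huge_page(struct page *page)
	{
		spin_lock(&hugetlb_lock);
		if (PageHuge(page) && !page_count(page)) {
			struct hstate *h = page_hstate(page);
			int nid = page_to_nid(page);

			/* Unhook the page from hugetlb's free list... */
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			/* ...and return it to the buddy allocator. */
			update_and_free_page(h, page);
		}
		spin_unlock(&hugetlb_lock);
	}

	void dissolve_free_huge_pages(unsigned long start_pfn,
				      unsigned long end_pfn)
	{
		unsigned int order = UINT_MAX;
		unsigned long pfn;
		struct hstate *h;

		/*
		 * Step by the smallest configured hugepage size; start_pfn
		 * is assumed to be aligned to that size (memory blocks are).
		 */
		for_each_hstate(h)
			order = min(order, huge_page_order(h));

		for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
			dissolve_free_huge_page(pfn_to_page(pfn));
	}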
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/hugetlb.h | 6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d1db00790a84..2e02c4ed1035 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -68,6 +68,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
+bool is_hugepage_active(struct page *page);
 void copy_huge_page(struct page *dst, struct page *src);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -138,6 +139,7 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
 
 #define isolate_huge_page(p, l) false
 #define putback_active_hugepage(p) do {} while (0)
+#define is_hugepage_active(x) false
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
@@ -377,6 +379,9 @@ static inline pgoff_t basepage_index(struct page *page)
 	return __basepage_index(page);
 }
 
+extern void dissolve_free_huge_pages(unsigned long start_pfn,
+				     unsigned long end_pfn);
+
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -403,6 +408,7 @@ static inline pgoff_t basepage_index(struct page *page)
 {
 	return page->index;
 }
+#define dissolve_free_huge_pages(s, e) do {} while (0)
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #endif /* _LINUX_HUGETLB_H */
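Note the no-op stub in the !CONFIG_HUGETLB_PAGE branch: it lets callers invoke dissolve_free_huge_pages() unconditionally, without their own #ifdefs. A hypothetical caller in the offlining path might look like the sketch below (the real caller lives in mm/memory_hotplug.c; the function name offline_range() here is illustrative only):

	/*
	 * Hypothetical caller sketch: with the stub above, this compiles
	 * whether or not CONFIG_HUGETLB_PAGE is enabled.
	 */
	static int offline_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		/* ...migrate movable pages (including hugepages) out... */

		/*
		 * Hugepages freed by migration still sit on hugetlb's free
		 * lists; dissolve them into buddy pages, or offlining will
		 * see the range as still in use.
		 */
		dissolve_free_huge_pages(start_pfn, end_pfn);

		/* ...proceed with offlining the now-free range... */
		return 0;
	}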