author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>     2014-06-04 19:05:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:53:51 -0400
commit     c177c81e09e517bbf75b67762cdab1b83aba6976
tree       569aed2a30badb1ce3e5393de74b43eeeb22e376 /include/linux/hugetlb.h
parent     7f39dda9d86fb4f4f17af0de170decf125726f8c
hugetlb: restrict hugepage_migration_support() to x86_64
Currently hugepage migration is available on all archs that support
pmd-level hugepages, but it has been tested only on x86_64, and there
are bugs on other archs. So, to avoid breaking those archs, this patch
restricts the feature strictly to x86_64 until developers of the other
archs become interested in enabling it.
Simply disabling hugepage migration on non-x86_64 archs is not enough
to fix the reported problem, where sys_move_pages() hits the BUG_ON()
in follow_page(FOLL_GET); so additionally make vma_migratable() check
whether hugepage migration is supported for the vma.
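The vma_migratable() side of the fix lives in include/linux/mempolicy.h,
which the diffstat below (limited to hugetlb.h) does not show. A minimal
sketch of what that check plausibly looks like, assuming the new
CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION symbol introduced by this patch and
the existing VM_HUGETLB vma flag; the trailing mapping/zone checks are
elided:

static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	/*
	 * Sketch: on archs that don't opt in, report hugetlb vmas as
	 * non-migratable so sys_move_pages() bails out before
	 * follow_page(FOLL_GET) can hit its BUG_ON().
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;
#endif
	/* ... remaining mapping/zone checks unchanged ... */
	return 1;
}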
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: <stable@vger.kernel.org> [3.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/hugetlb.h')
 include/linux/hugetlb.h | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b65166de1d9d..d0bad1a8b0bd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page)
 
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
 				     unsigned long end_pfn);
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
 static inline int hugepage_migration_support(struct hstate *h)
 {
-	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	return huge_page_shift(h) == PMD_SHIFT;
+#else
+	return 0;
+#endif
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page)
 	return page->index;
 }
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
-#define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
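
For context (not part of this diff), a sketch of how the helper is
consumed on the migration path, loosely modeled on the hugepage
migration entry point in mm/migrate.c from this era; the surrounding
details are illustrative, not the patch's code:

static int unmap_and_move_huge_page(struct page *hpage /* , ... */)
{
	/*
	 * Bail out with -ENOSYS when the arch/hugepage-size combination
	 * is unsupported, rather than attempting a migration that would
	 * blow up later.
	 */
	if (!hugepage_migration_support(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}
	/* ... actual unmap-and-move work ... */
	return 0;
}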