aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2016-03-09 10:22:55 -0500
committerWill Deacon <will.deacon@arm.com>2016-03-09 10:29:29 -0500
commitff7925848b50050732ac0401e0acf27e8b241d7b (patch)
tree6d085f6f9743981c7ebc5c3a5114e32990d78bbc
parent36e5cd6b897e17d03008f81e075625d8e43e52d0 (diff)
arm64: hugetlb: partial revert of 66b3923a1a0f
Commit 66b3923a1a0f ("arm64: hugetlb: add support for PTE contiguous bit")
introduced support for huge pages using the contiguous bit in the PTE as
opposed to block mappings, which may be slightly unwieldy (512M) in 64k
page configurations.

Unfortunately, this support has resulted in some late regressions when
running the libhugetlbfs test suite with 64k pages and CONFIG_DEBUG_VM as
a result of a BUG:

  | readback (2M: 64):	------------[ cut here ]------------
  | kernel BUG at fs/hugetlbfs/inode.c:446!
  | Internal error: Oops - BUG: 0 [#1] SMP
  | Modules linked in:
  | CPU: 7 PID: 1448 Comm: readback Not tainted 4.5.0-rc7 #148
  | Hardware name: linux,dummy-virt (DT)
  | task: fffffe0040964b00 ti: fffffe00c2668000 task.ti: fffffe00c2668000
  | PC is at remove_inode_hugepages+0x44c/0x480
  | LR is at remove_inode_hugepages+0x264/0x480

Rather than revert the entire patch, simply avoid advertising the
contiguous huge page sizes for now while people are actively working on
a fix. This patch can then be reverted once things have been sorted out.

Cc: David Woods <dwoods@ezchip.com>
Reported-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/mm/hugetlbpage.c14
1 files changed, 0 insertions, 14 deletions
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 82d607c3614e..da30529bb1f6 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,10 +306,6 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
-		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
 		return 0;
@@ -317,13 +313,3 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PMD_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif