about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2016-02-26 11:57:13 -0500
committerWill Deacon <will.deacon@arm.com>2016-02-26 12:59:04 -0500
commitdfd55ad85e4a7fbaa82df12467515ac3c81e8a3e (patch)
tree60fcc5a02c202bbc59193cbd1c7a2e7755a5f934
parent81f70ba233d5f660e1ea5fe23260ee323af5d53a (diff)
arm64: vmemmap: use virtual projection of linear region
Commit dd006da21646 ("arm64: mm: increase VA range of identity map") made some changes to the memory mapping code to allow physical memory to reside at an offset that exceeds the size of the virtual mapping. However, since the size of the vmemmap area is proportional to the size of the VA area, but it is populated relative to the physical space, we may end up with the struct page array being mapped outside of the vmemmap region. For instance, on my Seattle A0 box, I can see the following output in the dmesg log. vmemmap : 0xffffffbdc0000000 - 0xffffffbfc0000000 ( 8 GB maximum) 0xffffffbfc0000000 - 0xffffffbfd0000000 ( 256 MB actual) We can fix this by deciding that the vmemmap region is not a projection of the physical space, but of the virtual space above PAGE_OFFSET, i.e., the linear region. This way, we are guaranteed that the vmemmap region is of sufficient size, and we can even reduce the size by half. Cc: <stable@vger.kernel.org> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/include/asm/pgtable.h7
-rw-r--r--arch/arm64/mm/init.c4
2 files changed, 6 insertions, 5 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bf464de33f52..f50608674580 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -34,13 +34,13 @@
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
  *
- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
  * (rounded up to PUD_SIZE).
  * VMALLOC_START: beginning of the kernel VA space
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  * fixed mappings and modules
  */
-#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START		(VA_START)
@@ -51,7 +51,8 @@
 
 #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
+#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3b061e67bfe..7802f216a67a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -319,8 +319,8 @@ void __init mem_init(void)
 #endif
 		  MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-		  MLG((unsigned long)vmemmap,
-		      (unsigned long)vmemmap + VMEMMAP_SIZE),
+		  MLG(VMEMMAP_START,
+		      VMEMMAP_START + VMEMMAP_SIZE),
 		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
 		      (unsigned long)virt_to_page(high_memory)),
 #endif