author    Catalin Marinas <catalin.marinas@arm.com>  2013-08-23 13:04:44 -0400
committer Catalin Marinas <catalin.marinas@arm.com>  2013-08-28 05:47:00 -0400
commit    e25208f77c2dad5a9f2ab3d3df61252a90b71afa (patch)
tree      13cc074cdd9d4db0c43029c3fcdab3d0845f30fe /arch/arm64/mm/mmu.c
parent    c80b7ee8520606f77fbc8ced870c96659053269e (diff)
arm64: Fix mapping of memory banks not ending on a PMD_SIZE boundary
The map_mem() function limits the current memblock limit to PGDIR_SIZE (the
initial swapper_pg_dir mapping) to avoid create_mapping() allocating memory
from unmapped areas. However, if the first block is within PGDIR_SIZE and does
not end on a PMD_SIZE boundary, then with the 4K page configuration enabled
create_mapping() will try to allocate a pte page. Such a page may be returned
by memblock_alloc() from the end of that bank (or any subsequent bank within
PGDIR_SIZE) which is not mapped yet.

The patch limits the current memblock limit to the aligned end of the first
bank and gradually increases it as more memory is mapped. It also ensures that
the start of the first bank is aligned to PMD_SIZE to avoid pte page
allocation for this mapping.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: "Leizhen (ThunderTown, Euler)" <thunder.leizhen@huawei.com>
Tested-by: "Leizhen (ThunderTown, Euler)" <thunder.leizhen@huawei.com>
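To make the arithmetic concrete, below is a minimal standalone userspace sketch (not part of the patch) of the clamping the fix applies while walking the memory banks. The macro values assume the 4K-page configuration (2MB PMD sections, 1GB PGDIR entries); PHYS_OFFSET and the bank addresses are invented for illustration only.

/*
 * Standalone illustration (userspace, not kernel code) of the limit
 * clamping described above.  The macros mirror the assumed 4K-page
 * configuration: 2MB PMD sections and 1GB PGDIR entries.  PHYS_OFFSET
 * and the bank addresses are made-up example values.
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)		/* 2MB */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PGDIR_SHIFT	30
#define PGDIR_SIZE	(1ULL << PGDIR_SHIFT)		/* 1GB */
#define PHYS_OFFSET	0x80000000ULL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct bank { uint64_t start, end; };

int main(void)
{
	/* First bank starts and ends off a 2MB boundary. */
	struct bank banks[] = {
		{ 0x80080000ULL, 0x9ff40000ULL },
		{ 0xa0000000ULL, 0xc0000000ULL },
	};
	uint64_t limit = PHYS_OFFSET + PGDIR_SIZE;
	unsigned int i;

	for (i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
		uint64_t start = banks[i].start;
		uint64_t end = banks[i].end;

		/* Mirrors the new !CONFIG_ARM64_64K_PAGES block below. */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);	/* round start up */
		if (end < limit)
			limit = end & PMD_MASK;		/* round limit down */

		printf("map [0x%llx - 0x%llx), memblock limit now 0x%llx\n",
		       (unsigned long long)start,
		       (unsigned long long)end,
		       (unsigned long long)limit);
	}
	return 0;
}

Rounding the bank start up and the limit down both err towards memory already covered by the swapper_pg_dir mapping, so any pte page that memblock_alloc() hands back to create_mapping() is guaranteed to be accessible.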
Diffstat (limited to 'arch/arm64/mm/mmu.c')
-rw-r--r--  arch/arm64/mm/mmu.c  23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a8d1059b91b2..f557ebbe7013 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -296,6 +296,7 @@ void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
+	phys_addr_t limit;
 
 	/*
 	 * Temporarily limit the memblock range. We need to do this as
@@ -303,9 +304,11 @@ static void __init map_mem(void)
 	 * memory addressable from the initial direct kernel mapping.
 	 *
 	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned).
+	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+	 * aligned to 2MB as per Documentation/arm64/booting.txt).
 	 */
-	memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
+	limit = PHYS_OFFSET + PGDIR_SIZE;
+	memblock_set_current_limit(limit);
 
 	/* map all the memory banks */
 	for_each_memblock(memory, reg) {
@@ -315,6 +318,22 @@ static void __init map_mem(void)
 		if (start >= end)
 			break;
 
+#ifndef CONFIG_ARM64_64K_PAGES
+		/*
+		 * For the first memory bank align the start address and
+		 * current memblock limit to prevent create_mapping() from
+		 * allocating pte page tables from unmapped memory.
+		 * When 64K pages are enabled, the pte page table for the
+		 * first PGDIR_SIZE is already present in swapper_pg_dir.
+		 */
+		if (start < limit)
+			start = ALIGN(start, PMD_SIZE);
+		if (end < limit) {
+			limit = end & PMD_MASK;
+			memblock_set_current_limit(limit);
+		}
+#endif
+
 		create_mapping(start, __phys_to_virt(start), end - start);
 	}
 