path: root/arch
author		Mark Rutland <mark.rutland@arm.com>	2015-05-13 10:07:54 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-05-14 11:15:20 -0400
commit		965278dcb8ab0b1f666cc47937933c4be4aea48d (patch)
tree		eb658fbdafb95d6735f9103c5bec00ca8904407e /arch
parent		3b8786ff7a1b31645ae2c26a2ec32dbd42ac1094 (diff)
ARM: 8356/1: mm: handle non-pmd-aligned end of RAM
At boot time we round the memblock limit down to section size in an
attempt to ensure that we will have mapped this RAM with section
mappings prior to allocating from it. When mapping RAM we iterate over
PMD-sized chunks, creating these section mappings.

Section mappings are only created when the end of a chunk is aligned to
section size. Unfortunately, with classic page tables (where PMD_SIZE
is 2 * SECTION_SIZE) this means that if a chunk is between 1M and 2M in
size the first 1M will not be mapped despite having been accounted for
in the memblock limit. This has been observed to result in page tables
being allocated from unmapped memory, causing boot-time hangs.

This patch modifies the memblock limit rounding to always round down to
PMD_SIZE instead of SECTION_SIZE. For classic MMU this means that we
will round the memblock limit down to a 2M boundary, matching the
limits on section mappings, and preventing allocations from unmapped
memory. For LPAE there should be no change as PMD_SIZE == SECTION_SIZE.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reported-by: Stefan Agner <stefan@agner.ch>
Tested-by: Stefan Agner <stefan@agner.ch>
Acked-by: Laura Abbott <labbott@redhat.com>
Tested-by: Hans de Goede <hdegoede@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
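To make the failure concrete, here is a minimal stand-alone sketch (plain
C, not kernel code; the end-of-RAM address is hypothetical) of how the two
roundings diverge under the classic, non-LPAE page table layout:

/*
 * Sketch only: SECTION_SIZE/PMD_SIZE use the classic ARM values, and
 * round_down() mirrors the kernel helper for power-of-two sizes.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE (1UL << 20)        /* 1 MiB section mappings */
#define PMD_SIZE     (2 * SECTION_SIZE) /* classic MMU: one PMD covers two sections */

#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	/* Lowmem end that is 1 MiB- but not 2 MiB-aligned (hypothetical). */
	uint32_t end_of_ram = 0x4f700000;

	/* Old behaviour: the limit stays at 0x4f700000, yet the final
	 * sub-PMD chunk is not fully section-mapped, so early page table
	 * allocations can land in unmapped memory and hang the boot. */
	printf("SECTION_SIZE limit: 0x%08lx\n",
	       round_down(end_of_ram, SECTION_SIZE));

	/* New behaviour: the limit drops to 0x4f600000, a 2 MiB boundary
	 * that the PMD-sized mapping loop has fully covered. */
	printf("PMD_SIZE limit:     0x%08lx\n",
	       round_down(end_of_ram, PMD_SIZE));
	return 0;
}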
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/mmu.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4e6ef896c619..7186382672b5 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
 		}
 
 		/*
-		 * Find the first non-section-aligned page, and point
+		 * Find the first non-pmd-aligned page, and point
 		 * memblock_limit at it. This relies on rounding the
-		 * limit down to be section-aligned, which happens at
-		 * the end of this function.
+		 * limit down to be pmd-aligned, which happens at the
+		 * end of this function.
 		 *
 		 * With this algorithm, the start or end of almost any
-		 * bank can be non-section-aligned. The only exception
-		 * is that the start of the bank 0 must be section-
+		 * bank can be non-pmd-aligned. The only exception is
+		 * that the start of the bank 0 must be section-
 		 * aligned, since otherwise memory would need to be
 		 * allocated when mapping the start of bank 0, which
 		 * occurs before any free memory is mapped.
 		 */
 		if (!memblock_limit) {
-			if (!IS_ALIGNED(block_start, SECTION_SIZE))
+			if (!IS_ALIGNED(block_start, PMD_SIZE))
 				memblock_limit = block_start;
-			else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+			else if (!IS_ALIGNED(block_end, PMD_SIZE))
 				memblock_limit = arm_lowmem_limit;
 		}
 
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
-	 * Round the memblock limit down to a section size. This
+	 * Round the memblock limit down to a pmd size. This
 	 * helps to ensure that we will allocate memory from the
-	 * last full section, which should be mapped.
+	 * last full pmd, which should be mapped.
 	 */
 	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+		memblock_limit = round_down(memblock_limit, PMD_SIZE);
 	if (!memblock_limit)
 		memblock_limit = arm_lowmem_limit;
 
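As a rough check of the first hunk's logic, this user-space sketch
(block_start/block_end values are hypothetical; IS_ALIGNED is re-defined
locally to mirror the kernel macro) shows a non-pmd-aligned bank end
capping the limit:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (2UL << 20)                       /* 2 MiB, classic MMU */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)  /* mirrors the kernel macro */

int main(void)
{
	uint32_t block_start      = 0x40000000; /* pmd-aligned bank start (hypothetical) */
	uint32_t block_end        = 0x4f700000; /* non-pmd-aligned bank end (hypothetical) */
	uint32_t arm_lowmem_limit = 0x4f700000;
	uint32_t memblock_limit   = 0;

	/* Mirrors the patched checks: the first non-pmd-aligned boundary
	 * caps the limit, which the second hunk then rounds down. */
	if (!memblock_limit) {
		if (!IS_ALIGNED(block_start, PMD_SIZE))
			memblock_limit = block_start;
		else if (!IS_ALIGNED(block_end, PMD_SIZE))
			memblock_limit = arm_lowmem_limit;
	}
	printf("memblock_limit = 0x%08x\n", (unsigned)memblock_limit); /* 0x4f700000 */
	return 0;
}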
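And to see why the commit message expects no change for LPAE, note that
with the 3-level tables SECTION_SIZE and PMD_SIZE are both 2 MiB, so the
two roundings coincide (the address is again hypothetical):

#include <stdio.h>

#define SECTION_SIZE (1UL << 21) /* LPAE: 2 MiB sections */
#define PMD_SIZE     (1UL << 21) /* LPAE: PMD_SIZE == SECTION_SIZE */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned long limit = 0x4f700000UL; /* hypothetical lowmem limit */

	/* Both constants are 2 MiB, so switching from SECTION_SIZE to
	 * PMD_SIZE leaves the rounded limit unchanged under LPAE. */
	printf("SECTION_SIZE: 0x%08lx\n", round_down(limit, SECTION_SIZE));
	printf("PMD_SIZE:     0x%08lx\n", round_down(limit, PMD_SIZE));
	return 0;
}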