Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c  16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index cecbbc76f624..29954dc28942 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -93,7 +93,7 @@ void __init MMU_init_hw(void)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned long v, s;
+	unsigned long v, s, mapped;
 	phys_addr_t p;
 
 	v = KERNELBASE;
@@ -130,5 +130,17 @@ unsigned long __init mmu_mapin_ram(void)
 		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return total_lowmem - s;
+	mapped = total_lowmem - s;
+
+	/* If the size of RAM is not an exact power of two, we may not
+	 * have covered RAM in its entirety with 16 and 4 MiB
+	 * pages. Consequently, restrict the top end of RAM currently
+	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * coverage with normal-sized pages (or other reasons) do not
+	 * attempt to allocate outside the allowed range.
+	 */
+
+	__initial_memory_limit_addr = memstart_addr + mapped;
+
+	return mapped;
 }
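
For context, a minimal stand-alone sketch (ordinary user-space C, not the kernel code; the constant names are borrowed from 40x_mmu.c only for readability, and the loop is a simplified mirror of mmu_mapin_ram() without the alignment checks) of why a non-power-of-two lowmem size leaves an unmapped "tail" when RAM is covered with 16 MiB and 4 MiB pages alone, which is the situation the new __initial_memory_limit_addr clamp guards against:

/* Hypothetical illustration, not kernel code. */
#include <stdio.h>

#define LARGE_PAGE_SIZE_16M	(16u * 1024 * 1024)
#define LARGE_PAGE_SIZE_4M	(4u * 1024 * 1024)

static unsigned long map_with_large_pages(unsigned long total_lowmem)
{
	unsigned long s = total_lowmem;

	/* Greedily cover RAM with 16 MiB pages, then 4 MiB pages. */
	while (s >= LARGE_PAGE_SIZE_16M)
		s -= LARGE_PAGE_SIZE_16M;
	while (s >= LARGE_PAGE_SIZE_4M)
		s -= LARGE_PAGE_SIZE_4M;

	return total_lowmem - s;	/* bytes actually covered ("mapped") */
}

int main(void)
{
	/* 62 MiB is not a power of two: 48 MiB via 16 MiB pages plus
	 * 12 MiB via 4 MiB pages still leaves a 2 MiB tail unmapped. */
	unsigned long total_lowmem = 62u * 1024 * 1024;
	unsigned long mapped = map_with_large_pages(total_lowmem);

	printf("mapped %lu MiB, tail of %lu MiB left for normal-sized pages\n",
	       mapped >> 20, (total_lowmem - mapped) >> 20);
	return 0;
}

With __initial_memory_limit_addr clamped to memstart_addr + mapped, early LMB allocations (for the PTEs that will cover that tail with normal-sized pages, or for anything else) stay within the region that has already been mapped.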