Diffstat (limited to 'arch/sh/mm/init.c')
 arch/sh/mm/init.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 58a93fb3d965..c9dbace35b16 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -287,6 +287,8 @@ static void __init do_init_bootmem(void)
 static void __init early_reserve_mem(void)
 {
 	unsigned long start_pfn;
+	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
+	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
 
 	/*
 	 * Partially used pages are not usable - thus
@@ -300,15 +302,13 @@ static void __init early_reserve_mem(void)
 	 * this catches the (definitely buggy) case of us accidentally
 	 * initializing the bootmem allocator with an invalid RAM area.
 	 */
-	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-			 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-			 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
 
 	/*
 	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
 	 */
 	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
 
 	/*
 	 * Handle additional early reservations
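
For reference, a minimal standalone userspace sketch of the arithmetic the rewritten code performs. The values chosen below for __MEMORY_START, PHYSICAL_OFFSET, CONFIG_ZERO_PAGE_OFFSET and start_pfn are assumptions for illustration only (the real ones are board- and config-specific), and PFN_PHYS/PAGE_SIZE are re-created locally with a 4 KiB page; this is not how the kernel code itself is built or run.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Assumed example values -- on real hardware __MEMORY_START,
 * PHYSICAL_OFFSET and CONFIG_ZERO_PAGE_OFFSET come from the board
 * and kernel configuration; these numbers are for illustration only.
 */
#define EXAMPLE_MEMORY_START     0x08000000u   /* stands in for __MEMORY_START */
#define EXAMPLE_PHYSICAL_OFFSET  0x00000000u   /* stands in for PHYSICAL_OFFSET */
#define EXAMPLE_ZERO_PAGE_OFFSET 0x00001000u   /* stands in for CONFIG_ZERO_PAGE_OFFSET */

#define EXAMPLE_PAGE_SHIFT 12u                 /* 4 KiB pages */
#define EXAMPLE_PAGE_SIZE  (1u << EXAMPLE_PAGE_SHIFT)
#define EXAMPLE_PFN_PHYS(pfn) ((uint32_t)(pfn) << EXAMPLE_PAGE_SHIFT)

int main(void)
{
	/* Arbitrary example for the first free page frame after the kernel. */
	uint32_t start_pfn = (EXAMPLE_MEMORY_START >> EXAMPLE_PAGE_SHIFT) + 4;

	/* Mirrors the two new locals added by the patch: the base of the
	 * zero page region now includes PHYSICAL_OFFSET. */
	uint32_t zero_base = EXAMPLE_MEMORY_START + EXAMPLE_PHYSICAL_OFFSET;
	uint32_t start     = zero_base + EXAMPLE_ZERO_PAGE_OFFSET;

	/* Same size expression as the rewritten memblock_reserve() call. */
	uint32_t size = (EXAMPLE_PFN_PHYS(start_pfn) + EXAMPLE_PAGE_SIZE - 1) - start;

	printf("reserve [0x%08" PRIx32 ", +0x%" PRIx32 ") above the zero page\n",
	       start, size);
	if (EXAMPLE_ZERO_PAGE_OFFSET != 0)
		printf("reserve [0x%08" PRIx32 ", +0x%" PRIx32 ") below the zero page\n",
		       zero_base, (uint32_t)EXAMPLE_ZERO_PAGE_OFFSET);
	return 0;
}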