Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c  32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e71c5cbc8f35..452ee5b8f309 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -331,11 +331,23 @@ int devmem_is_allowed(unsigned long pagenr)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-        unsigned long addr = begin;
+        unsigned long addr;
+        unsigned long begin_aligned, end_aligned;
 
-        if (addr >= end)
+        /* Make sure boundaries are page aligned */
+        begin_aligned = PAGE_ALIGN(begin);
+        end_aligned   = end & PAGE_MASK;
+
+        if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+                begin = begin_aligned;
+                end   = end_aligned;
+        }
+
+        if (begin >= end)
                 return;
 
+        addr = begin;
+
         /*
          * If debugging page accesses then do not free this memory but
          * mark them not present - any buggy init-section access will
@@ -343,7 +355,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
          */
 #ifdef CONFIG_DEBUG_PAGEALLOC
         printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-                begin, PAGE_ALIGN(end));
+                begin, end);
         set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
         /*
@@ -358,8 +370,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
         for (; addr < end; addr += PAGE_SIZE) {
                 ClearPageReserved(virt_to_page(addr));
                 init_page_count(virt_to_page(addr));
-                memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
+                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                 free_page(addr);
                 totalram_pages++;
         }
@@ -376,6 +387,15 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-        free_init_pages("initrd memory", start, end);
+        /*
+         * end may not be page aligned, and we cannot align it here: the
+         * decompressor would be confused by an aligned initrd_end.
+         * The partial end page has already been reserved earlier in
+         *    - i386_start_kernel()
+         *    - x86_64_start_kernel()
+         *    - relocate_initrd()
+         * so PAGE_ALIGN(end) is safe here and lets that partial page be freed.
+         */
+        free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
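
For readers skimming the patch, here is a minimal stand-alone C sketch (not part of the change) of the page-boundary clamping the new free_init_pages() performs: begin is rounded up with PAGE_ALIGN() and end is rounded down with PAGE_MASK, and a warning is printed when the caller passed an unaligned range. The PAGE_* macros are re-defined locally only for illustration (in the kernel they come from <asm/page.h>), and the sample addresses are made up.

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long begin = 0x100010;  /* hypothetical, not page aligned */
        unsigned long end   = 0x104fe0;  /* hypothetical, not page aligned */

        /* same clamping as the patched free_init_pages() */
        unsigned long begin_aligned = PAGE_ALIGN(begin);  /* round up   */
        unsigned long end_aligned   = end & PAGE_MASK;    /* round down */

        if (begin_aligned != begin || end_aligned != end)
                printf("warning: unaligned range %08lx..%08lx\n", begin, end);

        if (begin_aligned >= end_aligned) {
                printf("nothing to free\n");
                return 0;
        }

        printf("freeing whole pages %08lx..%08lx\n", begin_aligned, end_aligned);
        return 0;
}

With these inputs the sketch warns about the unaligned range and reports freeing only the fully contained pages, 00101000..00104000, which mirrors why the patch clamps begin and end before touching any page.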