path: root/arch/x86/mm/init.c
author     Tejun Heo <tj@kernel.org>   2010-04-04 22:37:28 -0400
committer  Tejun Heo <tj@kernel.org>   2010-04-04 22:37:28 -0400
commit     336f5899d287f06d8329e208fc14ce50f7ec9698 (patch)
tree       9b762d450d5eb248a6ff8317badb7e223d93ed58 /arch/x86/mm/init.c
parent     a4ab2773205e8b94c18625455f85e3b6bb9d7ad6 (diff)
parent     db217dece3003df0841bacf9556b5c06aa097dae (diff)

Merge branch 'master' into export-slabh
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c | 32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a4a7d7dc8aa4..b278535b14aa 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -332,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-        unsigned long addr = begin;
+        unsigned long addr;
+        unsigned long begin_aligned, end_aligned;
 
-        if (addr >= end)
+        /* Make sure boundaries are page aligned */
+        begin_aligned = PAGE_ALIGN(begin);
+        end_aligned   = end & PAGE_MASK;
+
+        if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+                begin = begin_aligned;
+                end   = end_aligned;
+        }
+
+        if (begin >= end)
                 return;
 
+        addr = begin;
+
         /*
          * If debugging page accesses then do not free this memory but
          * mark them not present - any buggy init-section access will
@@ -344,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
          */
 #ifdef CONFIG_DEBUG_PAGEALLOC
         printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-                begin, PAGE_ALIGN(end));
+                begin, end);
         set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
         /*
@@ -359,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
         for (; addr < end; addr += PAGE_SIZE) {
                 ClearPageReserved(virt_to_page(addr));
                 init_page_count(virt_to_page(addr));
-                memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
+                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                 free_page(addr);
                 totalram_pages++;
         }
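
As a side note for readers following the logic: below is a minimal userspace sketch of the boundary clamping that the first hunk adds to free_init_pages(). It assumes 4 KiB pages and redefines PAGE_SIZE, PAGE_MASK and PAGE_ALIGN() locally to mirror the kernel macros; printf() stands in for the kernel's WARN_ON() and printk(), and the addresses in main() are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

/* Clamp [begin, end) to page boundaries, warning on unaligned input,
 * the same way the patched free_init_pages() does. */
static void clamp_to_page_boundaries(unsigned long *begin, unsigned long *end)
{
        unsigned long begin_aligned = PAGE_ALIGN(*begin); /* round up   */
        unsigned long end_aligned   = *end & PAGE_MASK;   /* round down */

        if (begin_aligned != *begin || end_aligned != *end) {
                printf("warning: unaligned range %#lx..%#lx, clamping\n",
                       *begin, *end);
                *begin = begin_aligned;
                *end   = end_aligned;
        }
}

int main(void)
{
        unsigned long begin = 0x100123, end = 0x104abc; /* hypothetical */

        clamp_to_page_boundaries(&begin, &end);
        if (begin >= end)
                printf("nothing left to free\n");
        else
                printf("would free %#lx..%#lx (%lu pages)\n",
                       begin, end, (end - begin) >> PAGE_SHIFT);
        return 0;
}

Rounding begin up and end down means a page that is only partially inside the range is never freed; the WARN_ON() in the real function exists to flag callers that pass such unaligned ranges.
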
@@ -377,6 +388,15 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-        free_init_pages("initrd memory", start, end);
+        /*
+         * end may be unaligned, and we cannot align it here: the
+         * decompressor could be confused by an aligned initrd_end.
+         * The partial page past end was already reserved in
+         *   - i386_start_kernel()
+         *   - x86_64_start_kernel()
+         *   - relocate_initrd()
+         * so PAGE_ALIGN(end) is safe here and frees that partial page too.
+         */
+        free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
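
The free_initrd_mem() hunk above relies on the partial page past the real initrd end having been reserved earlier during boot (in i386_start_kernel(), x86_64_start_kernel() and relocate_initrd()), so rounding end up hands free_init_pages() a fully aligned range and does not trip the new WARN_ON(). A small sketch of that rounding, again assuming 4 KiB pages and a made-up initrd_end value:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long initrd_end = 0x37d2a4c0;        /* hypothetical, unaligned */
        unsigned long freed_end  = PAGE_ALIGN(initrd_end);

        /* The bytes from initrd_end up to freed_end sit in a page that was
         * reserved at boot, so freeing up to freed_end releases it as well. */
        printf("initrd end %#lx rounds up to %#lx\n", initrd_end, freed_end);
        printf("partial page %#lx..%#lx is freed too\n",
               initrd_end & PAGE_MASK, freed_end);
        return 0;
}
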