aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/init.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--arch/x86/mm/init.c44
1 files changed, 27 insertions, 17 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 73ffd5536f62..b278535b14aa 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1,3 +1,4 @@
1#include <linux/gfp.h>
1#include <linux/initrd.h> 2#include <linux/initrd.h>
2#include <linux/ioport.h> 3#include <linux/ioport.h>
3#include <linux/swap.h> 4#include <linux/swap.h>
@@ -146,10 +147,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
146 use_gbpages = direct_gbpages; 147 use_gbpages = direct_gbpages;
147#endif 148#endif
148 149
149 set_nx();
150 if (nx_enabled)
151 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
152
153 /* Enable PSE if available */ 150 /* Enable PSE if available */
154 if (cpu_has_pse) 151 if (cpu_has_pse)
155 set_in_cr4(X86_CR4_PSE); 152 set_in_cr4(X86_CR4_PSE);
@@ -270,16 +267,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
270 if (!after_bootmem) 267 if (!after_bootmem)
271 find_early_table_space(end, use_pse, use_gbpages); 268 find_early_table_space(end, use_pse, use_gbpages);
272 269
273#ifdef CONFIG_X86_32
274 for (i = 0; i < nr_range; i++)
275 kernel_physical_mapping_init(mr[i].start, mr[i].end,
276 mr[i].page_size_mask);
277 ret = end;
278#else /* CONFIG_X86_64 */
279 for (i = 0; i < nr_range; i++) 270 for (i = 0; i < nr_range; i++)
280 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, 271 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
281 mr[i].page_size_mask); 272 mr[i].page_size_mask);
282#endif
283 273
284#ifdef CONFIG_X86_32 274#ifdef CONFIG_X86_32
285 early_ioremap_page_table_range_init(); 275 early_ioremap_page_table_range_init();
@@ -342,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr)
342 332
343void free_init_pages(char *what, unsigned long begin, unsigned long end) 333void free_init_pages(char *what, unsigned long begin, unsigned long end)
344{ 334{
345 unsigned long addr = begin; 335 unsigned long addr;
336 unsigned long begin_aligned, end_aligned;
337
338 /* Make sure boundaries are page aligned */
339 begin_aligned = PAGE_ALIGN(begin);
340 end_aligned = end & PAGE_MASK;
346 341
347 if (addr >= end) 342 if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
343 begin = begin_aligned;
344 end = end_aligned;
345 }
346
347 if (begin >= end)
348 return; 348 return;
349 349
350 addr = begin;
351
350 /* 352 /*
351 * If debugging page accesses then do not free this memory but 353 * If debugging page accesses then do not free this memory but
352 * mark them not present - any buggy init-section access will 354 * mark them not present - any buggy init-section access will
@@ -354,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
354 */ 356 */
355#ifdef CONFIG_DEBUG_PAGEALLOC 357#ifdef CONFIG_DEBUG_PAGEALLOC
356 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", 358 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
357 begin, PAGE_ALIGN(end)); 359 begin, end);
358 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); 360 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
359#else 361#else
360 /* 362 /*
@@ -369,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
369 for (; addr < end; addr += PAGE_SIZE) { 371 for (; addr < end; addr += PAGE_SIZE) {
370 ClearPageReserved(virt_to_page(addr)); 372 ClearPageReserved(virt_to_page(addr));
371 init_page_count(virt_to_page(addr)); 373 init_page_count(virt_to_page(addr));
372 memset((void *)(addr & ~(PAGE_SIZE-1)), 374 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
373 POISON_FREE_INITMEM, PAGE_SIZE);
374 free_page(addr); 375 free_page(addr);
375 totalram_pages++; 376 totalram_pages++;
376 } 377 }
@@ -387,6 +388,15 @@ void free_initmem(void)
387#ifdef CONFIG_BLK_DEV_INITRD 388#ifdef CONFIG_BLK_DEV_INITRD
388void free_initrd_mem(unsigned long start, unsigned long end) 389void free_initrd_mem(unsigned long start, unsigned long end)
389{ 390{
390 free_init_pages("initrd memory", start, end); 391 /*
 392	 * end might not be page aligned, and we cannot align it here:
 393	 * the decompressor could be confused by an aligned initrd_end.
394 * We already reserve the end partial page before in
395 * - i386_start_kernel()
396 * - x86_64_start_kernel()
397 * - relocate_initrd()
 398	 * So here we can safely use PAGE_ALIGN() to free the partial page.
399 */
400 free_init_pages("initrd memory", start, PAGE_ALIGN(end));
391} 401}
392#endif 402#endif