Diffstat (limited to 'arch/x86_64/mm/init.c')
-rw-r--r--  arch/x86_64/mm/init.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
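Both hunks below lean on the x86_64 page-attribute interface. For reference, the two routines involved are declared roughly as follows in include/asm-x86_64/cacheflush.h of kernels from this period; the exact header and prototypes are quoted from memory and should be read as an assumption, not as part of this commit:

/* Assumed prototypes, include/asm-x86_64/cacheflush.h (2.6.2x era) */
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
void global_flush_tlb(void);

change_page_attr_addr() only edits the kernel page tables for the given pages; as the comment at the end of the diff notes, a global_flush_tlb() call is required afterwards before the change is fully effective.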
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 69e22d3c9238..e3134bc9a4fc 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
+#include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
@@ -563,21 +564,23 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	if (begin >= end)
 		return;
 
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
 		ClearPageReserved(page);
 		init_page_count(page);
 		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
+		if (addr >= __START_KERNEL_map)
+			change_page_attr_addr(addr, 1, __pgprot(0));
 		__free_page(page);
 		totalram_pages++;
 	}
+	if (addr > __START_KERNEL_map)
+		global_flush_tlb();
 }
 
 void free_initmem(void)
 {
-	memset(__initdata_begin, POISON_FREE_INITDATA,
-	       __initdata_end - __initdata_begin);
 	free_init_pages("unused kernel memory",
 			__pa_symbol(&__init_begin),
 			__pa_symbol(&__init_end));
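Assembled from the hunk above, free_init_pages() reads as follows after the patch. This is an editor's reconstruction for readability; the opening declaration of addr sits above the quoted context and is assumed rather than taken from the diff:

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;	/* assumed: declared above the hunk's context */

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
		ClearPageReserved(page);
		init_page_count(page);
		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
		/* added by this patch: reset page attributes for addresses
		 * above __START_KERNEL_map */
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		__free_page(page);
		totalram_pages++;
	}
	/* added by this patch: flush once after the loop when the range was
	 * above __START_KERNEL_map */
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}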
@@ -587,14 +590,18 @@ void free_initmem(void)
 
 void mark_rodata_ro(void)
 {
-	unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata));
-	unsigned long end = (unsigned long)__va(__pa_symbol(&__end_rodata));
+	unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size;
 
-	for (; addr < end; addr += PAGE_SIZE)
-		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+#ifdef CONFIG_HOTPLUG_CPU
+	/* It must still be possible to apply SMP alternatives. */
+	if (num_possible_cpus() > 1)
+		start = PFN_ALIGN(__va(__pa_symbol(&_etext)));
+#endif
+	size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start;
+	change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO);
 
-	printk ("Write protecting the kernel read-only data: %luk\n",
-		(__end_rodata - __start_rodata) >> 10);
+	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+	       size >> 10);
 
 	/*
 	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
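After this hunk, mark_rodata_ro() reads roughly as below. The reconstruction is for readability only: the tail of the function (the rest of the comment and the global_flush_tlb() call it refers to) lies past the quoted context, so that part is an assumption rather than something shown in the diff:

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = PFN_ALIGN(__va(__pa_symbol(&_etext)));
#endif
	size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start;
	change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 */
	global_flush_tlb();	/* assumed: below the context shown in the hunk */
}

In other words, the write-protected range now starts at _stext instead of __start_rodata, so the kernel text is made read-only along with .rodata, except in the CONFIG_HOTPLUG_CPU case with more than one possible CPU, where the range starts at _etext so SMP alternatives can still be applied; the single change_page_attr_addr() call over the whole range also replaces the old page-by-page loop.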