author    Jan Beulich <jbeulich@novell.com>    2007-05-02 13:27:10 -0400
committer Andi Kleen <andi@basil.nowhere.org> 2007-05-02 13:27:10 -0400
commit    6fb14755a676282a4e6caa05a08c92db8e45cfff
tree      71a862edf87cafe61986c0aff90db72045cf14c3 /arch/i386/mm/init.c
parent    d01ad8dd56527be72947b4b9997bb2c05783c3ed
[PATCH] x86: tighten kernel image page access rights
On x86-64, kernel memory freed after init can be entirely unmapped instead of just getting 'poisoned' by overwriting with a debug pattern. On i386 and x86-64 (under CONFIG_DEBUG_RODATA), kernel text and bug table can also be write-protected.

Compared to the first version, this one prevents re-creating deleted mappings in the kernel image range on x86-64, if those got removed previously. This, together with the original changes, prevents temporarily having inconsistent mappings when cacheability attributes are being changed on such pages (e.g. from AGP code).

While on i386 such duplicate mappings don't exist, the same change is done there, too, both for consistency and because checking pte_present() before using various other pte_XXX functions is a requirement anyway. At once, i386 code gets adjusted to use pte_huge() instead of open coding this.

AK: split out cpa() changes

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
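The mechanism the i386 hunks below rely on is the change_page_attr()/global_flush_tlb() pair. As a minimal sketch of that caller contract, assuming the 2.6-era i386 API declared in <asm/cacheflush.h> (protect_range() itself is a hypothetical helper for illustration, not part of this patch):

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Hypothetical helper, illustration only: change the protection of a
 * page-aligned kernel range of 'size' bytes. */
static void protect_range(unsigned long start, unsigned long size,
			  pgprot_t prot)
{
	/* Adjust the kernel mapping of size >> PAGE_SHIFT pages... */
	change_page_attr(virt_to_page(start), size >> PAGE_SHIFT, prot);
	/* ...then flush: change_page_attr() requires a global_flush_tlb()
	 * call after it, as the comment kept in mark_rodata_ro() notes. */
	global_flush_tlb();
}

mark_rodata_ro() in the diff below follows this pattern, batching the RX (kernel text) and RO (rodata) attribute changes before flushing once; the flush itself lies past the end of the second hunk's context.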
Diffstat (limited to 'arch/i386/mm/init.c')
 arch/i386/mm/init.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 23be1b0aafa4..bd5ef3718504 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/bootmem.h>
 #include <linux/slab.h>
@@ -751,13 +752,25 @@ static int noinline do_test_wp_bit(void)
 
 void mark_rodata_ro(void)
 {
-	unsigned long addr = (unsigned long)__start_rodata;
+	unsigned long start = PFN_ALIGN(_text);
+	unsigned long size = PFN_ALIGN(_etext) - start;
 
-	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
-		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
+#ifdef CONFIG_HOTPLUG_CPU
+	/* It must still be possible to apply SMP alternatives. */
+	if (num_possible_cpus() <= 1)
+#endif
+	{
+		change_page_attr(virt_to_page(start),
+		                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+		printk("Write protecting the kernel text: %luk\n", size >> 10);
+	}
 
-	printk("Write protecting the kernel read-only data: %uk\n",
-		(__end_rodata - __start_rodata) >> 10);
+	start += size;
+	size = (unsigned long)__end_rodata - start;
+	change_page_attr(virt_to_page(start),
+	                 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
+	printk("Write protecting the kernel read-only data: %luk\n",
+	       size >> 10);
 
 	/*
 	 * change_page_attr() requires a global_flush_tlb() call after it.
@@ -781,7 +794,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		__free_page(page);
 		totalram_pages++;
 	}
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 }
 
 void free_initmem(void)
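A note on the new include: <linux/pfn.h> is pulled in for PFN_ALIGN(), which rounds _text and _etext up to a page boundary so that change_page_attr() operates on whole pages. A sketch of the macro as defined in include/linux/pfn.h of this era (worth verifying against the exact tree):

/* Round an address up to the next PAGE_SIZE boundary. */
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)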