author     Jan Beulich <jbeulich@novell.com>    2007-05-02 13:27:10 -0400
committer  Andi Kleen <andi@basil.nowhere.org>  2007-05-02 13:27:10 -0400
commit     6fb14755a676282a4e6caa05a08c92db8e45cfff (patch)
tree       71a862edf87cafe61986c0aff90db72045cf14c3 /arch/x86_64
parent     d01ad8dd56527be72947b4b9997bb2c05783c3ed (diff)
[PATCH] x86: tighten kernel image page access rights
On x86-64, kernel memory freed after init can be entirely unmapped instead
of just getting 'poisoned' by overwriting with a debug pattern.
On i386 and x86-64 (under CONFIG_DEBUG_RODATA), kernel text and bug table
can also be write-protected.
Compared to the first version, this one prevents re-creating deleted
mappings in the kernel image range on x86-64 if they were previously
removed. Together with the original changes, this prevents temporarily
inconsistent mappings while cacheability attributes are being changed on
such pages (e.g. from AGP code). While such duplicate mappings don't exist
on i386, the same change is made there too, both for consistency and
because checking pte_present() before using various other pte_XXX functions
is a requirement anyway. At the same time, the i386 code is adjusted to use
pte_huge() instead of open-coding that test.
AK: split out cpa() changes
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
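
The i386 hunks are outside this view (the diffstat below is limited to
arch/x86_64). As an illustration only, with a hypothetical helper that is
not part of the patch, the pte_present()-before-pte_XXX() pattern the
message describes looks roughly like:

	/*
	 * Illustrative sketch, not patch code: a pte in the kernel image
	 * range may now be non-present (its mapping was removed when init
	 * memory was freed), so pte_present() must be checked before any
	 * other pte_XXX() accessor is trusted.
	 */
	static int kernel_image_pte_is_large(unsigned long address)
	{
		pte_t *kpte = lookup_address(address);

		if (!kpte || !pte_present(*kpte))	/* mapping may be gone */
			return 0;
		return pte_huge(*kpte);	/* rather than open-coding _PAGE_PSE */
	}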
Diffstat (limited to 'arch/x86_64')
 arch/x86_64/kernel/head.S        |  1 -
 arch/x86_64/kernel/vmlinux.lds.S |  5 +++--
 arch/x86_64/mm/init.c            | 25 ++++++++++++++++---------
 3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 36aa98a6d15c..fd9fdfdd143e 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -280,7 +280,6 @@ early_idt_ripmsg:
 
 	.balign PAGE_SIZE
 ENTRY(stext)
-ENTRY(_stext)
 
 #define NEXT_PAGE(name) \
 	.balign PAGE_SIZE; \
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 5176ecf006ee..3bdeb88d28f4 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -29,6 +29,7 @@ SECTIONS
   .text : AT(ADDR(.text) - LOAD_OFFSET) {
 	/* First the code that has to be first for bootstrapping */
 	*(.bootstrap.text)
+	_stext = .;
 	/* Then all the functions that are "hot" in profiles, to group them
 	   onto the same hugetlb entry */
 	#include "functionlist"
@@ -50,10 +51,10 @@ SECTIONS
   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
   __stop___ex_table = .;
 
-  RODATA
-
   BUG_TABLE
 
+  RODATA
+
   . = ALIGN(PAGE_SIZE);		/* Align data segment to page size boundary */
   /* Data */
   .data : AT(ADDR(.data) - LOAD_OFFSET) {
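
This reordering matters because mark_rodata_ro() (see the init.c hunk
below) write-protects a single contiguous range ending at __end_rodata;
with RODATA moved after BUG_TABLE, the bug table falls inside that range.
The resulting section order, sketched:

	.text        (_stext ... _etext)
	__ex_table
	__bug_table  (BUG_TABLE)
	.rodata      (RODATA, ends at __end_rodata)
	. = ALIGN(PAGE_SIZE)
	.data        (stays writable)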
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 69e22d3c9238..e3134bc9a4fc 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
+#include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
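
The new linux/pfn.h include is for PFN_ALIGN(), which mark_rodata_ro()
below uses to round the range start up to a page boundary. As of this era
the macro is defined as:

	#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)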
@@ -563,21 +564,23 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	if (begin >= end)
 		return;
 
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
 		ClearPageReserved(page);
 		init_page_count(page);
 		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
+		if (addr >= __START_KERNEL_map)
+			change_page_attr_addr(addr, 1, __pgprot(0));
 		__free_page(page);
 		totalram_pages++;
 	}
+	if (addr > __START_KERNEL_map)
+		global_flush_tlb();
 }
 
 void free_initmem(void)
 {
-	memset(__initdata_begin, POISON_FREE_INITDATA,
-		__initdata_end - __initdata_begin);
 	free_init_pages("unused kernel memory",
 			__pa_symbol(&__init_begin),
 			__pa_symbol(&__init_end));
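
Two details worth noting: __pgprot(0) clears every attribute bit,
including _PAGE_PRESENT, so the freed page's kernel-image mapping is
removed outright instead of lingering behind the poison pattern; and the
__START_KERNEL_map guard confines this to kernel-image addresses, leaving
other callers of free_init_pages() on the old poison-and-free path. The
same batched pattern as a standalone helper (hypothetical sketch, not from
this patch):

	/* Hypothetical helper mirroring the loop above: unmap a freed
	 * kernel-image range page by page, then commit with one flush. */
	static void unmap_freed_kernel_range(unsigned long begin, unsigned long end)
	{
		unsigned long addr;

		for (addr = begin; addr < end; addr += PAGE_SIZE)
			change_page_attr_addr(addr, 1, __pgprot(0)); /* queue unmap */
		global_flush_tlb(); /* one flush for the whole batch */
	}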
@@ -587,14 +590,18 @@ void free_initmem(void)
 
 void mark_rodata_ro(void)
 {
-	unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata));
-	unsigned long end = (unsigned long)__va(__pa_symbol(&__end_rodata));
+	unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size;
 
-	for (; addr < end; addr += PAGE_SIZE)
-		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+#ifdef CONFIG_HOTPLUG_CPU
+	/* It must still be possible to apply SMP alternatives. */
+	if (num_possible_cpus() > 1)
+		start = PFN_ALIGN(__va(__pa_symbol(&_etext)));
+#endif
+	size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start;
+	change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO);
 
-	printk ("Write protecting the kernel read-only data: %luk\n",
-		(__end_rodata - __start_rodata) >> 10);
+	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+		size >> 10);
 
 	/*
 	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
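
The CONFIG_HOTPLUG_CPU carve-out above exists because kernel text may
still need patching after boot; a comment sketch of the reasoning (the
function name is an assumption about the mechanism, not patch code):

	/*
	 * With CPU hotplug, alternatives_smp_switch() may still rewrite
	 * kernel text when a CPU comes or goes, so as long as more than one
	 * CPU is possible, write protection starts at _etext (covering only
	 * the exception table, bug table and rodata) rather than at _stext.
	 */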