author		Suresh Siddha <suresh.b.siddha@intel.com>	2009-10-14 17:46:56 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2009-10-20 01:46:00 -0400
commit		74e081797bd9d2a7d8005fe519e719df343a2ba8 (patch)
tree		f210cca2002f87bf4cb17c20b853c129ce1df8f9 /arch
parent		b9af7c0d44b8bb71e3af5e94688d076414aa8c87 (diff)
x86-64: align RODATA kernel section to 2MB with CONFIG_DEBUG_RODATA
CONFIG_DEBUG_RODATA chops the large pages spanning boundaries of kernel text/rodata/data into small 4KB pages, as these regions are mapped with different attributes (text as RO, RODATA as RO and NX, etc.).

On x86_64, preserve the large page mappings for kernel text/rodata/data boundaries when CONFIG_DEBUG_RODATA is enabled. This is done by allowing the RODATA section to be hugepage aligned, so that the same RWX attributes apply within each 2MB page boundary.

Extra memory pages padding the sections will be freed at the end of boot, and the kernel identity mappings for them will have different RWX permissions compared to the kernel text mappings. Kernel identity mappings to these physical pages will use smaller pages, but large page mappings are still retained for the kernel text/rodata/data mappings.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <20091014220254.190119924@sbs-t61.sc.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
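As a side note (not part of the commit), here is a small standalone C sketch of the arithmetic described above: aligning .rodata to 2MB boundaries creates padding holes before and after the section, and those padding pages are exactly what mark_rodata_ro() hands back with free_init_pages(). The addresses, the section size, and the ALIGN_UP() helper are hypothetical values made up for this example; in the kernel the real boundaries come from the vmlinux.lds.S symbols (__stop___ex_table, __start_rodata, __end_rodata, _sdata), and the sketch assumes a 64-bit build.

/*
 * Standalone illustration of the 2MB alignment holes; not kernel code.
 */
#include <stdio.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* x86-64 large page: 2MB */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical layout: text + exception table end mid-way into a 2MB page. */
	unsigned long text_end     = 0xffffffff81632000UL;
	unsigned long rodata_size  = 0x2a4000UL;

	/* X64_ALIGN_DEBUG_RODATA_BEGIN: .rodata starts on the next 2MB boundary. */
	unsigned long rodata_start = ALIGN_UP(text_end, HPAGE_SIZE);
	/* X64_ALIGN_DEBUG_RODATA_END: .data starts on the 2MB boundary after .rodata. */
	unsigned long rodata_end   = rodata_start + rodata_size;
	unsigned long data_start   = ALIGN_UP(rodata_end, HPAGE_SIZE);

	/*
	 * These two padding ranges are never referenced once boot is done,
	 * which is why mark_rodata_ro() frees them with free_init_pages().
	 */
	printf("hole before .rodata: %lu KB\n", (rodata_start - text_end) >> 10);
	printf("hole after  .rodata: %lu KB\n", (data_start - rodata_end) >> 10);
	return 0;
}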
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/sections.h	6
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S	17
-rw-r--r--	arch/x86/mm/init_64.c	14
-rw-r--r--	arch/x86/mm/pageattr.c	14
4 files changed, 50 insertions, 1 deletions
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 1b7ee5d673c2..0a5242428659 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -2,7 +2,13 @@
 #define _ASM_X86_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <asm/uaccess.h>
 
 extern char __brk_base[], __brk_limit[];
+extern struct exception_table_entry __stop___ex_table[];
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+extern char __end_rodata_hpage_align[];
+#endif
 
 #endif	/* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 92929fb3f9fa..14763790e415 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -41,6 +41,21 @@ ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
 #endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+
+#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);
+
+#define X64_ALIGN_DEBUG_RODATA_END				\
+		. = ALIGN(HPAGE_SIZE);				\
+		__end_rodata_hpage_align = .;
+
+#else
+
+#define X64_ALIGN_DEBUG_RODATA_BEGIN
+#define X64_ALIGN_DEBUG_RODATA_END
+
+#endif
+
 PHDRS {
 	text PT_LOAD FLAGS(5);          /* R_E */
 	data PT_LOAD FLAGS(7);          /* RWE */
@@ -90,7 +105,9 @@ SECTIONS
 
 	EXCEPTION_TABLE(16) :text = 0x9090
 
+	X64_ALIGN_DEBUG_RODATA_BEGIN
 	RO_DATA(PAGE_SIZE)
+	X64_ALIGN_DEBUG_RODATA_END
 
 	/* Data */
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7dafd4159ad6..0ed09fad6aa1 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -727,9 +727,13 @@ void set_kernel_text_ro(void)
 
 void mark_rodata_ro(void)
 {
-	unsigned long start = PFN_ALIGN(_text), end = PFN_ALIGN(__end_rodata);
+	unsigned long start = PFN_ALIGN(_text);
 	unsigned long rodata_start =
 		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+	unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
+	unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
+	unsigned long data_start = (unsigned long) &_sdata;
 
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 	       (end - start) >> 10);
@@ -752,6 +756,14 @@ void mark_rodata_ro(void)
 	printk(KERN_INFO "Testing CPA: again\n");
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
+
+	free_init_pages("unused kernel memory",
+			(unsigned long) page_address(virt_to_page(text_end)),
+			(unsigned long)
+			 page_address(virt_to_page(rodata_start)));
+	free_init_pages("unused kernel memory",
+			(unsigned long) page_address(virt_to_page(rodata_end)),
+			(unsigned long) page_address(virt_to_page(data_start)));
 }
 
 #endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dd38bfbefd1f..b494fc4a986e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -279,6 +279,20 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+	/*
+	 * Kernel text mappings for the large page aligned .rodata section
+	 * will be read-only. For the kernel identity mappings covering
+	 * the holes caused by this alignment can be anything.
+	 *
+	 * This will preserve the large page mappings for kernel text/data
+	 * at no extra cost.
+	 */
+	if (within(address, (unsigned long)_text,
+		   (unsigned long)__end_rodata_hpage_align))
+		pgprot_val(forbidden) |= _PAGE_RW;
+#endif
+
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
 
 	return prot;
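For readers unfamiliar with the pattern used in the pageattr.c hunk above, the change follows the usual static_protections() approach: build a mask of forbidden permission bits based on the address, then clear those bits from the requested protections. Below is a minimal standalone sketch of that idea; the address range, the _PAGE_RW constant, and the helper names are illustrative stand-ins, not the kernel's own definitions.

/*
 * Standalone sketch of the forbidden-bits masking pattern; not kernel code.
 */
#include <stdio.h>

#define _PAGE_RW	(1UL << 1)	/* same bit position as x86 uses, for illustration */

/* Hypothetical protected range standing in for _text..__end_rodata_hpage_align. */
static const unsigned long prot_start = 0xffffffff81000000UL;
static const unsigned long prot_end   = 0xffffffff81800000UL;

static int within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/* Clear forbidden bits from the requested protections, as static_protections() does. */
static unsigned long apply_protections(unsigned long prot, unsigned long address)
{
	unsigned long forbidden = 0;

	if (within(address, prot_start, prot_end))
		forbidden |= _PAGE_RW;		/* keep this range read-only */

	return prot & ~forbidden;
}

int main(void)
{
	unsigned long rw = _PAGE_RW;

	printf("inside  range -> RW bit: %lu\n",
	       apply_protections(rw, prot_start + 0x1000) & _PAGE_RW);
	printf("outside range -> RW bit: %lu\n",
	       apply_protections(rw, prot_end + 0x1000) & _PAGE_RW);
	return 0;
}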