author    Suresh Siddha <suresh.b.siddha@intel.com>  2009-10-28 22:46:56 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-11-02 11:16:35 -0500
commit    502f660466ba7a66711ffdf414b1f7f1131dcbf7 (patch)
tree      9b5ac4309f392855108365d3d17406ce3199d2eb /arch
parent    883242dd0e5faaba041528a9a99f483f2a656c83 (diff)
x86, cpa: Fix kernel text RO checks in static_protection()
Steven Rostedt reported that we unconditionally make the kernel text mapping read-only: if someone does cpa() on the kernel text area to set or clear any page table attribute, we unconditionally clear the read-write attribute for the kernel text mapping that is set at compile time. Instead, this check (which forbids the write attribute) should be delayed and enforced only after the kernel has actually mapped the text as read-only.

Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Tested-by: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20091029024820.996634347@sbs-t61.sc.intel.com>
[ marked kernel_set_to_readonly as __read_mostly ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
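To illustrate the effect before reading the diff, here is a minimal, self-contained C sketch of the idea. This is user-space toy code, not the kernel implementation; text_start, text_end, forbidden_bits() and the address values are made-up stand-ins. The point is that the RW bit is only forbidden for text addresses once the kernel_set_to_readonly flag has been set.

#include <stdio.h>

#define _PAGE_RW 0x2UL

/* Toy stand-ins for the kernel symbols involved (hypothetical values). */
static unsigned long text_start = 0x1000, text_end = 0x2000;
static int kernel_set_to_readonly;	/* set once the text is mapped RO */

static int within(unsigned long addr, unsigned long lo, unsigned long hi)
{
	return addr >= lo && addr < hi;
}

/*
 * Simplified model of the static_protections() check touched by this
 * patch: only forbid RW for kernel-text addresses after the kernel has
 * marked the text read-only.
 */
static unsigned long forbidden_bits(unsigned long addr)
{
	unsigned long forbidden = 0;

	if (kernel_set_to_readonly && within(addr, text_start, text_end))
		forbidden |= _PAGE_RW;

	return forbidden;
}

int main(void)
{
	printf("before RO: forbidden=%#lx\n", forbidden_bits(0x1800)); /* 0 */
	kernel_set_to_readonly = 1;	/* as the kernel would do when marking text RO */
	printf("after  RO: forbidden=%#lx\n", forbidden_bits(0x1800)); /* 0x2 */
	return 0;
}

With the flag clear (early boot), a cpa() call on the text range keeps its requested RW attribute; once the text has been marked read-only, the same call has RW forbidden. This mirrors the pageattr.c hunk below.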
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/cacheflush.h    1
-rw-r--r--  arch/x86/mm/init_32.c                2
-rw-r--r--  arch/x86/mm/init_64.c                2
-rw-r--r--  arch/x86/mm/pageattr.c              10
4 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b54f6afe7ec4..eebb2cd2b9bf 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -176,6 +176,7 @@ void clflush_cache_range(void *addr, unsigned int size);
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
+extern int kernel_set_to_readonly;
 void set_kernel_text_rw(void);
 void set_kernel_text_ro(void);
 #else
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f64d0d5e0f89..c973f8e2a6cf 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -997,7 +997,7 @@ static noinline int do_test_wp_bit(void)
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
-static int kernel_set_to_readonly;
+int kernel_set_to_readonly __read_mostly;
 
 void set_kernel_text_rw(void)
 {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0ed09fad6aa1..4b507c089402 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -695,7 +695,7 @@ void __init mem_init(void)
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
 
-static int kernel_set_to_readonly;
+int kernel_set_to_readonly;
 
 void set_kernel_text_rw(void)
 {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 78d3168b3c64..8d1e8d95ea45 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -282,14 +282,16 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) && \
 				!defined(CONFIG_DYNAMIC_FTRACE)
 	/*
-	 * Kernel text mappings for the large page aligned .rodata section
-	 * will be read-only. For the kernel identity mappings covering
-	 * the holes caused by this alignment can be anything.
+	 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
+	 * kernel text mappings for the large page aligned text, rodata sections
+	 * will be always read-only. For the kernel identity mappings covering
+	 * the holes caused by this alignment can be anything that user asks.
 	 *
 	 * This will preserve the large page mappings for kernel text/data
 	 * at no extra cost.
 	 */
-	if (within(address, (unsigned long)_text,
+	if (kernel_set_to_readonly &&
+	    within(address, (unsigned long)_text,
 		   (unsigned long)__end_rodata_hpage_align))
 		pgprot_val(forbidden) |= _PAGE_RW;
 #endif