author		Dave Jones <davej@redhat.com>	2006-01-06 03:12:10 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 11:33:38 -0500
commit		f8af095d3a4c8300b4e63ee2c4bb198b565d9431 (patch)
tree		8ca5c266790eb543706fd202d2d2f731466ad541 /arch
parent		e72c8585e09f127a69a1608bb5ccd1e3fc0dd41e (diff)
[PATCH] x86: change_page_attr() fix
The 'make rodata read-only' patch in -mm exposes a latent bug in the 32-bit change_page_attr() function, which causes certain CPUs (those with NX, basically) to reboot instantly after pages are marked read-only. The same bug got fixed a while back on x86-64, but never got propagated to i386. Stuart Hayes from Dell also picked up on this last June, but it never got fixed, as the only thing apparently affected by it was the nvidia driver.

Blatantly stealing the description from his post:

"It doesn't appear to be fixed (in the i386 arch). The change_page_attr()/split_large_page() code will still set all the 4K PTEs to PAGE_KERNEL (setting the _PAGE_NX bit) when a large page needs to be split. This wouldn't be a problem for the bulk of kernel memory, but there are pages in the lower 4MB of memory that are free, and are part of large executable pages that also contain kernel code. If change_page_attr() is called on these, it will set the _PAGE_NX bit on the whole 2MB region that was covered by the large page, causing a large chunk of kernel code to be non-executable."

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Cc: <Stuart_Hayes@Dell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
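The fix keys the replacement protection off the end of kernel text: a large page that overlaps the region below _etext keeps execute permission when split or reverted, while everything else gets the NX-carrying default. A minimal sketch of that decision, assuming the 2.6-era i386 macros used in the diff below (the helper name is hypothetical; the actual patch open-codes the test at each call site):

	/* Hypothetical helper; the patch below open-codes this test. */
	#include <asm/pgtable.h>
	#include <asm/sections.h>	/* _etext */

	static pgprot_t ref_prot_for(unsigned long address, int large)
	{
		/* Pages overlapping kernel text must stay executable. */
		if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
			return large ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_EXEC;
		/* The default protections set _PAGE_NX on NX-capable CPUs. */
		return large ? PAGE_KERNEL_LARGE : PAGE_KERNEL;
	}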
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/mm/pageattr.c	27
1 files changed, 21 insertions, 6 deletions
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index f600fc244f02..c30a16df6440 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -13,6 +13,7 @@
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/sections.h>
 
 static DEFINE_SPINLOCK(cpa_lock);
 static struct list_head df_list = LIST_HEAD_INIT(df_list);
@@ -36,7 +37,8 @@ pte_t *lookup_address(unsigned long address)
 	return pte_offset_kernel(pmd, address);
 }
 
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
+static struct page *split_large_page(unsigned long address, pgprot_t prot,
+					pgprot_t ref_prot)
 {
 	int i;
 	unsigned long addr;
@@ -54,7 +56,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
-			addr == address ? prot : PAGE_KERNEL));
+			addr == address ? prot : ref_prot));
 	}
 	return base;
 }
@@ -98,11 +100,18 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
  */
 static inline void revert_page(struct page *kpte_page, unsigned long address)
 {
-	pte_t *linear = (pte_t *)
+	pgprot_t ref_prot;
+	pte_t *linear;
+
+	ref_prot =
+	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
+
+	linear = (pte_t *)
 		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
 	set_pmd_pte(linear, address,
 		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
-			    PAGE_KERNEL_LARGE));
+			    ref_prot));
 }
 
 static int
@@ -123,10 +132,16 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
 		set_pte_atomic(kpte, mk_pte(page, prot));
 	} else {
-		struct page *split = split_large_page(address, prot);
+		pgprot_t ref_prot;
+		struct page *split;
+
+		ref_prot =
+		((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+			? PAGE_KERNEL_EXEC : PAGE_KERNEL;
+		split = split_large_page(address, prot, ref_prot);
 		if (!split)
 			return -ENOMEM;
-		set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+		set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
 		kpte_page = split;
 	}
 	get_page(kpte_page);
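For context, the 'make rodata read-only' patch in -mm that exposed this walks the kernel's rodata section a page at a time and remaps each page via change_page_attr(); each call can force a large-page split, and before this fix the split marked the whole surrounding large-page region NX. A rough sketch of such a caller, approximated here for illustration and not part of this commit:

	#include <linux/mm.h>
	#include <asm/cacheflush.h>	/* change_page_attr(), global_flush_tlb() */
	#include <asm/sections.h>	/* __start_rodata, __end_rodata */

	void mark_rodata_ro(void)
	{
		unsigned long addr = (unsigned long)__start_rodata;

		/* Remap each rodata page read-only; may split large pages. */
		for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
			change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

		/* Drop stale large-page TLB entries on all CPUs. */
		global_flush_tlb();
	}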