author     Arjan van de Ven <arjan@infradead.org>    2006-01-06 03:12:03 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-01-06 11:33:36 -0500
commit     c728252c7a072628bd3932ff87943d1e12423359 (patch)
tree       13a354b83179d6060add1c39bbac79e79f5d4ef2
parent     63aaf3086baea7b94c218053af8237f9dbac5d05 (diff)
[PATCH] x86/x86_64: mark rodata section read only: generic x86-64 bugfix
Bug fix required for the .rodata work on x86-64: when change_page_attr()
and friends need to break up a 2Mb page into 4Kb pages, they always set
the NX bit on the PMD, which causes the CPU to treat the entire 2Mb
region as NX regardless of the actual PTE permissions.  This is fine in
general, with one big exception: the 2Mb page that covers the last part
of the kernel .text!  The fix is to not invent a new permission for the
new PMD entry, but to just inherit the existing one minus the PSE bit.

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/x86_64/mm/pageattr.c     9
-rw-r--r--  include/asm-x86_64/pgtable.h  2
2 files changed, 9 insertions(+), 2 deletions(-)
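A minimal user-space sketch (not kernel code) of the idea behind the fix: when a 2Mb mapping is split, the reference protection for the split entry is derived from the existing large-page entry with only the PSE bit cleared, so an executable region such as the kernel .text does not pick up NX. The bit positions mirror x86-64, but split_flags() and the sample values are hypothetical and for illustration only.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_PSE  (1ULL << 7)    /* entry maps a 2Mb large page */
    #define PAGE_NX   (1ULL << 63)   /* no-execute */

    /* Inherit the large page's protections, dropping only the PSE bit. */
    static uint64_t split_flags(uint64_t large_page_flags)
    {
            return large_page_flags & ~PAGE_PSE;
    }

    int main(void)
    {
            uint64_t text_pmd = PAGE_PSE;            /* kernel .text: executable */
            uint64_t data_pmd = PAGE_PSE | PAGE_NX;  /* data: non-executable */

            /* .text stays executable, data keeps NX; only PSE is cleared. */
            printf("text: %#llx -> %#llx\n",
                   (unsigned long long)text_pmd,
                   (unsigned long long)split_flags(text_pmd));
            printf("data: %#llx -> %#llx\n",
                   (unsigned long long)data_pmd,
                   (unsigned long long)split_flags(data_pmd));
            return 0;
    }

The old code instead passed a fixed ref_prot that always carried NX, which is exactly what made the split .text mapping non-executable.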
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe9eeb0..35f1f1aab063 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	pte_t *kpte;
 	struct page *kpte_page;
 	unsigned kpte_flags;
+	pgprot_t ref_prot2;
 	kpte = lookup_address(address);
 	if (!kpte) return 0;
 	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		 * split_large_page will take the reference for this change_page_attr
 		 * on the split page.
 		 */
-		struct page *split = split_large_page(address, prot, ref_prot);
+
+		struct page *split;
+		ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+
+		split = split_large_page(address, prot, ref_prot2);
 		if (!split)
 			return -ENOMEM;
-		set_pte(kpte,mk_pte(split, ref_prot));
+		set_pte(kpte,mk_pte(split, ref_prot2));
 		kpte_page = split;
 	}
 	get_page(kpte_page);
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index ecf58c7c1650..02888d7a496f 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 
 #define pte_same(a, b)		((a).pte == (b).pte)
 
+#define pte_pgprot(a)	(__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PUD_SIZE	(1UL << PUD_SHIFT)