author    Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:09 -0500
committer Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:09 -0500
commit    86f03989d99cfa2e1216cdd7aa996852236909cf
tree      6fae63f51c4adf08f94975b48e656b31c6bced62
parent    aba8391f7323294e88e3a665513434aba4042a7d
x86: cpa: fix the self-test

Rework the CPA self-test to match the mask-based change_page_attr()
interface: the test now goes through the change_page_attr_set() and
change_page_attr_clear() entry points instead of internal helpers,
__change_page_attr() takes separate set/clear masks rather than a
fully formed pgprot_t, PG_LEVEL_1G is added to the page-level enum so
print_split() can check mapping levels explicitly, the stale
cpa_flush_all() calls are dropped, the test count is raised from 400
to 4000, and DEBUG_PAGEALLOC is restricted to X86_32 for now.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 arch/x86/Kconfig.debug      |  2
 arch/x86/mm/init_32.c       |  4
 arch/x86/mm/init_64.c       | 16
 arch/x86/mm/pageattr-test.c | 15
 arch/x86/mm/pageattr.c      | 74
 include/asm-x86/pgtable.h   |  1
 6 files changed, 48 insertions(+), 64 deletions(-)
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2d0bd33b73aa..2e1e3af28c3a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
 
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && X86_32
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8d7f723cfc28..8ed5c189d7aa 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -781,8 +781,6 @@ void mark_rodata_ro(void)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr;
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	/*
 	 * If debugging page accesses then do not free this memory but
@@ -793,6 +791,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		begin, PAGE_ALIGN(end));
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
+	unsigned long addr;
+
 	/*
 	 * We just marked the kernel text read only above, now that
 	 * we are going to free part of that, we need to make that
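
For context, a condensed sketch of how free_init_pages() on 32-bit reads
after this move. This is not part of the patch: it is reconstructed from
the hunk context above, and the freeing loop is an assumption based on
the 2.6.24-era code.

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Keep the pages around, but unmap them so stray users fault: */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;	/* only needed on the freeing path now */

	/* (make the range writable again, then free it page by page) */
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
#endif
}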
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e0c1e98ad1bf..8a7b725ce3c7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -569,22 +569,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-#ifdef CONFIG_DEBUG_RODATA
-	/*
-	 * This will make the __init pages not present and
-	 * not executable, so that any attempt to use a
-	 * __init function from now on will fault immediately
-	 * rather than supriously later when memory gets reused.
-	 *
-	 * We only do this for DEBUG_RODATA to not break up the
-	 * 2Mb kernel mapping just for this debug feature.
-	 */
-	if (begin >= __START_KERNEL_map) {
-		set_memory_rw(begin, (end - begin)/PAGE_SIZE);
-		set_memory_np(begin, (end - begin)/PAGE_SIZE);
-		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
-	}
-#endif
 #endif
 }
 
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 554820265b95..06353d43f72e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -15,8 +15,7 @@
 #include <asm/kdebug.h>
 
 enum {
-	NTEST			= 400,
-	LOWEST_LEVEL		= PG_LEVEL_4K,
+	NTEST			= 4000,
 #ifdef CONFIG_X86_64
 	LPS			= (1 << PMD_SHIFT),
 #elif defined(CONFIG_X86_PAE)
@@ -59,10 +58,10 @@ static __init int print_split(struct split_state *s)
 			continue;
 		}
 
-		if (level == 2 && sizeof(long) == 8) {
+		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
 			s->gpg++;
 			i += GPS/PAGE_SIZE;
-		} else if (level != LOWEST_LEVEL) {
+		} else if (level == PG_LEVEL_2M) {
 			if (!(pte_val(*pte) & _PAGE_PSE)) {
 				printk(KERN_ERR
 					"%lx level %d but not PSE %Lx\n",
@@ -162,7 +161,7 @@ static __init int exercise_pageattr(void)
 			continue;
 		}
 
-		err = __change_page_attr_clear(addr[i], len[i],
+		err = change_page_attr_clear(addr[i], len[i],
 						__pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA %d failed %d\n", i, err);
@@ -175,7 +174,7 @@ static __init int exercise_pageattr(void)
 				pte ? (u64)pte_val(*pte) : 0ULL);
 			failed++;
 		}
-		if (level != LOWEST_LEVEL) {
+		if (level != PG_LEVEL_4K) {
 			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
 				addr[i], level);
 			failed++;
@@ -183,7 +182,6 @@ static __init int exercise_pageattr(void)
 
 	}
 	vfree(bm);
-	cpa_flush_all();
 
 	failed += print_split(&sb);
 
@@ -197,7 +195,7 @@ static __init int exercise_pageattr(void)
 			failed++;
 			continue;
 		}
-		err = __change_page_attr_set(addr[i], len[i],
+		err = change_page_attr_set(addr[i], len[i],
 						__pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA reverting failed: %d\n", err);
@@ -211,7 +209,6 @@ static __init int exercise_pageattr(void)
 		}
 
 	}
-	cpa_flush_all();
 
 	failed += print_split(&sc);
 
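
Condensed, one iteration of the reworked self-test now looks roughly
like this (a sketch with the error reporting trimmed; addr[], len[],
pte, level and failed are the locals from exercise_pageattr() above):

	/* Clear _PAGE_GLOBAL; CPA must split any large page covering it: */
	err = change_page_attr_clear(addr[i], len[i], __pgprot(_PAGE_GLOBAL));
	if (err < 0)
		failed++;

	pte = lookup_address(addr[i], &level);
	if (!pte || level != PG_LEVEL_4K)
		failed++;	/* the range must map with 4k ptes now */

	/* ...later, revert by setting _PAGE_GLOBAL again: */
	err = change_page_attr_set(addr[i], len[i], __pgprot(_PAGE_GLOBAL));
	if (err < 0)
		failed++;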
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 97ec9e7d29d9..532a40bc0e7e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -197,10 +197,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	unsigned long addr;
 	pte_t *pbase, *tmp;
 	struct page *base;
-	int i, level;
+	unsigned int i, level;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	gfp_flags = GFP_ATOMIC;
+	gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
+	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
 #endif
 	base = alloc_pages(gfp_flags, 0);
 	if (!base)
@@ -224,6 +225,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 #endif
 
+	pgprot_val(ref_prot) &= ~_PAGE_NX;
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
 
@@ -248,7 +250,8 @@ out_unlock:
 }
 
 static int
-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
+__change_page_attr(unsigned long address, unsigned long pfn,
+		   pgprot_t mask_set, pgprot_t mask_clr)
 {
 	struct page *kpte_page;
 	int level, err = 0;
@@ -267,15 +270,20 @@ repeat:
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
 
-	prot = static_protections(prot, address);
-
 	if (level == PG_LEVEL_4K) {
-		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
-		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
-	} else {
-		/* Clear the PSE bit for the 4k level pages ! */
-		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;
+		pgprot_t new_prot = pte_pgprot(*kpte);
+		pte_t new_pte, old_pte = *kpte;
+
+		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
+		pgprot_val(new_prot) |= pgprot_val(mask_set);
+
+		new_prot = static_protections(new_prot, address);
+
+		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+		BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
 
+		set_pte_atomic(kpte, new_pte);
+	} else {
 		err = split_large_page(kpte, address);
 		if (!err)
 			goto repeat;
@@ -297,22 +305,26 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
 
-static int change_page_attr_addr(unsigned long address, pgprot_t prot)
+static int
+change_page_attr_addr(unsigned long address, pgprot_t mask_set,
+		      pgprot_t mask_clr)
 {
 	int err = 0, kernel_map = 0;
-	unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+	unsigned long pfn;
 
 #ifdef CONFIG_X86_64
 	if (address >= __START_KERNEL_map &&
 	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-		address = (unsigned long)__va(__pa(address));
+		address = (unsigned long)__va(__pa((void *)address));
 		kernel_map = 1;
 	}
 #endif
 
-	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-		err = __change_page_attr(address, pfn, prot);
+	pfn = __pa(address) >> PAGE_SHIFT;
+
+	if (!kernel_map || 1) {
+		err = __change_page_attr(address, pfn, mask_set, mask_clr);
 		if (err)
 			return err;
 	}
@@ -324,12 +336,15 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 	 */
 	if (__pa(address) < KERNEL_TEXT_SIZE) {
 		unsigned long addr2;
-		pgprot_t prot2;
 
-		addr2 = __START_KERNEL_map + __pa(address);
+		addr2 = __pa(address) + __START_KERNEL_map - phys_base;
 		/* Make sure the kernel mappings stay executable */
-		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-		err = __change_page_attr(addr2, pfn, prot2);
+		pgprot_val(mask_clr) |= _PAGE_NX;
+		/*
+		 * Our high aliases are imprecise, so do not propagate
+		 * failures back to users:
+		 */
+		__change_page_attr(addr2, pfn, mask_set, mask_clr);
 	}
 #endif
 
@@ -339,26 +354,13 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 static int __change_page_attr_set_clr(unsigned long addr, int numpages,
 				      pgprot_t mask_set, pgprot_t mask_clr)
 {
-	pgprot_t new_prot;
-	int level;
-	pte_t *pte;
-	int i, ret;
-
-	for (i = 0; i < numpages ; i++) {
-
-		pte = lookup_address(addr, &level);
-		if (!pte)
-			return -EINVAL;
-
-		new_prot = pte_pgprot(*pte);
-
-		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
-		pgprot_val(new_prot) |= pgprot_val(mask_set);
+	unsigned int i;
+	int ret;
 
-		ret = change_page_attr_addr(addr, new_prot);
+	for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
+		ret = change_page_attr_addr(addr, mask_set, mask_clr);
 		if (ret)
 			return ret;
-		addr += PAGE_SIZE;
 	}
 
 	return 0;
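
The self-test calls change_page_attr_set()/change_page_attr_clear()
rather than __change_page_attr_set_clr() directly; those are presumably
thin wrappers that pass a fixed set- or clear-mask, along these lines
(a sketch of the assumed forwarding, not shown in this diff):

static int change_page_attr_set(unsigned long addr, int numpages,
				pgprot_t mask)
{
	/* set the bits in mask, clear nothing */
	return __change_page_attr_set_clr(addr, numpages, mask,
					  __pgprot(0));
}

static int change_page_attr_clear(unsigned long addr, int numpages,
				  pgprot_t mask)
{
	/* clear the bits in mask, set nothing */
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0),
					  mask);
}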
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index ee40a88882f6..269e7e29ea8e 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -240,6 +240,7 @@ enum {
 	PG_LEVEL_NONE,
 	PG_LEVEL_4K,
 	PG_LEVEL_2M,
+	PG_LEVEL_1G,
 };
 
 /*
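
With PG_LEVEL_1G in place, lookup_address() can report all three mapping
sizes, which print_split() above now relies on. A minimal usage sketch;
report_mapping() is a hypothetical caller, not part of this patch:

static void report_mapping(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte)
		return;		/* not mapped */

	switch (level) {
	case PG_LEVEL_4K:
		/* ordinary 4k pte */
		break;
	case PG_LEVEL_2M:
		/* 2M (4M on non-PAE 32-bit) large page, _PAGE_PSE set */
		break;
	case PG_LEVEL_1G:
		/* 1G page, 64-bit only */
		break;
	}
}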