author	Arjan van de Ven <arjan@linux.intel.com>	2008-01-30 07:34:07 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:07 -0500
commit	488fd99588bf23da951b524a806e44feaa1aa366 (patch)
tree	a4ff5c8ddf932920fc2700e261f92c08b293d040 /arch
parent	5398f9854f60d670e8ef1ea08c0e0310f253eeb1 (diff)
x86: fix pageattr-selftest
In Ingo's testing, he found a bug in the CPA selftest code. What would happen is that the test would call change_page_attr_addr on a range of memory, part of which was read only and part of which was writable. The only thing the test wanted to change was the global bit...

What actually happened was that the selftest would take the permissions of the first page, and the change_page_attr_addr call would then set the permissions of the entire range to those of this first page. In the rodata section case, this resulted in pages after .rodata becoming read only... which made the kernel rather unhappy in many interesting ways.

This is just another example of how dangerous the cpa API is (was); this patch changes the test to use the incremental clear/set APIs instead, and it changes the clear/set implementation to work on a one-page-at-a-time basis.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
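To make the API shift concrete, here is a minimal caller-side sketch of the per-page clear/set pattern the selftest switches to. This is illustrative only, not part of the patch: the function name and the addr/npages parameters are made up, and it assumes the two helpers are visible to the caller (in the patch they are file-local to arch/x86/mm/pageattr.c). Because only the named bit is touched and the helpers walk the range one page at a time, a range that mixes read-only and read-write pages keeps each page's own protections:

/*
 * Illustrative sketch, not kernel source: drop and then restore the
 * global bit on a range using the post-patch clear/set helpers.
 */
static int toggle_global_bit(unsigned long addr, int npages)
{
	int err;

	/* Clear _PAGE_GLOBAL across the range, one page at a time. */
	err = change_page_attr_clear(addr, npages, __pgprot(_PAGE_GLOBAL));
	if (err < 0)
		return err;

	/* Set it back the same way. */
	return change_page_attr_set(addr, npages, __pgprot(_PAGE_GLOBAL));
}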
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/pageattr-test.c	 8
-rw-r--r--	arch/x86/mm/pageattr.c	96
2 files changed, 58 insertions, 46 deletions
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 6a41a0f0c149..fe73905d075e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -162,8 +162,8 @@ static __init int exercise_pageattr(void)
 			continue;
 		}
 
-		err = change_page_attr_addr(addr[i], len[i],
-			pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
+		err = change_page_attr_clear(addr[i], len[i],
+					       __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA %d failed %d\n", i, err);
 			failed++;
@@ -197,8 +197,8 @@ static __init int exercise_pageattr(void)
 			failed++;
 			continue;
 		}
-		err = change_page_attr_addr(addr[i], len[i],
-			pte_pgprot(pte_mkglobal(*pte)));
+		err = change_page_attr_set(addr[i], len[i],
+					     __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA reverting failed: %d\n", err);
 			failed++;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a2d747c06147..23f0aa3d01c1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -228,7 +228,6 @@ repeat:
 /**
  * change_page_attr_addr - Change page table attributes in linear mapping
  * @address: Virtual address in linear mapping.
- * @numpages: Number of pages to change
  * @prot: New page table attribute (PAGE_*)
  *
  * Change page attributes of a page in the direct mapping. This is a variant
@@ -240,10 +239,10 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
 
-static int change_page_attr_addr(unsigned long address, int numpages,
-				 pgprot_t prot)
+static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 {
-	int err = 0, kernel_map = 0, i;
+	int err = 0, kernel_map = 0;
+	unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
 	if (address >= __START_KERNEL_map &&
@@ -254,30 +253,27 @@ static int change_page_attr_addr(unsigned long address, int numpages,
 	}
 #endif
 
-	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+		err = __change_page_attr(address, pfn, prot);
+		if (err)
+			return err;
+	}
 
-		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn, prot);
-			if (err)
-				break;
-		}
 #ifdef CONFIG_X86_64
-		/*
-		 * Handle kernel mapping too which aliases part of
-		 * lowmem:
-		 */
-		if (__pa(address) < KERNEL_TEXT_SIZE) {
-			unsigned long addr2;
-			pgprot_t prot2;
-
-			addr2 = __START_KERNEL_map + __pa(address);
-			/* Make sure the kernel mappings stay executable */
-			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-			err = __change_page_attr(addr2, pfn, prot2);
-		}
-#endif
+	/*
+	 * Handle kernel mapping too which aliases part of
+	 * lowmem:
+	 */
+	if (__pa(address) < KERNEL_TEXT_SIZE) {
+		unsigned long addr2;
+		pgprot_t prot2;
+
+		addr2 = __START_KERNEL_map + __pa(address);
+		/* Make sure the kernel mappings stay executable */
+		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+		err = __change_page_attr(addr2, pfn, prot2);
 	}
+#endif
 
 	return err;
 }
@@ -307,16 +303,24 @@ static int change_page_attr_set(unsigned long addr, int numpages,
 	pgprot_t current_prot;
 	int level;
 	pte_t *pte;
+	int i, ret;
 
-	pte = lookup_address(addr, &level);
-	if (pte)
-		current_prot = pte_pgprot(*pte);
-	else
-		pgprot_val(current_prot) = 0;
+	for (i = 0; i < numpages ; i++) {
 
-	pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+		pte = lookup_address(addr, &level);
+		if (pte)
+			current_prot = pte_pgprot(*pte);
+		else
+			pgprot_val(current_prot) = 0;
 
-	return change_page_attr_addr(addr, numpages, prot);
+		pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+
+		ret = change_page_attr_addr(addr, prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 
 /**
@@ -344,16 +348,24 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
 	pgprot_t current_prot;
 	int level;
 	pte_t *pte;
-
-	pte = lookup_address(addr, &level);
-	if (pte)
-		current_prot = pte_pgprot(*pte);
-	else
-		pgprot_val(current_prot) = 0;
-
-	pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);
-
-	return change_page_attr_addr(addr, numpages, prot);
+	int i, ret;
+
+	for (i = 0; i < numpages; i++) {
+		pte = lookup_address(addr, &level);
+		if (pte)
+			current_prot = pte_pgprot(*pte);
+		else
+			pgprot_val(current_prot) = 0;
+
+		pgprot_val(prot) =
+			pgprot_val(current_prot) & ~pgprot_val(prot);
+
+		ret = change_page_attr_addr(addr, prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
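For context, the set_memory_* wrappers that the kerneldoc comment above points drivers at sit directly on top of these per-page helpers. A simplified sketch of how such a wrapper composes with change_page_attr_set() follows; it is not the exact kernel source, and the specific flag combination is an assumption for illustration:

/*
 * Simplified sketch only: mark a range uncacheable by setting the
 * PCD/PWT bits through the per-page change_page_attr_set() helper.
 * The precise bits used by the real set_memory_uc() are assumed here.
 */
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}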