author     Matthieu Castet <castet.matthieu@free.fr>  2010-11-16 16:30:27 -0500
committer  Ingo Molnar <mingo@elte.hu>                2010-11-18 06:52:04 -0500
commit     64edc8ed5ffae999d8d413ba006850e9e34166cb (patch)
tree       8bbb5c5426e536ac199ab1aba37900ed73c4a822
parent     e53beacd23d9cb47590da6a7a7f6d417b941a994 (diff)
x86: Fix improper large page preservation
This patch fixes a bug in try_preserve_large_page() which may result in
improper large page preservation and improper application of page
attributes to the memory area outside of the original change request.

More specifically, the problem manifests itself when set_memory_*() is
called for several pages at the beginning of the large page and
try_preserve_large_page() erroneously concludes that the change can be
applied to the whole large page.

The fix consists of 3 parts:

1. Addition of "required" protection attributes in
   static_protections(), so .data and .bss can be guaranteed to
   stay "RW".

2. static_protections() is now called for every small page within the
   large page to determine compatibility of new protection attributes
   (instead of just small pages within the requested range).

3. A large page can be preserved only if the attribute change is
   large-page-aligned and covers the whole large page.

-v1: try_preserve_large_page() patch for Linux 2.6.34-rc2
-v2: Replaced pfn check with address check for kernel rw-data

Signed-off-by: Siarhei Liakh <sliakh.lkml@gmail.com>
Signed-off-by: Xuxian Jiang <jiang@cs.ncsu.edu>
Reviewed-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: James Morris <jmorris@namei.org>
Cc: Andi Kleen <ak@muc.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Dave Jones <davej@redhat.com>
Cc: Kees Cook <kees.cook@canonical.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <4CE2F7F3.8030809@free.fr>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
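As a rough sketch of the triggering scenario (not part of the patch; it
assumes buf sits at the 2MB-aligned base of a kernel large-page mapping,
and uses the set_memory_ro() interface declared in <asm/cacheflush.h>
in kernels of this era):

#include <asm/cacheflush.h>	/* set_memory_ro() lived here in 2.6.3x */

/* buf is assumed to be the 2MB-aligned base of a large-page mapping. */
static void trigger_scenario(void *buf)
{
	/*
	 * Request RO for only 8 of the 512 small pages backing the
	 * large page. Before this fix, try_preserve_large_page()
	 * could wrongly apply the change to the entire 2MB mapping.
	 */
	set_memory_ro((unsigned long)buf, 8);
}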
-rw-r--r--	arch/x86/mm/pageattr.c	28
1 file changed, 18 insertions, 10 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 532e7933d606..6f2a6b6deb6b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -255,6 +255,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				  unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
+	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -278,6 +279,12 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
+	/*
+	 * .data and .bss should always be writable.
+	 */
+	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
+	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
+		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -317,6 +324,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
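The static_protections() hunks above reduce to a symmetric pair of
masks: "forbidden" bits are stripped from the requested pgprot, then
"required" bits are OR-ed back in. A minimal user-space sketch of that
combination (the helper name and bit value are illustrative, not the
real x86 PTE layout):

#include <stdio.h>

#define FAKE_PAGE_RW 0x2UL	/* stand-in for _PAGE_RW */

/* Mirror of the two lines at the end of static_protections() above. */
static unsigned long combine(unsigned long prot,
			     unsigned long forbidden,
			     unsigned long required)
{
	prot &= ~forbidden;	/* e.g. rodata must never be writable */
	prot |= required;	/* e.g. .data/.bss must stay writable  */
	return prot;
}

int main(void)
{
	/* set_memory_ro()-style request on .data: required restores RW. */
	printf("%#lx\n", combine(0x0UL, 0x0UL, FAKE_PAGE_RW));	/* 0x2 */

	/* set_memory_rw()-style request on rodata: forbidden strips RW. */
	printf("%#lx\n", combine(FAKE_PAGE_RW, FAKE_PAGE_RW, 0x0UL)); /* 0 */
	return 0;
}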
@@ -393,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 {
 	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
-	pgprot_t old_prot, new_prot;
+	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
 	unsigned int level;
 
@@ -438,10 +446,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * We are safe now. Check whether the new pgprot is the same:
 	 */
 	old_pte = *kpte;
-	old_prot = new_prot = pte_pgprot(old_pte);
+	old_prot = new_prot = req_prot = pte_pgprot(old_pte);
 
-	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
-	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
+	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
+	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
 	/*
 	 * old_pte points to the large page base address. So we need
@@ -450,17 +458,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
 	cpa->pfn = pfn;
 
-	new_prot = static_protections(new_prot, address, pfn);
+	new_prot = static_protections(req_prot, address, pfn);
 
 	/*
 	 * We need to check the full range, whether
 	 * static_protection() requires a different pgprot for one of
 	 * the pages in the range we try to preserve:
 	 */
-	addr = address + PAGE_SIZE;
-	pfn++;
-	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
-		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
+	addr = address & pmask;
+	pfn = pte_pfn(old_pte);
+	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
+		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			goto out_unlock;
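The hunk above changes the verification loop to start at the large page
base (address & pmask) and walk all psize >> PAGE_SHIFT small pages,
not just the pages in the requested range. A self-contained sketch of
that full-coverage check, with a stand-in for static_protections() and
hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define LPAGE_SIZE	(1UL << 21)		/* assume a 2MB large page */
#define LPAGE_MASK	(~(LPAGE_SIZE - 1))

/* Stand-in for static_protections(): toy model with no special areas. */
static unsigned long check_prot(unsigned long req_prot, unsigned long addr)
{
	(void)addr;
	return req_prot;
}

static bool whole_lpage_compatible(unsigned long address,
				   unsigned long req_prot,
				   unsigned long new_prot)
{
	/* Start from the large page base, not the requested address. */
	unsigned long addr = address & LPAGE_MASK;
	unsigned long i;

	for (i = 0; i < (LPAGE_SIZE >> PAGE_SHIFT); i++, addr += PAGE_SIZE) {
		if (check_prot(req_prot, addr) != new_prot)
			return false;	/* some page differs: must split */
	}
	return true;
}

int main(void)
{
	/* In this toy model every page agrees, so preservation is OK. */
	printf("%d\n", whole_lpage_compatible(0x200000UL, 0x2UL, 0x2UL));
	return 0;
}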
@@ -483,7 +491,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * that we limited the number of possible pages already to
 	 * the number of pages in the large page.
 	 */
-	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
+	if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
 		/*
 		 * The address is aligned and the number of pages
 		 * covers the full page.
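This final hunk is part 3 of the fix: preservation additionally
requires that the request is large-page-aligned and covers every small
page. Under the assumption of a 2MB x86 large page, that condition
reduces to the following sketch (the helper name is hypothetical, not
kernel code):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* 2MB large page on x86-64 */
#define PSIZE		(1UL << PMD_SHIFT)
#define PMASK		(~(PSIZE - 1))

static bool can_preserve_large_page(unsigned long address,
				    unsigned long numpages)
{
	/* The change must start at the large page base ... */
	if (address != (address & PMASK))
		return false;
	/* ... and cover all of its small pages. */
	return numpages == (PSIZE >> PAGE_SHIFT);
}

int main(void)
{
	/* 8 pages at the start of a 2MB page: must not be preserved. */
	printf("%d\n", can_preserve_large_page(0x200000UL, 8));	  /* 0 */
	/* A full, aligned 2MB range: preservation is allowed. */
	printf("%d\n", can_preserve_large_page(0x200000UL, 512)); /* 1 */
	return 0;
}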