author:    Suresh Siddha <suresh.b.siddha@intel.com>  2009-01-20 17:20:21 -0500
committer: Ingo Molnar <mingo@elte.hu>  2009-01-21 06:24:54 -0500
commit:    a1e46212a410793d575718818e81ddc442a65283
tree:      0c253c114d069e3aaed34d0ac1254fd6e9c59537
parent:    552b8aa4d1edcc1c764ff6f61a7686347a2d1827
x86: fix page attribute corruption with cpa()
Impact: fix sporadic slowdowns and warning messages
This patch fixes a performance issue reported by Linus on his Nehalem
system. While Linus reverted the PAT patch (commit
58dab916dfb57328d50deb0aa9b3fc92efa248ff) that exposed the issue, the
existing cpa() code can still potentially cause wrong behavior (page
attribute corruption).
This patch also fixes the "WARNING: at arch/x86/mm/pageattr.c:560"
messages that various people reported.
On 64-bit kernels, the kernel identity mapping might have holes,
depending on the available memory and on how e820 reports the address
ranges covering RAM and the ACPI and PCI reserved regions. If there is a
2MB/1GB hole in an address range that is not listed by the e820 entries,
the kernel identity mapping will have a corresponding hole in its 1:1
mapping.
If cpa() is called on a kernel identity mapping address that falls into
one of these holes, the existing code fails like this:

    __change_page_attr_set_clr()
        __change_page_attr()
            returns 0 because of the if (!kpte) check, but does not
            set cpa->numpages and cpa->pfn.
        cpa_process_alias()
            uses the uninitialized cpa->pfn (a random value), which can
            potentially change the page attributes of kernel text/data,
            kernel identity mappings of RAM pages, etc. Oops!
This bug was easily exposed by another PAT patch, which called cpa()
more often on kernel identity mapping holes (the physical range between
max_low_pfn_mapped and 4GB), where it was also setting the cache-disable
attribute (PCD) for the kernel identity mappings.
Fix cpa() to handle holes in the kernel identity mapping. Retain the
WARN() for cpa() calls on other not-present address ranges (kernel
text/data, ioremap() addresses).
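For reference, a minimal sketch of the range test the patch below
introduces in __cpa_process_fault(); within() here mirrors the simple
half-open interval helper used in pageattr.c, while page_offset,
max_pfn_mapped and the sample addresses are placeholder values chosen for
illustration only.

#include <stdio.h>

#define PAGE_SHIFT 12

/* Placeholder stand-ins for PAGE_OFFSET and max_pfn_mapped (4GB of mapped RAM). */
static const unsigned long page_offset    = 0xffff880000000000UL;
static const unsigned long max_pfn_mapped = 0x100000UL;

/* Same half-open interval check as the within() helper in pageattr.c. */
static int within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/* 0: hole in the 1:1 mapping, quietly skip one page; -1: warn and fail. */
static int classify_missing_pte(unsigned long vaddr)
{
        if (within(vaddr, page_offset,
                   page_offset + (max_pfn_mapped << PAGE_SHIFT)))
                return 0;
        return -1;
}

int main(void)
{
        /* An address inside the identity-mapping window vs. a kernel-text address. */
        printf("%d %d\n",
               classify_missing_pte(page_offset + (1UL << 21)),
               classify_missing_pte(0xffffffff81000000UL));
        return 0;
}

Addresses that fail this test (kernel text/data, ioremap() ranges) keep
the WARN() and now return -EFAULT instead of the previous -EINVAL.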
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/x86/mm/pageattr.c | 49
 1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..84ba74820ad6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -534,6 +534,36 @@ out_unlock:
         return 0;
 }
 
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+                               int primary)
+{
+        /*
+         * Ignore all non primary paths.
+         */
+        if (!primary)
+                return 0;
+
+        /*
+         * Ignore the NULL PTE for kernel identity mapping, as it is expected
+         * to have holes.
+         * Also set numpages to '1' indicating that we processed cpa req for
+         * one virtual address page and its pfn. TBD: numpages can be set based
+         * on the initial value and the level returned by lookup_address().
+         */
+        if (within(vaddr, PAGE_OFFSET,
+                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+                cpa->numpages = 1;
+                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+                return 0;
+        } else {
+                WARN(1, KERN_WARNING "CPA: called for zero pte. "
+                     "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+                     *cpa->vaddr);
+
+                return -EFAULT;
+        }
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
         unsigned long address;
@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 repeat:
         kpte = lookup_address(address, &level);
         if (!kpte)
-                return 0;
+                return __cpa_process_fault(cpa, address, primary);
 
         old_pte = *kpte;
-        if (!pte_val(old_pte)) {
-                if (!primary)
-                        return 0;
-                WARN(1, KERN_WARNING "CPA: called for zero pte. "
-                     "vaddr = %lx cpa->vaddr = %lx\n", address,
-                     *cpa->vaddr);
-                return -EINVAL;
-        }
+        if (!pte_val(old_pte))
+                return __cpa_process_fault(cpa, address, primary);
 
         if (level == PG_LEVEL_4K) {
                 pte_t new_pte;
@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
         vaddr = *cpa->vaddr;
 
         if (!(within(vaddr, PAGE_OFFSET,
-                     PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-            || within(vaddr, PAGE_OFFSET + (1UL<<32),
-                      PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-        )) {
+                     PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
                 alias_cpa = *cpa;
                 temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);