author		Hugh Dickins <hughd@google.com>	2014-08-29 18:18:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-29 19:28:16 -0400
commit		b38af4721f59d0b564468f623b3e52a638195015 (patch)
tree		1197f01ff4bec576a027157f42cffa6b238153f5 /mm/memory.c
parent		7ea8574e5fa31f43d8098a028f12ba6a9c9f3530 (diff)
x86,mm: fix pte_special versus pte_numa
Sasha Levin has shown oopses on ffffea0003480048 and ffffea0003480008 at
mm/memory.c:1132, running Trinity on different 3.16-rc-next kernels:
where zap_pte_range() checks page->mapping to see if PageAnon(page).

Those addresses fit struct pages for pfns d2001 and d2000, and in each
dump a register or a stack slot showed d2001730 or d2000730: pte flags
0x730 are PCD ACCESSED PROTNONE SPECIAL IOMAP; and Sasha's e820 map has
a hole between cfffffff and 100000000, which would need special access.

Commit c46a7c817e66 ("x86: define _PAGE_NUMA by reusing software bits on
the PMD and PTE levels") has broken vm_normal_page(): a PROTNONE SPECIAL
pte no longer passes the pte_special() test, so zap_pte_range() goes on
to try to access a non-existent struct page.

Fix this by refining pte_special() (SPECIAL with PRESENT or PROTNONE) to
complement pte_numa() (SPECIAL with neither PRESENT nor PROTNONE).

A hint that this was a problem was that c46a7c817e66 added a pte_numa()
test to vm_normal_page(), and moved its is_zero_pfn() test from slow to
fast path: this was papering over a pte_special() snag when the zero
page was encountered during zap. This patch reverts vm_normal_page() to
how it was before, relying on pte_special().

It still appears that this patch may be incomplete: aren't there other
places which need to handle PROTNONE along with PRESENT? For example,
pte_mknuma() clears _PAGE_PRESENT and sets _PAGE_NUMA, but on a
PROT_NONE area that would make the pte pte_special(). This is
side-stepped by the fact that NUMA hinting faults skip PROT_NONE VMAs,
and there are no grounds on which a NUMA hinting fault on a PROT_NONE
VMA would be interesting.

Fixes: c46a7c817e66 ("x86: define _PAGE_NUMA by reusing software bits on the PMD and PTE levels")
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: <stable@vger.kernel.org> [3.16]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
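[Editor's note] For readers outside mm/, the following is a minimal
user-space sketch of the predicate change described above. It is an
illustration, not the kernel patch: the flag bit positions follow x86's
layout of the time (_PAGE_PRESENT on bit 0, _PAGE_PROTNONE sharing bit 8
with _PAGE_GLOBAL, _PAGE_SPECIAL on software bit 9, which _PAGE_NUMA
reuses), and the "broken"/"fixed" predicates model the commit message's
wording ("SPECIAL with PRESENT or PROTNONE" vs. "SPECIAL with neither"),
not the verbatim kernel source.

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative x86 pte flag bits (not the kernel headers). */
	#define _PAGE_PRESENT	(1ULL << 0)
	#define _PAGE_PROTNONE	(1ULL << 8)	/* reuses _PAGE_GLOBAL while !PRESENT */
	#define _PAGE_SPECIAL	(1ULL << 9)	/* software bit */
	#define _PAGE_NUMA	_PAGE_SPECIAL	/* c46a7c817e66: same bit, reused */

	/* Modeling the breakage: SPECIAL counted only together with PRESENT,
	 * so a PROTNONE SPECIAL pte stopped looking special. */
	static int pte_special_broken(uint64_t flags)
	{
		return (flags & (_PAGE_SPECIAL | _PAGE_PRESENT)) ==
		       (_PAGE_SPECIAL | _PAGE_PRESENT);
	}

	/* Modeling the fix: SPECIAL with PRESENT or PROTNONE ... */
	static int pte_special_fixed(uint64_t flags)
	{
		return (flags & _PAGE_SPECIAL) &&
		       (flags & (_PAGE_PRESENT | _PAGE_PROTNONE));
	}

	/* ... complementing pte_numa(): the shared bit set with neither
	 * PRESENT nor PROTNONE. */
	static int pte_numa(uint64_t flags)
	{
		return (flags & (_PAGE_NUMA | _PAGE_PRESENT | _PAGE_PROTNONE)) ==
		       _PAGE_NUMA;
	}

	int main(void)
	{
		/* The pte from Sasha's oops: flags 0x730 include PROTNONE
		 * (bit 8) and SPECIAL (bit 9), but not PRESENT. */
		uint64_t oops = 0x730;

		printf("broken: pte_special=%d\n", pte_special_broken(oops));
		printf("fixed:  pte_special=%d pte_numa=%d\n",
		       pte_special_fixed(oops), pte_numa(oops));
		return 0;
	}

Run against the 0x730 flags, the broken predicate returns 0, which is
what sent zap_pte_range() down the normal-page path into a non-existent
struct page; the fixed predicate classifies the pte as special, and
pte_numa() correctly stays false.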
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	7
1 file changed, 3 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ab3537bcfed2..adeac306610f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -751,7 +751,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pte_pfn(pte);
 
 	if (HAVE_PTE_SPECIAL) {
-		if (likely(!pte_special(pte) || pte_numa(pte)))
+		if (likely(!pte_special(pte)))
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
@@ -777,15 +777,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
+	if (is_zero_pfn(pfn))
+		return NULL;
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
 
-	if (is_zero_pfn(pfn))
-		return NULL;
-
 	/*
 	 * NOTE! We still have PageReserved() pages in the page tables.
 	 * eg. VDSO mappings can cause them to exist.