author     Hugh Dickins <hughd@google.com>                  2014-08-29 18:18:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-29 19:28:16 -0400
commit     b38af4721f59d0b564468f623b3e52a638195015 (patch)
tree       1197f01ff4bec576a027157f42cffa6b238153f5
parent     7ea8574e5fa31f43d8098a028f12ba6a9c9f3530 (diff)
x86,mm: fix pte_special versus pte_numa
Sasha Levin has shown oopses on ffffea0003480048 and ffffea0003480008 at
mm/memory.c:1132, running Trinity on different 3.16-rc-next kernels, where
zap_pte_range() checks page->mapping to see if PageAnon(page).
Those addresses fit struct pages for pfns d2001 and d2000, and in each
dump a register or a stack slot showed d2001730 or d2000730: pte flags
0x730 are PCD ACCESSED PROTNONE SPECIAL IOMAP; and Sasha's e820 map has
a hole between cfffffff and 100000000, which would need special access.
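For reference, that 0x730 value decodes cleanly against the 3.16-era x86 pte
bit layout (PCD = bit 4, ACCESSED = bit 5, PROTNONE sharing the GLOBAL bit 8,
SPECIAL = _PAGE_BIT_SOFTW1 = bit 9, IOMAP = _PAGE_BIT_SOFTW2 = bit 10); the
decode below is illustrative, not part of Sasha's report:

	0x730 = 0x010 (_PAGE_PCD)
	      | 0x020 (_PAGE_ACCESSED)
	      | 0x100 (_PAGE_PROTNONE)
	      | 0x200 (_PAGE_SPECIAL)
	      | 0x400 (_PAGE_IOMAP)

with _PAGE_PRESENT (bit 0) clear.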
Commit c46a7c817e66 ("x86: define _PAGE_NUMA by reusing software bits on
the PMD and PTE levels") has broken vm_normal_page(): a PROTNONE SPECIAL
pte no longer passes the pte_special() test, so zap_pte_range() goes on
to try to access a non-existent struct page.
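Concretely, evaluating the pre-fix x86 pte_special() (the lines removed in the
pgtable.h hunk below) against that 0x730 pte shows the miss:

	(0x730 & (_PAGE_PRESENT|_PAGE_SPECIAL)) == (_PAGE_PRESENT|_PAGE_SPECIAL)
	                       (0x730 & 0x201) == 0x201
	                                 0x200 == 0x201	/* false */

so !pte_special(pte) is true, vm_normal_page() treats the pfn as an ordinary
page, and zap_pte_range() goes on to dereference a struct page that does not
exist for that e820 hole.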
Fix this by refining pte_special() (SPECIAL with PRESENT or PROTNONE) to
complement pte_numa() (SPECIAL with neither PRESENT nor PROTNONE). A
hint that this was a problem was that c46a7c817e66 added a pte_numa() test
to vm_normal_page(), and moved its is_zero_pfn() test from the slow to the
fast path: this was papering over a pte_special() snag when the zero page was
encountered during zap. This patch reverts vm_normal_page() to how it
was before, relying on pte_special().
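As a self-contained illustration of that complement, here is a minimal
userspace sketch; the #defines mirror the x86 header values used above, and
is_numa() paraphrases the CONFIG_NUMA_BALANCING generic pte_numa() rather
than quoting it, so treat its exact form as an assumption:

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PRESENT	(1UL << 0)
#define _PAGE_PROTNONE	(1UL << 8)	/* shares the GLOBAL bit */
#define _PAGE_SPECIAL	(1UL << 9)	/* _PAGE_BIT_SOFTW1 */
#define _PAGE_NUMA	_PAGE_SPECIAL	/* same software bit on x86 */

/* SPECIAL with PRESENT or PROTNONE: a genuinely special mapping. */
static bool is_special(unsigned long flags)
{
	return (flags & _PAGE_SPECIAL) &&
	       (flags & (_PAGE_PRESENT | _PAGE_PROTNONE));
}

/* SPECIAL/NUMA with neither PRESENT nor PROTNONE: a NUMA hinting pte. */
static bool is_numa(unsigned long flags)
{
	return (flags & (_PAGE_NUMA | _PAGE_PRESENT | _PAGE_PROTNONE)) ==
	       _PAGE_NUMA;
}

int main(void)
{
	unsigned long oops_pte = 0x730;		/* the PROTNONE SPECIAL pte above */
	unsigned long numa_pte = _PAGE_NUMA;	/* only the shared software bit */

	printf("0x730:    special=%d numa=%d\n",
	       is_special(oops_pte), is_numa(oops_pte));
	printf("numa pte: special=%d numa=%d\n",
	       is_special(numa_pte), is_numa(numa_pte));
	return 0;
}

A pte with the shared software bit set now satisfies exactly one of the two
predicates, which is the property the old pte_special() lost.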
It still appears that this patch may be incomplete: aren't there other
places which need to handle PROTNONE along with PRESENT? For
example, pte_mknuma() clears _PAGE_PRESENT and sets _PAGE_NUMA, but on a
PROT_NONE area, that would make it pte_special(). This is side-stepped
by the fact that NUMA hinting faults skip PROT_NONE VMAs, and there is no
case in which a NUMA hinting fault on a PROT_NONE VMA would be interesting.
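To make that concern concrete (a hypothetical sketch; the pte_mknuma() body
is paraphrased, not quoted from the 3.16 source):

	/* pte_mknuma() does roughly:
	 *	pteval &= ~_PAGE_PRESENT;
	 *	pteval |= _PAGE_NUMA;
	 * so on a PROT_NONE area, where _PAGE_PROTNONE is set, the result
	 * would still satisfy the refined pte_special() test; this stays
	 * harmless only because NUMA hinting faults skip PROT_NONE VMAs.
	 */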
Fixes: c46a7c817e66 ("x86: define _PAGE_NUMA by reusing software bits on the PMD and PTE levels")
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: <stable@vger.kernel.org> [3.16]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/x86/include/asm/pgtable.h	9
-rw-r--r--	mm/memory.c	7
2 files changed, 10 insertions, 6 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 0ec056012618..aa97a070f09f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -131,8 +131,13 @@ static inline int pte_exec(pte_t pte)
 
 static inline int pte_special(pte_t pte)
 {
-	return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
-		(_PAGE_PRESENT|_PAGE_SPECIAL);
+	/*
+	 * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
+	 * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
+	 * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
+	 */
+	return (pte_flags(pte) & _PAGE_SPECIAL) &&
+		(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
 }
 
 static inline unsigned long pte_pfn(pte_t pte)
diff --git a/mm/memory.c b/mm/memory.c
index ab3537bcfed2..adeac306610f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -751,7 +751,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long pfn = pte_pfn(pte);
 
 	if (HAVE_PTE_SPECIAL) {
-		if (likely(!pte_special(pte) || pte_numa(pte)))
+		if (likely(!pte_special(pte)))
 			goto check_pfn;
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
@@ -777,15 +777,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		}
 	}
 
+	if (is_zero_pfn(pfn))
+		return NULL;
 check_pfn:
 	if (unlikely(pfn > highest_memmap_pfn)) {
 		print_bad_pte(vma, addr, pte, NULL);
 		return NULL;
 	}
 
-	if (is_zero_pfn(pfn))
-		return NULL;
-
 	/*
 	 * NOTE! We still have PageReserved() pages in the page tables.
 	 * eg. VDSO mappings can cause them to exist.