author		Gerald Schaefer <gerald.schaefer@de.ibm.com>	2016-04-28 19:18:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-04-28 22:34:04 -0400
commit		28093f9f34cedeaea0f481c58446d9dac6dd620f (patch)
tree		da930385a854ae9cb95239b84fa9bac9b677c812 /mm/memory.c
parent		3486b85a29c1741db99d0c522211c82d2b7a56d0 (diff)
numa: fix /proc/<pid>/numa_maps for THP
In gather_pte_stats() a THP pmd is cast into a pte, which is wrong
because the layouts may differ depending on the architecture. On s390
this will lead to inaccurate numa_maps accounting in /proc because of
misguided pte_present() and pte_dirty() checks on the fake pte.
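
For context, the cast in question sits in the THP branch of gather_pte_stats() in fs/proc/task_mmu.c, which is not part of the mm/memory.c diff shown below. A rough sketch of the pre-patch pattern (abridged, locking details approximate):

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* a pmd_t reinterpreted as a pte_t -- the layouts need not match */
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE / PAGE_SIZE);
		spin_unlock(ptl);
	}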
On other architectures pte_present() and pte_dirty() may work by chance,
but there may be an issue with direct-access (dax) mappings w/o
underlying struct pages when HAVE_PTE_SPECIAL is set and THP is
available. In vm_normal_page() the fake pte will be checked with
pte_special() and because there is no "special" bit in a pmd, this will
always return false and the VM_PFNMAP | VM_MIXEDMAP checking will be
skipped. On dax mappings w/o struct pages, an invalid struct page
pointer would then be returned that can crash the kernel.
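
To see why the checking is skipped, compare with the HAVE_PTE_SPECIAL path of vm_normal_page() (abridged from the mm/memory.c of this era; only the relevant branch is shown):

	struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				    pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);

		if (HAVE_PTE_SPECIAL) {
			/* a fake pte built from a pmd never looks "special" ... */
			if (likely(!pte_special(pte)))
				goto check_pfn;
			/* ... so the VM_PFNMAP/VM_MIXEDMAP handling here is skipped */
			if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
				return NULL;
			...
		}
		...
	check_pfn:
		if (unlikely(pfn > highest_memmap_pfn))
			return NULL;
		return pfn_to_page(pfn);	/* bogus struct page for a dax pfn */
	}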
This patch fixes the numa_maps THP handling by introducing new "_pmd"
variants of the can_gather_numa_stats() and vm_normal_page() functions.
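
The fs/proc/task_mmu.c side of the fix is outside the diffstat below; based on the description above, the new helper there might look roughly like this (a sketch, not the literal hunk), with the THP branch of gather_pte_stats() switched over to pmd_dirty() and the new vm_normal_page_pmd():

	static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
						      struct vm_area_struct *vma,
						      unsigned long addr)
	{
		struct page *page;
		int nid;

		if (!pmd_present(pmd))
			return NULL;

		page = vm_normal_page_pmd(vma, addr, pmd);
		if (!page)
			return NULL;

		if (PageReserved(page))
			return NULL;

		nid = page_to_nid(page);
		if (!node_isset(nid, node_states[N_MEMORY]))
			return NULL;

		return page;
	}

	/* THP branch of gather_pte_stats(), now working on the pmd itself */
	page = can_gather_numa_stats_pmd(*pmd, vma, addr);
	if (page)
		gather_stats(page, md, pmd_dirty(*pmd),
			     HPAGE_PMD_SIZE / PAGE_SIZE);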
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Cc: <stable@vger.kernel.org> [4.3+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 93897f23cc11..305537fc8640 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
 	return pfn_to_page(pfn);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+				pmd_t pmd)
+{
+	unsigned long pfn = pmd_pfn(pmd);
+
+	/*
+	 * There is no pmd_special() but there may be special pmds, e.g.
+	 * in a direct-access (dax) mapping, so let's just replicate the
+	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+	 */
+	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+		if (vma->vm_flags & VM_MIXEDMAP) {
+			if (!pfn_valid(pfn))
+				return NULL;
+			goto out;
+		} else {
+			unsigned long off;
+			off = (addr - vma->vm_start) >> PAGE_SHIFT;
+			if (pfn == vma->vm_pgoff + off)
+				return NULL;
+			if (!is_cow_mapping(vma->vm_flags))
+				return NULL;
+		}
+	}
+
+	if (is_zero_pfn(pfn))
+		return NULL;
+	if (unlikely(pfn > highest_memmap_pfn))
+		return NULL;
+
+	/*
+	 * NOTE! We still have PageReserved() pages in the page tables.
+	 * eg. VDSO mappings can cause them to exist.
+	 */
+out:
+	return pfn_to_page(pfn);
+}
+#endif
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range