author	Mel Gorman <mgorman@suse.de>	2014-10-09 18:26:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:52 -0400
commit	6a33979d5bd7521497121c5ae4435d7003115a0f (patch)
tree	f010f2441b1942f0153b999fa987196e68a30272 /include/asm-generic/pgtable.h
parent	ed2f240094f900833ac06f533ab8bbcf0a1e8199 (diff)
mm: remove misleading ARCH_USES_NUMA_PROT_NONE
ARCH_USES_NUMA_PROT_NONE was defined for architectures that implemented _PAGE_NUMA using _PROT_NONE. This saved using an additional PTE bit and relied on the fact that PROT_NONE vmas were skipped by the NUMA hinting fault scanner. This was found to be conceptually confusing with a lot of implicit assumptions and it was asked that an alternative be found.

Commit c46a7c81 "x86: define _PAGE_NUMA by reusing software bits on the PMD and PTE levels" redefined _PAGE_NUMA on x86 to be one of the swap PTE bits and shrunk the maximum possible swap size but it did not go far enough. There are no architectures that reuse _PROT_NONE as _PROT_NUMA but the relics still exist.

This patch removes ARCH_USES_NUMA_PROT_NONE and removes some unnecessary duplication in powerpc vs the generic implementation by defining the types the core NUMA helpers expected to exist from x86 with their ppc64 equivalent. This necessitated that a PTE bit mask be created that identified the bits that distinguish present from NUMA pte entries but it is expected this will only differ between arches based on _PAGE_PROTNONE. The naming for the generic helpers was taken from x86 originally but ppc64 has types that are equivalent for the purposes of the helper so they are mapped instead of duplicating code.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
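For context, the generic pte_numa()/pmd_numa() helpers in the hunks below expect each architecture to supply ptenuma_flags()/pmdnuma_flags(), returning only the page-table bits that distinguish a present entry from a NUMA hinting entry. A minimal sketch of that arch-side contract follows; the mask name _PAGE_NUMA_MASK and its exact bit composition are illustrative assumptions modelled on the old generic check, not the literal x86 or ppc64 code from this commit:

/*
 * Illustrative sketch only: the per-arch helpers the generic NUMA code
 * compares against _PAGE_NUMA. The mask name and bits below are assumptions
 * based on the previous generic test
 * (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT); an architecture that does not
 * define _PAGE_PROTNONE would simply omit that bit.
 */
#define _PAGE_NUMA_MASK	(_PAGE_NUMA | _PAGE_PROTNONE | _PAGE_PRESENT)

static inline pteval_t ptenuma_flags(pte_t pte)
{
	/* only the bits that distinguish a present pte from a NUMA pte */
	return pte_flags(pte) & _PAGE_NUMA_MASK;
}

static inline pmdval_t pmdnuma_flags(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_NUMA_MASK;
}

With that contract in place, the generic pte_numa() below reduces to checking that _PAGE_NUMA is the only one of those distinguishing bits set.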
Diffstat (limited to 'include/asm-generic/pgtable.h')
-rw-r--r--	include/asm-generic/pgtable.h	27
1 file changed, 9 insertions, 18 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 53b2acc38213..281870f56450 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -660,11 +660,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
 /*
- * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the
- * same bit too). It's set only when _PAGE_PRESET is not set and it's
- * never set if _PAGE_PRESENT is set.
+ * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
+ * is protected for PROT_NONE and a NUMA hinting fault entry. If the
+ * architecture defines __PAGE_PROTNONE then it should take that into account
+ * but those that do not can rely on the fact that the NUMA hinting scanner
+ * skips inaccessible VMAs.
  *
  * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
  * fault triggers on those regions if pte/pmd_numa returns true
@@ -673,16 +674,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
 #ifndef pte_numa
 static inline int pte_numa(pte_t pte)
 {
-	return (pte_flags(pte) &
-		(_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+	return ptenuma_flags(pte) == _PAGE_NUMA;
 }
 #endif
 
 #ifndef pmd_numa
 static inline int pmd_numa(pmd_t pmd)
 {
-	return (pmd_flags(pmd) &
-		(_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+	return pmdnuma_flags(pmd) == _PAGE_NUMA;
 }
 #endif
 
@@ -722,6 +721,8 @@ static inline pte_t pte_mknuma(pte_t pte)
 {
 	pteval_t val = pte_val(pte);
 
+	VM_BUG_ON(!(val & _PAGE_PRESENT));
+
 	val &= ~_PAGE_PRESENT;
 	val |= _PAGE_NUMA;
 
@@ -765,16 +766,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
 }
 #endif
 #else
-extern int pte_numa(pte_t pte);
-extern int pmd_numa(pmd_t pmd);
-extern pte_t pte_mknonnuma(pte_t pte);
-extern pmd_t pmd_mknonnuma(pmd_t pmd);
-extern pte_t pte_mknuma(pte_t pte);
-extern pmd_t pmd_mknuma(pmd_t pmd);
-extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
-#else
 static inline int pmd_numa(pmd_t pmd)
 {
 	return 0;