author	Dan Williams <dan.j.williams@intel.com>	2018-04-05 19:24:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 00:36:26 -0400
commit	09135cc594d141cd279c32a18b91cb3bd3fe8cc5 (patch)
tree	c42d4d5b500e79f625f6dd2b873c3bcb7000b586
parent	2923117b7162946042e49cd363846c498293230c (diff)
mm, powerpc: use vma_kernel_pagesize() in vma_mmu_pagesize()
Patch series "mm, smaps: MMUPageSize for device-dax", v3.

Similar to commit 31383c6865a5 ("mm, hugetlbfs: introduce ->split() to vm_operations_struct"), here is another occasion where we want special-case hugetlbfs/hstate enabling to also apply to device-dax.

This prompts the question of what other hstate conversions we might do beyond ->split() and ->pagesize(), but this appears to be the last of the usages of hstate_vma() in generic/non-hugetlbfs-specific code paths.

This patch (of 3):

The current powerpc definition of vma_mmu_pagesize() open-codes looking up the page size via hstate. It is identical to the generic vma_kernel_pagesize() implementation.

Now, vma_kernel_pagesize() is growing support for determining the page size of Device-DAX vmas in addition to the existing Hugetlbfs page size determination.

Ideally, if the powerpc vma_mmu_pagesize() used vma_kernel_pagesize() it would automatically benefit from any new vma-type support that is added to vma_kernel_pagesize(). However, the powerpc vma_mmu_pagesize() is prevented from calling vma_kernel_pagesize() due to a circular header dependency that requires vma_mmu_pagesize() to be defined before including <linux/hugetlb.h>.

Break this circular dependency by defining the default vma_mmu_pagesize() as a __weak symbol to be overridden by the powerpc version.

Link: http://lkml.kernel.org/r/151996254179.27922.2213728278535578744.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Jane Chu <jane.chu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
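Background on the mechanism, since the fix hinges on it: a __weak function definition survives linking only if no ordinary ("strong") definition of the same symbol exists anywhere in the link, so an architecture can replace the generic function without any shared header needing to know about it. Below is a minimal userspace sketch of the idea using GCC's __attribute__((weak)), which is what the kernel's __weak macro expands to; the file names and my_pagesize() are illustrative, not kernel code.

/* default.c - generic fallback, standing in for mm/hugetlb.c */
__attribute__((weak)) unsigned long my_pagesize(void)
{
	return 4096;	/* kept only if nothing else defines my_pagesize() */
}

/* override.c - arch override, standing in for arch/powerpc/mm/hugetlbpage.c */
unsigned long my_pagesize(void)
{
	return 65536;	/* strong definition; the linker discards the weak one */
}

/* main.c */
#include <stdio.h>
unsigned long my_pagesize(void);

int main(void)
{
	printf("%lu\n", my_pagesize());
	return 0;
}

Linking default.c with main.c prints 4096; adding override.c to the link prints 65536 instead, with no #define or #ifndef needed in a shared header, which is exactly what lets the "#define vma_mmu_pagesize" / "#ifndef vma_mmu_pagesize" dance be removed in the hunks below.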
-rw-r--r--	arch/powerpc/include/asm/hugetlb.h	6
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	5
-rw-r--r--	mm/hugetlb.c	8
3 files changed, 4 insertions, 15 deletions
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 1a4847f67ea8..6f6751d3eba9 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -118,12 +118,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long ceiling);
 
-/*
- * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
- * to override the version in mm/hugetlb.c
- */
-#define vma_mmu_pagesize vma_mmu_pagesize
-
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 876da2bc1796..3a08d211d2ee 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -568,10 +568,7 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 	if (!radix_enabled())
 		return 1UL << mmu_psize_to_shift(psize);
 #endif
-	if (!is_vm_hugetlb_page(vma))
-		return PAGE_SIZE;
-
-	return huge_page_size(hstate_vma(vma));
+	return vma_kernel_pagesize(vma);
 }
 
 static inline bool is_power_of_4(unsigned long x)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 976bbc5646fe..92c49b9d7cbb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -651,15 +651,13 @@ EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 /*
  * Return the page size being used by the MMU to back a VMA. In the majority
  * of cases, the page size used by the kernel matches the MMU size. On
- * architectures where it differs, an architecture-specific version of this
- * function is required.
+ * architectures where it differs, an architecture-specific 'strong'
+ * version of this symbol is required.
  */
-#ifndef vma_mmu_pagesize
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 {
 	return vma_kernel_pagesize(vma);
 }
-#endif
 
 /*
  * Flags for MAP_PRIVATE reservations. These are stored in the bottom