author	David Gibson <david@gibson.dropbear.id.au>	2006-03-22 03:09:01 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:54:04 -0500
commit	42b88befd6e0dae1a5fe04c03925037fa890e1f3 (patch)
tree	c234584f797e65e1bcd0d4675d56d1eb004d6681
parent	3915bcf38fe0b6d130b4bbde97804f29a0becf32 (diff)
[PATCH] hugepage: is_aligned_hugepage_range() cleanup
Quite a long time back, prepare_hugepage_range() replaced is_aligned_hugepage_range() as the callback from mm/mmap.c to arch code to verify if an address range is suitable for a hugepage mapping. is_aligned_hugepage_range() stuck around, but only to implement prepare_hugepage_range() on archs which didn't implement their own. Most archs (everything except ia64 and powerpc) used the same implementation of is_aligned_hugepage_range(). On powerpc, which implements its own prepare_hugepage_range(), the custom version was never used.

In addition, "is_aligned_hugepage_range()" was a bad name, because it suggests it returns true iff the given range is a good hugepage range, whereas in fact it returns 0-or-error (so the sense is reversed).

This patch cleans up by abolishing is_aligned_hugepage_range(). Instead prepare_hugepage_range() is defined directly. Most archs use the default version, which simply checks the given region is aligned to the size of a hugepage. ia64 and powerpc define custom versions. The ia64 one simply checks that the range is in the correct address space region in addition to being suitably aligned. The powerpc version (just as previously) checks for suitable addresses, and if necessary performs low-level MMU frobbing to set up new areas for use by hugepages.

No libhugetlbfs testsuite regressions on ppc64 (POWER5 LPAR).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
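For illustration only (not part of the patch): a minimal userspace sketch of the 0-or-error convention the commit message describes, mirroring the default prepare_hugepage_range() this patch adds to include/linux/hugetlb.h. The HPAGE_* constants are placeholders assuming a 2 MB hugepage; the real values come from each arch's page.h.

```c
#include <stdio.h>
#include <errno.h>

/* Placeholder constants for illustration; real values are arch-specific. */
#define HPAGE_SHIFT 21				/* assume 2 MB hugepages */
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

/* Mirrors the default prepare_hugepage_range() added by this patch:
 * returns 0 when addr and len are hugepage-aligned, -EINVAL otherwise.
 * Note the error-code convention: 0 means "range is fine", which is
 * the reversed sense the old name is_aligned_hugepage_range() suggested. */
static int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* Aligned 2 MB start, 4 MB length: accepted, prints 0. */
	printf("%d\n", prepare_hugepage_range(0x200000UL, 0x400000UL));
	/* Misaligned start address: rejected, prints -22 (-EINVAL). */
	printf("%d\n", prepare_hugepage_range(0x201000UL, 0x400000UL));
	return 0;
}
```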
-rw-r--r--	arch/i386/mm/hugetlbpage.c	12
-rw-r--r--	arch/ia64/mm/hugetlbpage.c	5
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	15
-rw-r--r--	arch/sh/mm/hugetlbpage.c	12
-rw-r--r--	arch/sh64/mm/hugetlbpage.c	12
-rw-r--r--	arch/sparc64/mm/hugetlbpage.c	12
-rw-r--r--	include/asm-ia64/page.h	1
-rw-r--r--	include/linux/hugetlb.h	16
8 files changed, 16 insertions(+), 69 deletions(-)
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index d524127c9afc..a7d891585411 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -48,18 +48,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 #if 0	/* This is just for testing */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 2d13889d0a99..9dbc7dadd165 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -68,9 +68,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 #define mk_pte_huge(entry)	{ pte_val(entry) |= _PAGE_P; }
 
 /*
- * This function checks for proper alignment of input addr and len parameters.
+ * Don't actually need to do any preparation, but need to make sure
+ * the address is in the right region.
  */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b51bb28c054b..7370f9f33e29 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -133,21 +133,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return __pte(old);
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	if (! (within_hugepage_low_range(addr, len)
-	       || within_hugepage_high_range(addr, len)) )
-		return -EINVAL;
-	return 0;
-}
-
 struct slb_flush_info {
 	struct mm_struct *mm;
 	u16 newareas;
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 6b7a7688c98e..a3568fd51508 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -84,18 +84,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index ed6a505b3ee2..3d89f2a6c785 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -84,18 +84,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index a7a24869d045..280dc7958a13 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -263,18 +263,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 732cf3086741..3ab27333dae4 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -57,6 +57,7 @@
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
 # define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5db25ffdb3eb..d6f1019625af 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -36,7 +36,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 				pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
 void hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot);
@@ -54,8 +53,18 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
 #endif
 
 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len)	\
-	is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
 #else
 int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
@@ -95,7 +104,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define is_aligned_hugepage_range(addr, len)	0
 #define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
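As a usage note (illustration, not part of the patch): an architecture that defines ARCH_HAS_PREPARE_HUGEPAGE_RANGE in its asm/page.h, as the ia64 hunk above does, supplies its own prepare_hugepage_range() and the generic inline in hugetlb.h compiles out. A compilable sketch of that shape follows; in_arch_hugepage_region() and the HPAGE_* constants are hypothetical placeholders standing in for arch-specific checks (ia64 validates the address-space region; powerpc may additionally set up new areas for hugepage use).

```c
#include <errno.h>
#include <stdbool.h>

/* Illustrative placeholders; real values come from the arch headers. */
#define HPAGE_SHIFT 21
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

/* Hypothetical stand-in for an arch-specific validity check, such as
 * "is this address in the hugepage-only region?" on ia64. */
static bool in_arch_hugepage_region(unsigned long addr, unsigned long len)
{
	(void)addr;
	(void)len;
	return true;	/* pretend every range is in the right region */
}

/* Shape of a custom prepare_hugepage_range() on an arch that defines
 * ARCH_HAS_PREPARE_HUGEPAGE_RANGE: the common alignment checks plus
 * whatever extra validation or setup the arch requires. */
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (!in_arch_hugepage_region(addr, len))
		return -EINVAL;
	return 0;
}
```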