aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/mm/hugetlbpage.c
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2005-04-19 16:29:16 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org.(none)>2005-04-19 16:29:16 -0400
commit3bf5ee95648c694bac4d13529563c230cd4fe5f2 (patch)
tree9430e6e4f4c3d586ecb7375cd780fd17694888c7 /arch/ia64/mm/hugetlbpage.c
parentee39b37b23da0b6ec53a8ebe90ff41c016f8ae27 (diff)
[PATCH] freepgt: hugetlb_free_pgd_range
ia64 and ppc64 had hugetlb_free_pgtables functions which were no longer being called, and it wasn't obvious what to do about them.

The ppc64 case turns out to be easy: the associated tables are noted elsewhere and freed later, safe to either skip its hugetlb areas or go through the motions of freeing nothing.  Since ia64 does need a special case, restore to ppc64 the special case of skipping them.

The ia64 hugetlb case has been broken since pgd_addr_end went in, though it probably appeared to work okay if you just had one such area; in fact it's been broken much longer if you consider a long munmap spanning from another region into the hugetlb region.

In the ia64 hugetlb region, more virtual address bits are available than in the other regions, yet the page tables are structured the same way: the page at the bottom is larger.  Here we need to scale down each addr before passing it to the standard free_pgd_range.  Was about to write a hugely_scaled_down macro, but found htlbpage_to_page already exists for just this purpose.  Fixed off-by-one in ia64 is_hugepage_only_range.

Uninline free_pgd_range to make it available to ia64.  Make sure the vma-gathering loop in free_pgtables cannot join a hugepage_only_range to any other (safe to join huges?  probably but don't bother).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ia64/mm/hugetlbpage.c')
-rw-r--r--arch/ia64/mm/hugetlbpage.c29
1 files changed, 23 insertions, 6 deletions
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 626258ae9742..df08ae7634b6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -186,13 +186,30 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
 	return NULL;
 }
 
-/*
- * Do nothing, until we've worked out what to do!  To allow build, we
- * must remove reference to clear_page_range since it no longer exists.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-	unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+			unsigned long addr, unsigned long end,
+			unsigned long floor, unsigned long ceiling)
 {
+	/*
+	 * This is called only when is_hugepage_only_range(addr,),
+	 * and it follows that is_hugepage_only_range(end,) also.
+	 *
+	 * The offset of these addresses from the base of the hugetlb
+	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+	 * the standard free_pgd_range will free the right page tables.
+	 *
+	 * If floor and ceiling are also in the hugetlb region, they
+	 * must likewise be scaled down; but if outside, left unchanged.
+	 */
+
+	addr = htlbpage_to_page(addr);
+	end = htlbpage_to_page(end);
+	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+		floor = htlbpage_to_page(floor);
+	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+		ceiling = htlbpage_to_page(ceiling);
+
+	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)