about summary refs log tree commit diff stats
path: root/arch/ia64/mm/hugetlbpage.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/ia64/mm/hugetlbpage.c')
-rw-r--r--  arch/ia64/mm/hugetlbpage.c  29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 626258ae9742..df08ae7634b6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -186,13 +186,30 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
 	return NULL;
 }
 
-/*
- * Do nothing, until we've worked out what to do! To allow build, we
- * must remove reference to clear_page_range since it no longer exists.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-	unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+			unsigned long addr, unsigned long end,
+			unsigned long floor, unsigned long ceiling)
 {
+	/*
+	 * This is called only when is_hugepage_only_range(addr,),
+	 * and it follows that is_hugepage_only_range(end,) also.
+	 *
+	 * The offset of these addresses from the base of the hugetlb
+	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+	 * the standard free_pgd_range will free the right page tables.
+	 *
+	 * If floor and ceiling are also in the hugetlb region, they
+	 * must likewise be scaled down; but if outside, left unchanged.
+	 */
+
+	addr = htlbpage_to_page(addr);
+	end = htlbpage_to_page(end);
+	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+		floor = htlbpage_to_page(floor);
+	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+		ceiling = htlbpage_to_page(ceiling);
+
+	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)