Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/mm/hugetlbpage.c   | 29
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c  | 10
2 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 626258ae9742..df08ae7634b6 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -186,13 +186,30 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 	return NULL;
 }
 
-/*
- * Do nothing, until we've worked out what to do!  To allow build, we
- * must remove reference to clear_page_range since it no longer exists.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-	unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+			unsigned long addr, unsigned long end,
+			unsigned long floor, unsigned long ceiling)
 {
+	/*
+	 * This is called only when is_hugepage_only_range(addr,),
+	 * and it follows that is_hugepage_only_range(end,) also.
+	 *
+	 * The offset of these addresses from the base of the hugetlb
+	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+	 * the standard free_pgd_range will free the right page tables.
+	 *
+	 * If floor and ceiling are also in the hugetlb region, they
+	 * must likewise be scaled down; but if outside, left unchanged.
+	 */
+
+	addr = htlbpage_to_page(addr);
+	end = htlbpage_to_page(end);
+	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+		floor = htlbpage_to_page(floor);
+	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+		ceiling = htlbpage_to_page(ceiling);
+
+	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
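Note on the hunk above: the comment relies on a scaling trick. Every address passed in lies inside the hugetlb-only region, so dividing its offset from the region base by HPAGE_SIZE/PAGE_SIZE turns a range of huge pages into an equally long range of base pages, and the generic free_pgd_range() then walks exactly the page tables that backed the huge mapping. The standalone sketch below illustrates only that idea; PAGE_SHIFT, HPAGE_SHIFT, HUGETLB_BASE and scale_htlb_addr() are made-up illustrative values and names, not the ia64 htlbpage_to_page() macro that the diff actually calls.

	#include <assert.h>

	/*
	 * Hedged sketch, not kernel code: illustrative constants only.
	 * 16 KB base pages, 256 MB huge pages, a hypothetical region base.
	 */
	#define PAGE_SHIFT	14UL
	#define HPAGE_SHIFT	28UL
	#define HUGETLB_BASE	0x8000000000000000UL

	/* Scale an address in the hugetlb region down by HPAGE_SIZE/PAGE_SIZE,
	 * so one huge page maps onto one base page of page-table coverage. */
	static unsigned long scale_htlb_addr(unsigned long addr)
	{
		unsigned long off = addr - HUGETLB_BASE;

		return HUGETLB_BASE + (off >> (HPAGE_SHIFT - PAGE_SHIFT));
	}

	int main(void)
	{
		unsigned long hpage_size = 1UL << HPAGE_SHIFT;
		unsigned long a = scale_htlb_addr(HUGETLB_BASE + 3 * hpage_size);
		unsigned long b = scale_htlb_addr(HUGETLB_BASE + 4 * hpage_size);

		/* Adjacent huge pages become adjacent base pages after scaling,
		 * so a free_pgd_range()-style walk over [a, b) covers exactly
		 * the page tables that mapped the huge range. */
		assert(b - a == 1UL << PAGE_SHIFT);
		return 0;
	}

The same reasoning explains why floor and ceiling are only scaled when they themselves fall inside the hugetlb region: addresses outside it are already in base-page terms and must be passed through unchanged.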
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index c62ddaff0720..8665bb57e42b 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -430,16 +430,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	flush_tlb_pending();
 }
 
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-	unsigned long start, unsigned long end)
-{
-	/* Because the huge pgtables are only 2 level, they can take
-	 * at most around 4M, much less than one hugepage which the
-	 * process is presumably entitled to use.  So we don't bother
-	 * freeing up the pagetables on unmap, and wait until
-	 * destroy_context() to clean up the lot. */
-}
-
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = current->mm;