Diffstat (limited to 'arch/ppc64/mm/hugetlbpage.c')
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c | 10 ----------
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index c62ddaff0720..8665bb57e42b 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -430,16 +430,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	flush_tlb_pending();
 }
 
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-			   unsigned long start, unsigned long end)
-{
-	/* Because the huge pgtables are only 2 level, they can take
-	 * at most around 4M, much less than one hugepage which the
-	 * process is presumably entitled to use.  So we don't bother
-	 * freeing up the pagetables on unmap, and wait until
-	 * destroy_context() to clean up the lot. */
-}
-
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = current->mm;