diff options
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2016-07-13 05:36:43 -0400
committer Michael Ellerman <mpe@ellerman.id.au>               2016-07-31 21:15:13 -0400
commit    5491ae7b6f48499b8892822cff371746f0b4102f (patch)
tree      a8bb6c61ba9276e1b64d4ad2ef1b6c10b8508929 /mm
parent    fbfa26d85418a155feacdb0f73cbf938f1027a8c (diff)
powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
Some archs like ppc64 need to do special things when flushing tlb for
hugepage. Add a new helper to flush hugetlb tlb range. This helps us to
avoid flushing the entire tlb mapping for the pid.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'mm')
 mm/hugetlb.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f904246a8fd5..af2d88253bfc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3938,6 +3938,14 @@ same_page:
 	return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot)
 {
@@ -3998,7 +4006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * once we release i_mmap_rwsem, another task can do the final put_page
 	 * and that page table be reused and filled with junk.
 	 */
-	flush_tlb_range(vma, start, end);
+	flush_hugetlb_tlb_range(vma, start, end);
 	mmu_notifier_invalidate_range(mm, start, end);
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);