author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-07-26 18:24:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 19:19:19 -0400
commit	31d49da5ad01728e48a1bb2b43795598b23de68a (patch)
tree	2b67176245e53731381950f990b817ed0c46de61 /mm
parent	337d9abf1cd1a59645d91b6d0b1685a476b81978 (diff)
mm/hugetlb: simplify hugetlb unmap
For hugetlb, like THP (and unlike regular pages), we do the TLB flush after
dropping the ptl.  Because of that, we don't need to track force_flush as we
do now; we can simply call tlb_remove_page(), which will do the flush if
needed.

No functionality change in this patch.

Link: http://lkml.kernel.org/r/1465049193-22197-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
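The point the message makes is that the flush decision can live entirely inside
the gather layer: the caller queues each page after dropping the lock, and the
batching helper flushes on its own when its batch fills, so the caller needs no
force_flush flag and no restart label.  Below is a rough userspace sketch of
that pattern only; toy_gather, toy_remove_page, toy_flush and BATCH are invented
stand-ins for illustration, not the kernel's mmu_gather/tlb_remove_page API.

/*
 * Toy model of the pattern the patch moves to: each iteration hands the
 * page to the gather layer, which flushes by itself when its batch is full.
 * Names here are illustrative inventions, not kernel interfaces.
 */
#include <stdio.h>
#include <stddef.h>

#define BATCH 4			/* tiny batch so the flush path is visible */

struct toy_gather {
	unsigned long pages[BATCH];
	size_t nr;
};

/* Stand-in for the per-batch TLB invalidate + page free. */
static void toy_flush(struct toy_gather *tlb)
{
	if (!tlb->nr)
		return;
	printf("flush: releasing %zu page(s), first=%lu\n",
	       tlb->nr, tlb->pages[0]);
	tlb->nr = 0;
}

/* Queue one page; flush automatically when the batch is full. */
static void toy_remove_page(struct toy_gather *tlb, unsigned long page)
{
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr == BATCH)
		toy_flush(tlb);
}

int main(void)
{
	struct toy_gather tlb = { .nr = 0 };

	/*
	 * The "unmap loop": no force_flush bookkeeping, no goto again.
	 * Per-page teardown would go here; the flush decision is deferred
	 * to toy_remove_page(), mirroring what tlb_remove_page() does.
	 */
	for (unsigned long page = 0; page < 10; page++)
		toy_remove_page(&tlb, page);
	toy_flush(&tlb);	/* drain the final partial batch */
	return 0;
}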
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	54
1 file changed, 21 insertions(+), 33 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index addfe4accc07..524c078ce67b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3177,7 +3177,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
 {
-	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
@@ -3196,19 +3195,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
-again:
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep))
-			goto unlock;
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte))
-			goto unlock;
+		if (huge_pte_none(pte)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		/*
 		 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3216,7 +3218,8 @@ again:
 		 */
 		if (unlikely(!pte_present(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			goto unlock;
+			spin_unlock(ptl);
+			continue;
 		}
 
 		page = pte_page(pte);
@@ -3226,9 +3229,10 @@ again:
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			if (page != ref_page)
-				goto unlock;
-
+			if (page != ref_page) {
+				spin_unlock(ptl);
+				continue;
+			}
 			/*
 			 * Mark the VMA as having unmapped its page so that
 			 * future faults in this VMA will fail rather than
@@ -3244,30 +3248,14 @@ again:
 
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, true);
-		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush) {
-			address += sz;
-			spin_unlock(ptl);
-			break;
-		}
-		/* Bail out after unmapping reference page if supplied */
-		if (ref_page) {
-			spin_unlock(ptl);
-			break;
-		}
-unlock:
+
 		spin_unlock(ptl);
-	}
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
-	if (force_flush) {
-		force_flush = 0;
-		tlb_flush_mmu(tlb);
-		if (address < end && !ref_page)
-			goto again;
-	}
+		tlb_remove_page(tlb, page);
+		/*
+		 * Bail out after unmapping reference page if supplied
+		 */
+		if (ref_page)
+			break;
+	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);