Diffstat (limited to 'mm/rmap.c')

 mm/rmap.c | 42 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index eb477809a5c0..1e79fac3186b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * We have to assume the worse case ie pmd for invalidation. Note that
-	 * the page can not be free in this function as call of try_to_unmap()
-	 * must hold a reference on the page.
+	 * For THP, we have to assume the worst case, i.e. pmd, for
+	 * invalidation.  For hugetlb, it could be much worse if we need to do
+	 * pud invalidation in the case of pmd sharing.
+	 *
+	 * Note that the page cannot be freed in this function, as the caller
+	 * of try_to_unmap() must hold a reference on the page.
 	 */
 	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	if (PageHuge(page)) {
+		/*
+		 * If sharing is possible, start and end will be adjusted
+		 * accordingly.
+		 */
+		adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+	}
 	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
 	while (page_vma_mapped_walk(&pvmw)) {
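The hunk above widens the invalidation range before the notifier call and the page walk. The intuition is that hugetlb PMD page tables can only be shared for regions aligned to PUD granularity, so a flush must cover that whole aligned region. A minimal userspace sketch of the alignment arithmetic follows; adjust_range_for_pud_sharing and the x86-64 PUD_SHIFT value are illustrative assumptions, not the kernel's adjust_range_if_pmd_sharing_possible() implementation:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative value: on x86-64 a PUD entry covers 1 GiB. */
	#define PUD_SHIFT 30
	#define PUD_SIZE  (1ULL << PUD_SHIFT)
	#define PUD_MASK  (~(PUD_SIZE - 1))

	/*
	 * Hypothetical helper: widen [start, end) to PUD-aligned boundaries,
	 * since a shared PMD page table can only exist for a fully
	 * PUD-aligned region, and any flush must cover all of it.
	 */
	static void adjust_range_for_pud_sharing(uint64_t *start, uint64_t *end)
	{
		*start &= PUD_MASK;                         /* round start down */
		*end = (*end + PUD_SIZE - 1) & PUD_MASK;    /* round end up */
	}

	int main(void)
	{
		uint64_t start = 0x7f1234600000ULL;
		uint64_t end   = 0x7f1234800000ULL;  /* a 2 MiB huge page mapping */

		adjust_range_for_pud_sharing(&start, &end);
		printf("flush [%#llx, %#llx)\n",
		       (unsigned long long)start, (unsigned long long)end);
		return 0;
	}

With these inputs, the 2 MiB range is widened to the enclosing 1 GiB region [0x7f1200000000, 0x7f1240000000), which is why the commit adjusts start/end before mmu_notifier_invalidate_range_start().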
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
 		address = pvmw.address;
 
+		if (PageHuge(page)) {
+			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+				/*
+				 * huge_pmd_unshare unmapped an entire PMD
+				 * page.  There is no way of knowing exactly
+				 * which PMDs may be cached for this mm, so
+				 * we must flush them all.  start/end were
+				 * already adjusted above to cover this range.
+				 */
+				flush_cache_range(vma, start, end);
+				flush_tlb_range(vma, start, end);
+				mmu_notifier_invalidate_range(mm, start, end);
+
+				/*
+				 * The ref count of the PMD page was dropped
+				 * which is part of the way map counting
+				 * is done for shared PMDs.  Return 'true'
+				 * here.  When there is no other sharing,
+				 * huge_pmd_unshare returns false and we will
+				 * unmap the actual page and drop map count
+				 * to zero.
+				 */
+				page_vma_mapped_walk_done(&pvmw);
+				break;
+			}
+		}
 
 		if (IS_ENABLED(CONFIG_MIGRATION) &&
 		    (flags & TTU_MIGRATION) &&
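The second comment block in this hunk describes a contract: when the PMD page table was shared, huge_pmd_unshare() drops this mapping's reference on the PMD page and returns nonzero, so the walk stops without touching the page itself; when this mapping was the sole user, it returns zero and the normal per-page unmap proceeds. A minimal sketch of that refcount contract, using hypothetical names (pmd_table, unshare_pmd) that are not kernel API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a shared hugetlb PMD page table. */
	struct pmd_table {
		int refcount;	/* one reference per mm sharing this table */
	};

	/*
	 * If the table is shared, drop only our reference and report "shared"
	 * so the caller flushes the whole adjusted range and stops walking;
	 * if we were the sole user, report "not shared" so the caller goes on
	 * to unmap the page and drop its map count itself.
	 */
	static bool unshare_pmd(struct pmd_table *t)
	{
		if (t->refcount > 1) {
			t->refcount--;	/* detach this mm; others remain */
			return true;
		}
		return false;
	}

	int main(void)
	{
		struct pmd_table t = { .refcount = 2 };	/* shared by two mms */

		/* First caller: table shared, reference dropped, walk is done. */
		printf("shared=%d refcount=%d\n", unshare_pmd(&t), t.refcount);
		/* Second caller: sole user, falls through to per-page unmap. */
		printf("shared=%d refcount=%d\n", unshare_pmd(&t), t.refcount);
		return 0;
	}

This mirrors why the diff returns early after page_vma_mapped_walk_done(): dropping the shared PMD page's reference already accounts for this mapping, so no individual PTE unmap is needed.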