about summary refs log tree commit diff stats
path: root/mm/rmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  21
1 file changed, 0 insertions, 21 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 9827409eb7c7..89770bd25f31 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -539,27 +539,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
539 		goto out_unmap;
540 	}
541
542 /*
543 * Don't pull an anonymous page out from under get_user_pages.
544 * GUP carefully breaks COW and raises page count (while holding
545 * page_table_lock, as we have here) to make sure that the page
546 * cannot be freed. If we unmap that page here, a user write
547 * access to the virtual address will bring back the page, but
548 * its raised count will (ironically) be taken to mean it's not
549 * an exclusive swap page, do_wp_page will replace it by a copy
550 * page, and the user never get to see the data GUP was holding
551 * the original page for.
552 *
553 * This test is also useful for when swapoff (unuse_process) has
554 * to drop page lock: its reference to the page stops existing
555 * ptes from being unmapped, so swapoff can make progress.
556 */
557 if (PageSwapCache(page) &&
558 page_count(page) != page_mapcount(page) + 2) {
559 ret = SWAP_FAIL;
560 goto out_unmap;
561 }
562
563 	/* Nuke the page table entry. */
564 	flush_cache_page(vma, address, page_to_pfn(page));
565 	pteval = ptep_clear_flush(vma, address, pte);