Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 1ea4e6fcee77..059774712c08 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -659,23 +659,30 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	}
 
 	/*
-	 * It would be tidy to reset the PageAnon mapping here,
-	 * but that might overwrite a racing page_add_anon_rmap
-	 * which increments mapcount after us but sets mapping
-	 * before us: so leave the reset to free_hot_cold_page,
-	 * and remember that it's only reliable while mapped.
-	 * Leaving it set also helps swapoff to reinstate ptes
-	 * faster for those pages still in swapcache.
+	 * Now that the last pte has gone, s390 must transfer dirty
+	 * flag from storage key to struct page. We can usually skip
+	 * this if the page is anon, so about to be freed; but perhaps
+	 * not if it's in swapcache - there might be another pte slot
+	 * containing the swap entry, but page not yet written to swap.
 	 */
 	if ((!PageAnon(page) || PageSwapCache(page)) &&
 	    page_test_dirty(page)) {
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
-	mem_cgroup_uncharge_page(page);
 
+	mem_cgroup_uncharge_page(page);
 	__dec_zone_page_state(page,
 			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
+	/*
+	 * It would be tidy to reset the PageAnon mapping here,
+	 * but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping
+	 * before us: so leave the reset to free_hot_cold_page,
+	 * and remember that it's only reliable while mapped.
+	 * Leaving it set also helps swapoff to reinstate ptes
+	 * faster for those pages still in swapcache.
+	 */
 	}
 }
 
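
For readers who want the dirty-flag transfer logic in isolation, here is a
minimal standalone C sketch of the condition the hunk above keeps (and merely
reorders around). This is not the kernel source: the struct page fields, the
stub versions of PageAnon/PageSwapCache/page_test_dirty/page_clear_dirty/
set_page_dirty, and the helper name transfer_dirty_on_last_unmap are all
assumptions made for illustration; only the if-condition and call order
mirror the patch.

#include <stdbool.h>
#include <stdio.h>

/* Toy page: these field names are assumptions for this sketch only. */
struct page {
	bool anon;              /* stands in for PageAnon()          */
	bool swapcache;         /* stands in for PageSwapCache()     */
	bool storage_key_dirty; /* s390 storage-key dirty bit        */
	bool dirty;             /* struct page's software dirty flag */
};

static bool PageAnon(struct page *p)        { return p->anon; }
static bool PageSwapCache(struct page *p)   { return p->swapcache; }
static bool page_test_dirty(struct page *p) { return p->storage_key_dirty; }
static void page_clear_dirty(struct page *p){ p->storage_key_dirty = false; }
static void set_page_dirty(struct page *p)  { p->dirty = true; }

/*
 * Mirrors the hunk's condition: on the last unmap, transfer the
 * hardware dirty bit into struct page. Skippable for an anon page
 * about to be freed, but not while it still sits in swapcache,
 * since its data may not have reached swap yet.
 */
static void transfer_dirty_on_last_unmap(struct page *page)
{
	if ((!PageAnon(page) || PageSwapCache(page)) &&
	    page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
}

int main(void)
{
	struct page anon_in_swapcache = { true, true,  true, false };
	struct page plain_anon        = { true, false, true, false };

	transfer_dirty_on_last_unmap(&anon_in_swapcache);
	transfer_dirty_on_last_unmap(&plain_anon);

	/* Expect: swapcache anon gets dirty=1, plain anon is skipped (0). */
	printf("swapcache anon dirty=%d, plain anon dirty=%d\n",
	       anon_in_swapcache.dirty, plain_anon.dirty);
	return 0;
}

The sketch shows why the patch keeps the transfer ahead of everything else:
a plain anon page is about to be freed, so its dirty state is irrelevant,
while a swapcache page must not lose the bit before being written to swap.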