Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 78 ++++++++++++++++++++++++++++++------------------------------------------------
 1 file changed, 30 insertions(+), 48 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 0895b5c7cbff..720fc03a7bc4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -710,27 +710,6 @@ void page_add_file_rmap(struct page *page)
 	}
 }
 
-#ifdef CONFIG_DEBUG_VM
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- * @vma: the vm area being duplicated
- * @address: the user virtual address mapped
- *
- * For copy_page_range only: minimal extract from page_add_file_rmap /
- * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
- * quicker.
- *
- * The caller needs to hold the pte lock.
- */
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
-{
-	if (PageAnon(page))
-		__page_check_anon_rmap(page, vma, address);
-	atomic_inc(&page->_mapcount);
-}
-#endif
-
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -739,34 +718,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
  */
 void page_remove_rmap(struct page *page)
 {
-	if (atomic_add_negative(-1, &page->_mapcount)) {
-		/*
-		 * Now that the last pte has gone, s390 must transfer dirty
-		 * flag from storage key to struct page. We can usually skip
-		 * this if the page is anon, so about to be freed; but perhaps
-		 * not if it's in swapcache - there might be another pte slot
-		 * containing the swap entry, but page not yet written to swap.
-		 */
-		if ((!PageAnon(page) || PageSwapCache(page)) &&
-		    page_test_dirty(page)) {
-			page_clear_dirty(page);
-			set_page_dirty(page);
-		}
-		if (PageAnon(page))
-			mem_cgroup_uncharge_page(page);
-		__dec_zone_page_state(page,
-			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
-		mem_cgroup_update_mapped_file_stat(page, -1);
-		/*
-		 * It would be tidy to reset the PageAnon mapping here,
-		 * but that might overwrite a racing page_add_anon_rmap
-		 * which increments mapcount after us but sets mapping
-		 * before us: so leave the reset to free_hot_cold_page,
-		 * and remember that it's only reliable while mapped.
-		 * Leaving it set also helps swapoff to reinstate ptes
-		 * faster for those pages still in swapcache.
-		 */
-	}
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		return;
+
+	/*
+	 * Now that the last pte has gone, s390 must transfer dirty
+	 * flag from storage key to struct page. We can usually skip
+	 * this if the page is anon, so about to be freed; but perhaps
+	 * not if it's in swapcache - there might be another pte slot
+	 * containing the swap entry, but page not yet written to swap.
+	 */
+	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+		page_clear_dirty(page);
+		set_page_dirty(page);
+	}
+	if (PageAnon(page)) {
+		mem_cgroup_uncharge_page(page);
+		__dec_zone_page_state(page, NR_ANON_PAGES);
+	} else {
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
+	}
+	mem_cgroup_update_mapped_file_stat(page, -1);
+	/*
+	 * It would be tidy to reset the PageAnon mapping here,
+	 * but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping
+	 * before us: so leave the reset to free_hot_cold_page,
+	 * and remember that it's only reliable while mapped.
+	 * Leaving it set also helps swapoff to reinstate ptes
+	 * faster for those pages still in swapcache.
+	 */
 }
 
 /*