Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  78
1 file changed, 30 insertions(+), 48 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 09c3d0b96116..28aafe2b5306 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -715,27 +715,6 @@ void page_add_file_rmap(struct page *page)
 	}
 }
 
-#ifdef CONFIG_DEBUG_VM
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- * @vma:	the vm area being duplicated
- * @address:	the user virtual address mapped
- *
- * For copy_page_range only: minimal extract from page_add_file_rmap /
- * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
- * quicker.
- *
- * The caller needs to hold the pte lock.
- */
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
-{
-	if (PageAnon(page))
-		__page_check_anon_rmap(page, vma, address);
-	atomic_inc(&page->_mapcount);
-}
-#endif
-
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -744,34 +723,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
  */
 void page_remove_rmap(struct page *page)
 {
-	if (atomic_add_negative(-1, &page->_mapcount)) {
-		/*
-		 * Now that the last pte has gone, s390 must transfer dirty
-		 * flag from storage key to struct page.  We can usually skip
-		 * this if the page is anon, so about to be freed; but perhaps
-		 * not if it's in swapcache - there might be another pte slot
-		 * containing the swap entry, but page not yet written to swap.
-		 */
-		if ((!PageAnon(page) || PageSwapCache(page)) &&
-		    page_test_dirty(page)) {
-			page_clear_dirty(page);
-			set_page_dirty(page);
-		}
-		if (PageAnon(page))
-			mem_cgroup_uncharge_page(page);
-		__dec_zone_page_state(page,
-			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
-		mem_cgroup_update_mapped_file_stat(page, -1);
-		/*
-		 * It would be tidy to reset the PageAnon mapping here,
-		 * but that might overwrite a racing page_add_anon_rmap
-		 * which increments mapcount after us but sets mapping
-		 * before us: so leave the reset to free_hot_cold_page,
-		 * and remember that it's only reliable while mapped.
-		 * Leaving it set also helps swapoff to reinstate ptes
-		 * faster for those pages still in swapcache.
-		 */
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		return;
+
+	/*
+	 * Now that the last pte has gone, s390 must transfer dirty
+	 * flag from storage key to struct page.  We can usually skip
+	 * this if the page is anon, so about to be freed; but perhaps
+	 * not if it's in swapcache - there might be another pte slot
+	 * containing the swap entry, but page not yet written to swap.
+	 */
+	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+		page_clear_dirty(page);
+		set_page_dirty(page);
 	}
+	if (PageAnon(page)) {
+		mem_cgroup_uncharge_page(page);
+		__dec_zone_page_state(page, NR_ANON_PAGES);
+	} else {
+		__dec_zone_page_state(page, NR_FILE_MAPPED);
+	}
+	mem_cgroup_update_mapped_file_stat(page, -1);
+	/*
+	 * It would be tidy to reset the PageAnon mapping here,
+	 * but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping
+	 * before us: so leave the reset to free_hot_cold_page,
+	 * and remember that it's only reliable while mapped.
+	 * Leaving it set also helps swapoff to reinstate ptes
+	 * faster for those pages still in swapcache.
+	 */
 }
 
 /*