author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2009-09-21 20:01:28 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-09-22 10:17:26 -0400
commit     b904dcfed6967e9cfc8a54778498f6d289420309 (patch)
tree       af57711bb516ad30d622223d80f01932fe51a4fe
parent     57dd28fb0513d2f772bb215f27925165e7b9ce5f (diff)
mm: clean up page_remove_rmap()
page_remove_rmap() has multiple PageAnon() tests and it has deep nesting.
Clean this up.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
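For readers skimming the diff below, the change is the common guard-clause refactoring: return early while the page is still mapped by someone else, then test PageAnon() once in a single if/else instead of repeating it inside a nested block. A minimal stand-alone sketch of that shape (hypothetical stand-in types and counters, not the kernel code itself):

/* Stand-alone illustration of the guard-clause shape used by this patch.
 * struct page_stub and the printf placeholders are hypothetical stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct page_stub {
        int mapcount;   /* -1 when unmapped, like struct page::_mapcount */
        bool anon;
};

static void remove_rmap_sketch(struct page_stub *page)
{
        /* page still mapped by someone else? bail out early instead of nesting */
        if (--page->mapcount >= 0)
                return;

        /* test the anon/file distinction once, in one if/else */
        if (page->anon)
                printf("uncharge memcg, decrement anon-pages counter\n");
        else
                printf("decrement file-mapped counter\n");
}

int main(void)
{
        struct page_stub p = { .mapcount = 0, .anon = true };  /* one mapping left */
        remove_rmap_sketch(&p);         /* last mapping gone: takes the anon branch */
        return 0;
}

The early return removes one level of indentation from everything that follows, which is what eliminates the deep nesting the commit message complains about.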
-rw-r--r--   mm/rmap.c   57
1 file changed, 30 insertions(+), 27 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 0895b5c7cbf..1406e67f961 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -739,34 +739,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
  */
 void page_remove_rmap(struct page *page)
 {
-        if (atomic_add_negative(-1, &page->_mapcount)) {
-                /*
-                 * Now that the last pte has gone, s390 must transfer dirty
-                 * flag from storage key to struct page. We can usually skip
-                 * this if the page is anon, so about to be freed; but perhaps
-                 * not if it's in swapcache - there might be another pte slot
-                 * containing the swap entry, but page not yet written to swap.
-                 */
-                if ((!PageAnon(page) || PageSwapCache(page)) &&
-                    page_test_dirty(page)) {
-                        page_clear_dirty(page);
-                        set_page_dirty(page);
-                }
-                if (PageAnon(page))
-                        mem_cgroup_uncharge_page(page);
-                __dec_zone_page_state(page,
-                        PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
-                mem_cgroup_update_mapped_file_stat(page, -1);
-                /*
-                 * It would be tidy to reset the PageAnon mapping here,
-                 * but that might overwrite a racing page_add_anon_rmap
-                 * which increments mapcount after us but sets mapping
-                 * before us: so leave the reset to free_hot_cold_page,
-                 * and remember that it's only reliable while mapped.
-                 * Leaving it set also helps swapoff to reinstate ptes
-                 * faster for those pages still in swapcache.
-                 */
+        /* page still mapped by someone else? */
+        if (!atomic_add_negative(-1, &page->_mapcount))
+                return;
+
+        /*
+         * Now that the last pte has gone, s390 must transfer dirty
+         * flag from storage key to struct page. We can usually skip
+         * this if the page is anon, so about to be freed; but perhaps
+         * not if it's in swapcache - there might be another pte slot
+         * containing the swap entry, but page not yet written to swap.
+         */
+        if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+                page_clear_dirty(page);
+                set_page_dirty(page);
         }
+        if (PageAnon(page)) {
+                mem_cgroup_uncharge_page(page);
+                __dec_zone_page_state(page, NR_ANON_PAGES);
+        } else {
+                __dec_zone_page_state(page, NR_FILE_MAPPED);
+        }
+        mem_cgroup_update_mapped_file_stat(page, -1);
+        /*
+         * It would be tidy to reset the PageAnon mapping here,
+         * but that might overwrite a racing page_add_anon_rmap
+         * which increments mapcount after us but sets mapping
+         * before us: so leave the reset to free_hot_cold_page,
+         * and remember that it's only reliable while mapped.
+         * Leaving it set also helps swapoff to reinstate ptes
+         * faster for those pages still in swapcache.
+         */
 }
 
 /*