author     Johannes Weiner <hannes@cmpxchg.org>    2014-10-29 17:50:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-10-29 19:33:15 -0400
commit     8186eb6a799e4e32f984b55858d8e393938be0c1 (patch)
tree       972a445b8a00c5c0f0a360d30893239cfa1f4e8e /mm
parent     d7365e783edb858279be1d03f61bc8d5d3383d90 (diff)
mm: rmap: split out page_remove_file_rmap()
page_remove_rmap() has too many branches on PageAnon() and is hard to
follow.  Move the file part into a separate function.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c | 78
1 file changed, 46 insertions(+), 32 deletions(-)
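A note for readers of the diff below: both removal paths lean on the same trick, namely that page->_mapcount is biased to start at -1 ("no mappings"), so dropping a mapping only drives the count negative again when the last mapper leaves. That is exactly what atomic_add_negative(-1, &page->_mapcount) detects. A rough userspace analogue using C11 atomics (illustrative only; the kernel's atomic_add_negative() is an arch-specific primitive, not this code):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Like page->_mapcount, the counter is biased by -1:
	 * -1 means "no mappings", 0 means one mapping, and so on. */
	static atomic_int mapcount = -1;

	static void map_page(void)
	{
		atomic_fetch_add(&mapcount, 1);
	}

	/* Returns true only for the caller that removes the last mapping,
	 * mirroring atomic_add_negative(-1, &page->_mapcount): subtract
	 * one and report whether the new value went negative. */
	static bool unmap_page(void)
	{
		return atomic_fetch_sub(&mapcount, 1) - 1 < 0;
	}

	int main(void)
	{
		map_page();
		map_page();
		printf("last unmap? %d\n", unmap_page());  /* 0: still mapped */
		printf("last unmap? %d\n", unmap_page());  /* 1: back to -1 */
		return 0;
	}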
diff --git a/mm/rmap.c b/mm/rmap.c
index f574046f77d4..19886fb2f13a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1054,6 +1054,36 @@ void page_add_file_rmap(struct page *page)
 	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -1062,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	struct mem_cgroup *uninitialized_var(memcg);
-	bool anon = PageAnon(page);
-	unsigned long flags;
-	bool locked;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
-	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				-hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1111,9 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-out:
-	if (!anon)
-		mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /*