Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 116a5053415b..f574046f77d4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1042,15 +1042,16 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-        bool locked;
+        struct mem_cgroup *memcg;
         unsigned long flags;
+        bool locked;
 
-        mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+        memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
         if (atomic_inc_and_test(&page->_mapcount)) {
                 __inc_zone_page_state(page, NR_FILE_MAPPED);
-                mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+                mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
         }
-        mem_cgroup_end_update_page_stat(page, &locked, &flags);
+        mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
@@ -1061,9 +1062,10 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+        struct mem_cgroup *uninitialized_var(memcg);
         bool anon = PageAnon(page);
-        bool locked;
         unsigned long flags;
+        bool locked;
 
         /*
          * The anon case has no mem_cgroup page_stat to update; but may
@@ -1071,7 +1073,7 @@ void page_remove_rmap(struct page *page)
          * we hold the lock against page_stat move: so avoid it on anon.
          */
         if (!anon)
-                mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+                memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 
         /* page still mapped by someone else? */
         if (!atomic_add_negative(-1, &page->_mapcount))
@@ -1096,8 +1098,7 @@ void page_remove_rmap(struct page *page)
                                 -hpage_nr_pages(page));
         } else {
                 __dec_zone_page_state(page, NR_FILE_MAPPED);
-                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-                mem_cgroup_end_update_page_stat(page, &locked, &flags);
+                mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
         }
         if (unlikely(PageMlocked(page)))
                 clear_page_mlock(page);
@@ -1110,10 +1111,9 @@ void page_remove_rmap(struct page *page)
          * Leaving it set also helps swapoff to reinstate ptes
          * faster for those pages still in swapcache.
          */
-        return;
 out:
         if (!anon)
-                mem_cgroup_end_update_page_stat(page, &locked, &flags);
+                mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /*
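The pattern the diff converges on is worth spelling out: the old interface passed the page to both the begin and end calls (with &locked and &flags by reference to each), so the end call had to re-derive the page's memcg. The new interface has mem_cgroup_begin_page_stat() return the struct mem_cgroup * it pinned, and mem_cgroup_end_page_stat() consumes that same pointer along with locked and flags by value. Below is a minimal user-space sketch of that calling convention; the stand-in types and bodies are hypothetical illustrations, not the kernel implementation.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel types involved. */
    struct mem_cgroup { const char *name; };
    struct page { struct mem_cgroup *memcg; };

    /*
     * New-style begin: resolve the page's memcg once, report via
     * *locked/*flags how it was pinned, and hand the memcg back so
     * the matching end call needs no second lookup.
     */
    static struct mem_cgroup *begin_page_stat(struct page *page,
                                              bool *locked,
                                              unsigned long *flags)
    {
            *locked = false;        /* pretend the lockless fast path applied */
            *flags = 0;
            return page->memcg;
    }

    /* End consumes exactly what begin returned -- by value, no re-lookup. */
    static void end_page_stat(struct mem_cgroup *memcg,
                              bool locked, unsigned long flags)
    {
            (void)locked;
            (void)flags;
            printf("done updating stats for %s\n", memcg->name);
    }

    int main(void)
    {
            struct mem_cgroup cg = { "example" };
            struct page page = { &cg };
            bool locked;
            unsigned long flags;

            /* The shape page_add_file_rmap() takes after the patch. */
            struct mem_cgroup *memcg = begin_page_stat(&page, &locked, &flags);
            /* ... bump per-memcg counters against the pinned memcg ... */
            end_page_stat(memcg, locked, flags);
            return 0;
    }

This also explains the uninitialized_var(memcg) in page_remove_rmap(): memcg is only assigned on the !anon path, and only consumed on that same path at the out: label, so the annotation merely silences a false-positive compiler warning.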