path: root/mm/rmap.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2014-10-29 19:38:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-29 19:38:48 -0400
commit    a7ca10f263d7e673c74d8e0946d6b9993405cc9c (patch)
tree      7c50f0e728ca1a426235356acba1115c45dfe809 /mm/rmap.c
parent    d506aa68c23db708ad45ca8c17f0d7f5d7029a37 (diff)
parent    4d88e6f7d5ffc84e6094a47925870f4a130555c2 (diff)
Merge branch 'akpm' (incoming from Andrew Morton)
Merge misc fixes from Andrew Morton:
 "21 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm/balloon_compaction: fix deflation when compaction is disabled
  sh: fix sh770x SCIF memory regions
  zram: avoid NULL pointer access in concurrent situation
  mm/slab_common: don't check for duplicate cache names
  ocfs2: fix d_splice_alias() return code checking
  mm: rmap: split out page_remove_file_rmap()
  mm: memcontrol: fix missed end-writeback page accounting
  mm: page-writeback: inline account_page_dirtied() into single caller
  lib/bitmap.c: fix undefined shift in __bitmap_shift_{left|right}()
  drivers/rtc/rtc-bq32k.c: fix register value
  memory-hotplug: clear pgdat which is allocated by bootmem in try_offline_node()
  drivers/rtc/rtc-s3c.c: fix initialization failure without rtc source clock
  kernel/kmod: fix use-after-free of the sub_info structure
  drivers/rtc/rtc-pm8xxx.c: rework to support pm8941 rtc
  mm, thp: fix collapsing of hugepages on madvise
  drivers: of: add return value to of_reserved_mem_device_init()
  mm: free compound page with correct order
  gcov: add ARM64 to GCOV_PROFILE_ALL
  fsnotify: next_i is freed during fsnotify_unmount_inodes.
  mm/compaction.c: avoid premature range skip in isolate_migratepages_range
  ...
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  |  88
1 file changed, 51 insertions(+), 37 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 116a5053415b..19886fb2f13a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1042,15 +1042,46 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	bool locked;
+	struct mem_cgroup *memcg;
 	unsigned long flags;
+	bool locked;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
@@ -1061,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	bool anon = PageAnon(page);
-	bool locked;
-	unsigned long flags;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
-	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				      -hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
	 */
-	return;
-out:
-	if (!anon)
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
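
Note on the mapcount handling seen in both hunks: the kernel biases page->_mapcount at -1, so atomic_inc_and_test() reporting zero marks the first mapping of a page, and atomic_add_negative(-1, ...) marks the last unmapping, which is when NR_FILE_MAPPED / NR_ANON_PAGES are adjusted. The snippet below is a minimal userspace sketch of that convention and of the file/anon split introduced here; struct page, the counters, and the helpers are simplified stand-ins, not the kernel's definitions.

/*
 * Minimal userspace model of the mapcount convention used in the hunks
 * above. NOT kernel code: struct page, the counters and the helpers are
 * simplified stand-ins for illustration only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	atomic_int _mapcount;	/* biased: -1 means "not mapped by anyone" */
	bool anon;		/* stand-in for PageAnon() */
};

static long nr_file_mapped;	/* stand-in for NR_FILE_MAPPED */
static long nr_anon_pages;	/* stand-in for NR_ANON_PAGES */

/* first mapping is the -1 -> 0 transition: inc-and-test hits zero */
static void page_add_file_rmap(struct page *page)
{
	if (atomic_fetch_add(&page->_mapcount, 1) + 1 == 0)
		nr_file_mapped++;
}

/* last unmapping is the 0 -> -1 transition: add(-1) goes negative */
static void page_remove_file_rmap(struct page *page)
{
	if (atomic_fetch_sub(&page->_mapcount, 1) - 1 >= 0)
		return;			/* page still mapped by someone else */
	nr_file_mapped--;
}

static void page_remove_rmap(struct page *page)
{
	if (!page->anon) {		/* file pages take the split-out helper */
		page_remove_file_rmap(page);
		return;
	}
	if (atomic_fetch_sub(&page->_mapcount, 1) - 1 >= 0)
		return;			/* page still mapped by someone else */
	nr_anon_pages--;
}

int main(void)
{
	struct page file_page = { .anon = false };

	atomic_init(&file_page._mapcount, -1);	/* pages start out unmapped */

	page_add_file_rmap(&file_page);		/* first map:  counter goes up   */
	page_add_file_rmap(&file_page);		/* second map: counter unchanged */
	page_remove_rmap(&file_page);		/* one unmap:  counter unchanged */
	page_remove_rmap(&file_page);		/* last unmap: counter goes down */

	printf("NR_FILE_MAPPED model: %ld\n", nr_file_mapped);	/* prints 0 */
	return 0;
}

The mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat() bracket in the real code, which keeps these counter updates stable against memcg page moves, is deliberately left out of this sketch.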