author	Johannes Weiner <hannes@cmpxchg.org>	2017-07-06 18:40:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 19:24:35 -0400
commit	00f3ca2c2d6635d85108571c4dd9a29088668662 (patch)
tree	d85532fc7416bcb2dc7c442f8ebf5536fb684ad5 /mm/rmap.c
parent	ed52be7bfd45533b194b429f43361493d24599a7 (diff)
mm: memcontrol: per-lruvec stats infrastructure
lruvecs are at the intersection of the NUMA node and memcg, which is the scope for most paging activity.

Introduce a convenient accounting infrastructure that maintains statistics per node, per memcg, and the lruvec itself.

Then convert over accounting sites for statistics that are already tracked in both nodes and memcgs and can be easily switched.

[hannes@cmpxchg.org: fix crash in the new cgroup stat keeping code]
Link: http://lkml.kernel.org/r/20170531171450.GA10481@cmpxchg.org
[hannes@cmpxchg.org: don't track uncharged pages at all]
Link: http://lkml.kernel.org/r/20170605175254.GA8547@cmpxchg.org
[hannes@cmpxchg.org: add missing free_percpu()]
Link: http://lkml.kernel.org/r/20170605175354.GB8547@cmpxchg.org
[linux@roeck-us.net: hexagon: fix build error caused by include file order]
Link: http://lkml.kernel.org/r/20170617153721.GA4382@roeck-us.net
Link: http://lkml.kernel.org/r/20170530181724.27197-6-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
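The helpers this patch introduces live in include/linux/memcontrol.h and mm/memcontrol.c, outside the mm/rmap.c hunks shown below. As a rough sketch of the idea only (field and helper names here are paraphrased, not quoted from the patch), the new __mod_lruvec_page_state() folds into one irq-unsafe call the node, memcg, and lruvec counter updates that callers previously had to make separately:

/*
 * Simplified sketch of the new helper, not the verbatim kernel code:
 * one call keeps the node counter, the owning memcg's counter, and the
 * per-cpu lruvec counter in sync.  Uncharged pages only touch the node
 * counter (see the "don't track uncharged pages" fix-up noted above).
 */
static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;

	/* node-level counter, always maintained */
	__mod_node_page_state(page_pgdat(page), idx, val);

	if (mem_cgroup_disabled() || !page->mem_cgroup)
		return;

	/* memcg-level counter */
	__mod_memcg_state(page->mem_cgroup, idx, val);

	/* lruvec-level per-cpu counter */
	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
	__this_cpu_add(pn->lruvec_stat->count[idx], val);
}

With such a helper available, the two call sites in mm/rmap.c below can drop the explicit mod_memcg_page_state() companion call and use a single __mod_lruvec_page_state() instead.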
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index b255743351e5..ced14f1af6dc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1145,8 +1145,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		if (!atomic_inc_and_test(&page->_mapcount))
 			goto out;
 	}
-	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mod_memcg_page_state(page, NR_FILE_MAPPED, nr);
+	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1181,12 +1180,11 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	}
 
 	/*
-	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
+	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mod_memcg_page_state(page, NR_FILE_MAPPED, -nr);
+	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);