diff options
author | Mel Gorman <mgorman@techsingularity.net> | 2016-07-28 18:46:14 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-28 19:07:41 -0400 |
commit | 50658e2e04c12d5cd628381c1b9cb69d0093a9c0 (patch) | |
tree | 9afad9b1cba152df6971cedb58d5d39d9a538c44 /mm/rmap.c | |
parent | 281e37265f2826ed401d84d6790226448ef3f0e8 (diff) |
mm: move page mapped accounting to the node
Reclaim makes decisions based on the number of pages that are mapped but
it's mixing node and zone information. Account NR_FILE_MAPPED and
NR_ANON_PAGES pages on the node.
Link: http://lkml.kernel.org/r/1467970510-21195-18-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 14 |
1 file changed, 7 insertions, 7 deletions
@@ -1214,7 +1214,7 @@ void do_page_add_anon_rmap(struct page *page, | |||
1214 | */ | 1214 | */ |
1215 | if (compound) | 1215 | if (compound) |
1216 | __inc_zone_page_state(page, NR_ANON_THPS); | 1216 | __inc_zone_page_state(page, NR_ANON_THPS); |
1217 | __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr); | 1217 | __mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr); |
1218 | } | 1218 | } |
1219 | if (unlikely(PageKsm(page))) | 1219 | if (unlikely(PageKsm(page))) |
1220 | return; | 1220 | return; |
@@ -1258,7 +1258,7 @@ void page_add_new_anon_rmap(struct page *page, | |||
1258 | /* increment count (starts at -1) */ | 1258 | /* increment count (starts at -1) */ |
1259 | atomic_set(&page->_mapcount, 0); | 1259 | atomic_set(&page->_mapcount, 0); |
1260 | } | 1260 | } |
1261 | __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr); | 1261 | __mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr); |
1262 | __page_set_anon_rmap(page, vma, address, 1); | 1262 | __page_set_anon_rmap(page, vma, address, 1); |
1263 | } | 1263 | } |
1264 | 1264 | ||
@@ -1293,7 +1293,7 @@ void page_add_file_rmap(struct page *page, bool compound) | |||
1293 | if (!atomic_inc_and_test(&page->_mapcount)) | 1293 | if (!atomic_inc_and_test(&page->_mapcount)) |
1294 | goto out; | 1294 | goto out; |
1295 | } | 1295 | } |
1296 | __mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr); | 1296 | __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr); |
1297 | mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); | 1297 | mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); |
1298 | out: | 1298 | out: |
1299 | unlock_page_memcg(page); | 1299 | unlock_page_memcg(page); |
@@ -1329,11 +1329,11 @@ static void page_remove_file_rmap(struct page *page, bool compound) | |||
1329 | } | 1329 | } |
1330 | 1330 | ||
1331 | /* | 1331 | /* |
1332 | * We use the irq-unsafe __{inc|mod}_zone_page_stat because | 1332 | * We use the irq-unsafe __{inc|mod}_zone_page_state because |
1333 | * these counters are not modified in interrupt context, and | 1333 | * these counters are not modified in interrupt context, and |
1334 | * pte lock(a spinlock) is held, which implies preemption disabled. | 1334 | * pte lock(a spinlock) is held, which implies preemption disabled. |
1335 | */ | 1335 | */ |
1336 | __mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr); | 1336 | __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr); |
1337 | mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); | 1337 | mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); |
1338 | 1338 | ||
1339 | if (unlikely(PageMlocked(page))) | 1339 | if (unlikely(PageMlocked(page))) |
@@ -1375,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page) | |||
1375 | clear_page_mlock(page); | 1375 | clear_page_mlock(page); |
1376 | 1376 | ||
1377 | if (nr) { | 1377 | if (nr) { |
1378 | __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr); | 1378 | __mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, -nr); |
1379 | deferred_split_huge_page(page); | 1379 | deferred_split_huge_page(page); |
1380 | } | 1380 | } |
1381 | } | 1381 | } |
@@ -1404,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound) | |||
1404 | * these counters are not modified in interrupt context, and | 1404 | * these counters are not modified in interrupt context, and |
1405 | * pte lock(a spinlock) is held, which implies preemption disabled. | 1405 | * pte lock(a spinlock) is held, which implies preemption disabled. |
1406 | */ | 1406 | */ |
1407 | __dec_zone_page_state(page, NR_ANON_PAGES); | 1407 | __dec_node_page_state(page, NR_ANON_PAGES); |
1408 | 1408 | ||
1409 | if (unlikely(PageMlocked(page))) | 1409 | if (unlikely(PageMlocked(page))) |
1410 | clear_page_mlock(page); | 1410 | clear_page_mlock(page); |