author	Mel Gorman <mgorman@techsingularity.net>	2016-07-28 18:46:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 19:07:41 -0400
commit	11fb998986a72aa7e997d96d63d52582a01228c5 (patch)
tree	fd9db095081c4fe2212db7de2757bfdf4645dc04 /mm/filemap.c
parent	4b9d0fab7166c9323f06d708518a35cf3a90426c (diff)
mm: move most file-based accounting to the node
There are now a number of accounting oddities, such as mapped file pages being accounted for on the node while the total number of file pages is accounted on the zone. This can be coped with to some extent, but it is confusing, so this patch moves the relevant file-based accounting to the node. Due to throttling logic in the page allocator for reliable OOM detection, it is still necessary to track dirty and writeback pages on a per-zone basis.

[mgorman@techsingularity.net: fix NR_ZONE_WRITE_PENDING accounting]
Link: http://lkml.kernel.org/r/1468404004-5085-5-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-20-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
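Every hunk below applies the same mechanical conversion: the zone-indexed vmstat helpers (__mod_zone_page_state() and friends, keyed by page_zone()) become their node-indexed counterparts, keyed by page_pgdat(). A minimal sketch of the pattern follows; the wrapper function here is hypothetical, while the vmstat helpers, page_pgdat() and NR_FILE_PAGES are the kernel's own after this patch:

#include <linux/mm.h>		/* page_pgdat() */
#include <linux/vmstat.h>	/* __mod_node_page_state() */

/* Hypothetical helper showing the zone -> node conversion pattern. */
static void file_pages_uncharge(struct page *page, int nr)
{
	/* Before this patch:
	 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
	 * After: the counter lives on the page's NUMA node (pg_data_t),
	 * matching where reclaim decisions are now made. */
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
}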
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 7ec50bd6f88c..c5f5e46c6f7f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -218,11 +218,11 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!PageHuge(page))
-		__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
-		__mod_zone_page_state(page_zone(page), NR_SHMEM, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
 		if (PageTransHuge(page))
-			__dec_zone_page_state(page, NR_SHMEM_THPS);
+			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else {
 		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
 	}
@@ -568,9 +568,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	 * hugetlb pages do not participate in page cache accounting.
 	 */
 	if (!PageHuge(new))
-		__inc_zone_page_state(new, NR_FILE_PAGES);
+		__inc_node_page_state(new, NR_FILE_PAGES);
 	if (PageSwapBacked(new))
-		__inc_zone_page_state(new, NR_SHMEM);
+		__inc_node_page_state(new, NR_SHMEM);
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	mem_cgroup_migrate(old, new);
 	radix_tree_preload_end();
@@ -677,7 +677,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!huge)
-		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_node_page_state(page, NR_FILE_PAGES);
 	spin_unlock_irq(&mapping->tree_lock);
 	if (!huge)
 		mem_cgroup_commit_charge(page, memcg, false, false);