summary | refs | log | tree | commit | diff | stats
path: root/drivers/base
diff options
context:
space:
mode:
author: Mel Gorman <mgorman@techsingularity.net> 2016-07-28 18:46:20 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2016-07-28 19:07:41 -0400
commit: 11fb998986a72aa7e997d96d63d52582a01228c5 (patch)
tree: fd9db095081c4fe2212db7de2757bfdf4645dc04 /drivers/base
parent: 4b9d0fab7166c9323f06d708518a35cf3a90426c (diff)
mm: move most file-based accounting to the node
There are now a number of accounting oddities such as mapped file pages being accounted for on the node while the total number of file pages are accounted on the zone. This can be coped with to some extent but it's confusing so this patch moves the relevant file-based accounting. Due to throttling logic in the page allocator for reliable OOM detection, it is still necessary to track dirty and writeback pages on a per-zone basis.

[mgorman@techsingularity.net: fix NR_ZONE_WRITE_PENDING accounting]
Link: http://lkml.kernel.org/r/1468404004-5085-5-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-20-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/base')
-rw-r--r-- drivers/base/node.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 6cd9ff43ee22..264cc214c4df 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -118,28 +118,28 @@ static ssize_t node_read_meminfo(struct device *dev,
118 "Node %d ShmemPmdMapped: %8lu kB\n" 118 "Node %d ShmemPmdMapped: %8lu kB\n"
119#endif 119#endif
120 , 120 ,
121 nid, K(sum_zone_node_page_state(nid, NR_FILE_DIRTY)), 121 nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
122 nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK)), 122 nid, K(node_page_state(pgdat, NR_WRITEBACK)),
123 nid, K(sum_zone_node_page_state(nid, NR_FILE_PAGES)), 123 nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
124 nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), 124 nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
125 nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), 125 nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
126 nid, K(i.sharedram), 126 nid, K(i.sharedram),
127 nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) * 127 nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) *
128 THREAD_SIZE / 1024, 128 THREAD_SIZE / 1024,
129 nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), 129 nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
130 nid, K(sum_zone_node_page_state(nid, NR_UNSTABLE_NFS)), 130 nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
131 nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), 131 nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
132 nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK_TEMP)), 132 nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
133 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) + 133 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
134 sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 134 sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
135 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)), 135 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
136#ifdef CONFIG_TRANSPARENT_HUGEPAGE 136#ifdef CONFIG_TRANSPARENT_HUGEPAGE
137 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 137 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
138 nid, K(sum_zone_node_page_state(nid, NR_ANON_THPS) * 138 nid, K(node_page_state(pgdat, NR_ANON_THPS) *
139 HPAGE_PMD_NR), 139 HPAGE_PMD_NR),
140 nid, K(sum_zone_node_page_state(nid, NR_SHMEM_THPS) * 140 nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
141 HPAGE_PMD_NR), 141 HPAGE_PMD_NR),
142 nid, K(sum_zone_node_page_state(nid, NR_SHMEM_PMDMAPPED) * 142 nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
143 HPAGE_PMD_NR)); 143 HPAGE_PMD_NR));
144#else 144#else
145 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); 145 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));