summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
author:    Johannes Weiner <hannes@cmpxchg.org>	2017-08-10 18:23:31 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>	2017-08-10 18:54:06 -0400
commit: d507e2ebd2c7be9138e5cf5c0cb1931c90c42ab1 (patch)
tree:   3ee794865430f69ddfba9d87a876bf3f497ad249 /mm/page_alloc.c
parent: 26273939ace935dd7553b31d279eab30b40f7b9a (diff)
mm: fix global NR_SLAB_.*CLAIMABLE counter reads
As Tetsuo points out:

 "Commit 385386cff4c6 ("mm: vmstat: move slab statistics from zone to
  node counters") broke "Slab:" field of /proc/meminfo.  It shows nearly
  0kB"

In addition to /proc/meminfo, this problem also affects the slab
counters in OOM/allocation-failure info dumps, can cause early -ENOMEM
from overcommit protection, and miscalculate image size requirements
during suspend-to-disk.

This is because the patch in question switched the slab counters from
the zone level to the node level, but forgot to update the global
accessor functions to read the aggregate node data instead of the
aggregate zone data.

Use global_node_page_state() to access the global slab counters.

Fixes: 385386cff4c6 ("mm: vmstat: move slab statistics from zone to node counters")
Link: http://lkml.kernel.org/r/20170801134256.5400-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Stefan Agner <stefan@agner.ch>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc32aa81f359..626a430e32d1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
 	 * Part of the reclaimable slab consists of items that are in use,
 	 * and cannot be freed. Cap this estimate at the low watermark.
 	 */
-	available += global_page_state(NR_SLAB_RECLAIMABLE) -
-		min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+		min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+		    wmark_low);
 
 	if (available < 0)
 		available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		global_node_page_state(NR_FILE_DIRTY),
 		global_node_page_state(NR_WRITEBACK),
 		global_node_page_state(NR_UNSTABLE_NFS),
-		global_page_state(NR_SLAB_RECLAIMABLE),
-		global_page_state(NR_SLAB_UNRECLAIMABLE),
+		global_node_page_state(NR_SLAB_RECLAIMABLE),
+		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_node_page_state(NR_FILE_MAPPED),
 		global_node_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),