summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2018-10-26 18:05:46 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-26 19:26:32 -0400
commitb29940c1abd7a4c3abeb926df0a5ec84d6902d47 (patch)
tree66fc77981adf0ad7ac1ca34c97150ad6585d974f
parent2e03b4bc4ae84fcc0eee00e5ba5d228901d38809 (diff)
mm: rename and change semantics of nr_indirectly_reclaimable_bytes
The vmstat counter NR_INDIRECTLY_RECLAIMABLE_BYTES was introduced by commit eb59254608bc ("mm: introduce NR_INDIRECTLY_RECLAIMABLE_BYTES") with the goal of accounting objects that can be reclaimed, but cannot be allocated via a SLAB_RECLAIM_ACCOUNT cache. This is now possible via kmalloc() with __GFP_RECLAIMABLE flag, and the dcache external names user is converted. The counter is however still useful for accounting direct page allocations (i.e. not slab) with a shrinker, such as the ION page pool. So keep it, and: - change granularity to pages to be more like other counters; sub-page allocations should be able to use kmalloc - rename the counter to NR_KERNEL_MISC_RECLAIMABLE - expose the counter again in vmstat as "nr_kernel_misc_reclaimable"; we can again remove the check for not printing "hidden" counters Link: http://lkml.kernel.org/r/20180731090649.16028-5-vbabka@suse.cz Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Christoph Lameter <cl@linux.com> Acked-by: Roman Gushchin <guro@fb.com> Cc: Vijayanand Jitta <vjitta@codeaurora.org> Cc: Laura Abbott <labbott@redhat.com> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: David Rientjes <rientjes@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c8
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--mm/page_alloc.c19
-rw-r--r--mm/util.c3
-rw-r--r--mm/vmstat.c6
5 files changed, 14 insertions, 24 deletions
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 9bc56eb48d2a..0d2a95957ee8 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -33,8 +33,8 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
 		pool->low_count++;
 	}
 
-	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
-			    (1 << (PAGE_SHIFT + pool->order)));
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+			    1 << pool->order);
 	mutex_unlock(&pool->mutex);
 }
 
@@ -53,8 +53,8 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
 	}
 
 	list_del(&page->lru);
-	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
-			    -(1 << (PAGE_SHIFT + pool->order)));
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+			    -(1 << pool->order));
 	return page;
 }
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d4b0c79d2924..7bbeba21f6a3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -180,7 +180,7 @@ enum node_stat_item {
 	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
-	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
+	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
 	NR_VM_NODE_STAT_ITEMS
 };
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 747031c2352d..20f25d06c00c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4701,6 +4701,7 @@ long si_mem_available(void)
 	unsigned long pagecache;
 	unsigned long wmark_low = 0;
 	unsigned long pages[NR_LRU_LISTS];
+	unsigned long reclaimable;
 	struct zone *zone;
 	int lru;
 
@@ -4726,19 +4727,13 @@ long si_mem_available(void)
 	available += pagecache;
 
 	/*
-	 * Part of the reclaimable slab consists of items that are in use,
-	 * and cannot be freed. Cap this estimate at the low watermark.
+	 * Part of the reclaimable slab and other kernel memory consists of
+	 * items that are in use, and cannot be freed. Cap this estimate at the
+	 * low watermark.
 	 */
-	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
-		     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
-			 wmark_low);
-
-	/*
-	 * Part of the kernel memory, which can be released under memory
-	 * pressure.
-	 */
-	available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
-		PAGE_SHIFT;
+	reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
+			global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
+	available += reclaimable - min(reclaimable / 2, wmark_low);
 
 	if (available < 0)
 		available = 0;
diff --git a/mm/util.c b/mm/util.c
index 470f5cd80b64..f740754f5012 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -678,8 +678,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		 * Part of the kernel memory, which can be released
 		 * under memory pressure.
 		 */
-		free += global_node_page_state(
-			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+		free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
 
 		/*
 		 * Leave reserved pages. The pages are not for anonymous pages.
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7878da76abf2..2cec2fa4c8ae 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1161,7 +1161,7 @@ const char * const vmstat_text[] = {
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
-	"", /* nr_indirectly_reclaimable */
+	"nr_kernel_misc_reclaimable",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
@@ -1706,10 +1706,6 @@ static int vmstat_show(struct seq_file *m, void *arg)
 	unsigned long *l = arg;
 	unsigned long off = l - (unsigned long *)m->private;
 
-	/* Skip hidden vmstat items. */
-	if (*vmstat_text[off] == '\0')
-		return 0;
-
 	seq_puts(m, vmstat_text[off]);
 	seq_put_decimal_ull(m, " ", *l);
 	seq_putc(m, '\n');