author		Johannes Weiner <hannes@cmpxchg.org>	2017-07-06 18:40:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-06 19:24:35 -0400
commit		385386cff4c6f047907655e05791d88198c4c523 (patch)
tree		d2dad3e3c9acdd389dd63a89c498fbedb284befa /mm/page_alloc.c
parent		2b2695f5fdf185092a72c81ca2c09ed1b9b37416 (diff)
mm: vmstat: move slab statistics from zone to node counters
Patch series "mm: per-lruvec slab stats"

Josef is working on a new approach to balancing slab caches and the page
cache.  For this to work, he needs slab cache statistics on the lruvec
level.  These patches implement that by adding infrastructure that allows
updating and reading generic VM stat items per lruvec, then switches some
existing VM accounting sites, including the slab accounting ones, to this
new cgroup-aware API.

I'll follow up with more patches on this, because there is actually
substantial simplification that can be done to the memory controller when
we replace private memcg accounting with making the existing VM accounting
sites cgroup-aware.  But this is enough for Josef to base his slab reclaim
work on, so here goes.

This patch (of 5):

To re-implement slab cache vs. page cache balancing, we'll need the slab
counters at the lruvec level, which, ever since lru reclaim was moved from
the zone to the node, is the intersection of the node, not the zone, and
the memcg.

We could retain the per-zone counters for when the page allocator dumps
its memory information on failures, and have counters on both levels -
which on all but NUMA node 0 is usually redundant.  But let's keep it
simple for now and just move them.  If anybody complains we can restore
the per-zone counters.

[hannes@cmpxchg.org: fix oops]
  Link: http://lkml.kernel.org/r/20170605183511.GA8915@cmpxchg.org
Link: http://lkml.kernel.org/r/20170530181724.27197-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
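At the accessor level, the move amounts to reading the same item through
the node (pgdat) instead of the zone.  A minimal sketch, assuming the
companion vmstat patch of this series has already moved
NR_SLAB_RECLAIMABLE from enum zone_stat_item to enum node_stat_item;
zone_page_state() and node_page_state() are the existing vmstat read
helpers, and slab_reclaimable_pages_sketch() is a hypothetical name:

	/* Sketch only, not part of this patch. */
	static unsigned long slab_reclaimable_pages_sketch(struct zone *zone)
	{
		/* before the series: the counter lived on the zone */
		/* return zone_page_state(zone, NR_SLAB_RECLAIMABLE); */

		/* after the series: the same item is a node counter */
		return node_page_state(zone->zone_pgdat, NR_SLAB_RECLAIMABLE);
	}

The item keeps its name; only the enum it belongs to, and hence the
granularity it is accounted at, changes.  That is why the
show_free_areas() hunks below stop printing it per zone.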
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8aa860017d66..a35add8d7c0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4643,8 +4643,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			" present:%lukB"
 			" managed:%lukB"
 			" mlocked:%lukB"
-			" slab_reclaimable:%lukB"
-			" slab_unreclaimable:%lukB"
 			" kernel_stack:%lukB"
 			" pagetables:%lukB"
 			" bounce:%lukB"
@@ -4666,8 +4664,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(zone->present_pages),
 			K(zone->managed_pages),
 			K(zone_page_state(zone, NR_MLOCK)),
-			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
-			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 			zone_page_state(zone, NR_KERNEL_STACK_KB),
 			K(zone_page_state(zone, NR_PAGETABLE)),
 			K(zone_page_state(zone, NR_BOUNCE)),
@@ -5153,6 +5149,7 @@ static void build_zonelists(pg_data_t *pgdat)
  */
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 static void setup_zone_pageset(struct zone *zone);
 
 /*
@@ -6053,6 +6050,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 	spin_lock_init(&pgdat->lru_lock);
 	lruvec_init(node_lruvec(pgdat));
 
+	pgdat->per_cpu_nodestats = &boot_nodestats;
+
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, freesize, memmap_pages;
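The boot_nodestats hunks mirror the long-standing boot_pageset pattern
and correspond to the "fix oops" note in the changelog: node stat
updates start from pgdat->per_cpu_nodestats, which is only allocated
later in setup_per_cpu_pageset(), so an early-boot update would
dereference a NULL pointer.  A hedged sketch of that update path,
paraphrased from the vmstat code rather than taken from this diff;
node_stat_update_sketch() is a hypothetical name:

	/*
	 * Paraphrase of the node stat update path: every update goes
	 * through the pgdat's per-cpu base pointer.  Pointing it at the
	 * static boot_nodestats gives early-boot updates a valid scratch
	 * area until the real per-cpu areas are allocated.
	 */
	static void node_stat_update_sketch(struct pglist_data *pgdat,
					    enum node_stat_item item, long delta)
	{
		struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;

		/* a NULL pcp here is the reported oops; boot_nodestats
		 * guarantees the pointer is never NULL */
		__this_cpu_add(pcp->vm_node_stat_diff[item], delta);
	}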