aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
author: Vladimir Davydov <vdavydov@virtuozzo.com>  2016-03-17 17:17:35 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 18:09:34 -0400
commit27ee57c93ff00b8a2d6c6dd6b0b3dddda7b43b77 (patch)
treea848f3e480c7b486a825c01b2cb6dd09ad79459e /mm
parent72b54e7314a2e7a68567c92bbb32fe2598a3c783 (diff)
mm: memcontrol: report slab usage in cgroup2 memory.stat
Show how much memory is used for storing reclaimable and unreclaimable in-kernel data structures allocated from slab caches.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c8
-rw-r--r--mm/slab.c8
-rw-r--r--mm/slab.h30
-rw-r--r--mm/slub.c3
4 files changed, 43 insertions, 6 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 430266071c36..3ad64bf464fd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5106,6 +5106,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
5106 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE); 5106 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5107 seq_printf(m, "file %llu\n", 5107 seq_printf(m, "file %llu\n",
5108 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE); 5108 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5109 seq_printf(m, "slab %llu\n",
5110 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5111 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5109 seq_printf(m, "sock %llu\n", 5112 seq_printf(m, "sock %llu\n",
5110 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5113 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5111 5114
@@ -5126,6 +5129,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
5126 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5129 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5127 } 5130 }
5128 5131
5132 seq_printf(m, "slab_reclaimable %llu\n",
5133 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5134 seq_printf(m, "slab_unreclaimable %llu\n",
5135 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5136
5129 /* Accumulated memory events */ 5137 /* Accumulated memory events */
5130 5138
5131 seq_printf(m, "pgfault %lu\n", 5139 seq_printf(m, "pgfault %lu\n",
diff --git a/mm/slab.c b/mm/slab.c
index 852fc5c79829..56dd0df2a8ce 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1442,9 +1442,10 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1442 */ 1442 */
1443static void kmem_freepages(struct kmem_cache *cachep, struct page *page) 1443static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1444{ 1444{
1445 const unsigned long nr_freed = (1 << cachep->gfporder); 1445 int order = cachep->gfporder;
1446 unsigned long nr_freed = (1 << order);
1446 1447
1447 kmemcheck_free_shadow(page, cachep->gfporder); 1448 kmemcheck_free_shadow(page, order);
1448 1449
1449 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1450 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1450 sub_zone_page_state(page_zone(page), 1451 sub_zone_page_state(page_zone(page),
@@ -1461,7 +1462,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1461 1462
1462 if (current->reclaim_state) 1463 if (current->reclaim_state)
1463 current->reclaim_state->reclaimed_slab += nr_freed; 1464 current->reclaim_state->reclaimed_slab += nr_freed;
1464 __free_kmem_pages(page, cachep->gfporder); 1465 memcg_uncharge_slab(page, order, cachep);
1466 __free_pages(page, order);
1465} 1467}
1466 1468
1467static void kmem_rcu_free(struct rcu_head *head) 1469static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slab.h b/mm/slab.h
index b7934361f026..ff39a8fc3b3f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -246,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
246 gfp_t gfp, int order, 246 gfp_t gfp, int order,
247 struct kmem_cache *s) 247 struct kmem_cache *s)
248{ 248{
249 int ret;
250
249 if (!memcg_kmem_enabled()) 251 if (!memcg_kmem_enabled())
250 return 0; 252 return 0;
251 if (is_root_cache(s)) 253 if (is_root_cache(s))
252 return 0; 254 return 0;
253 return __memcg_kmem_charge_memcg(page, gfp, order, 255
254 s->memcg_params.memcg); 256 ret = __memcg_kmem_charge_memcg(page, gfp, order,
257 s->memcg_params.memcg);
258 if (ret)
259 return ret;
260
261 memcg_kmem_update_page_stat(page,
262 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
263 MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
264 1 << order);
265 return 0;
266}
267
268static __always_inline void memcg_uncharge_slab(struct page *page, int order,
269 struct kmem_cache *s)
270{
271 memcg_kmem_update_page_stat(page,
272 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
273 MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
274 -(1 << order));
275 memcg_kmem_uncharge(page, order);
255} 276}
256 277
257extern void slab_init_memcg_params(struct kmem_cache *); 278extern void slab_init_memcg_params(struct kmem_cache *);
@@ -294,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
294 return 0; 315 return 0;
295} 316}
296 317
318static inline void memcg_uncharge_slab(struct page *page, int order,
319 struct kmem_cache *s)
320{
321}
322
297static inline void slab_init_memcg_params(struct kmem_cache *s) 323static inline void slab_init_memcg_params(struct kmem_cache *s)
298{ 324{
299} 325}
diff --git a/mm/slub.c b/mm/slub.c
index 6c91324f9370..712d53474082 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1540,7 +1540,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1540 page_mapcount_reset(page); 1540 page_mapcount_reset(page);
1541 if (current->reclaim_state) 1541 if (current->reclaim_state)
1542 current->reclaim_state->reclaimed_slab += pages; 1542 current->reclaim_state->reclaimed_slab += pages;
1543 __free_kmem_pages(page, order); 1543 memcg_uncharge_slab(page, order, s);
1544 __free_pages(page, order);
1544} 1545}
1545 1546
1546#define need_reserve_slab_rcu \ 1547#define need_reserve_slab_rcu \