about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@virtuozzo.com>2016-03-17 17:17:35 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-17 18:09:34 -0400
commit27ee57c93ff00b8a2d6c6dd6b0b3dddda7b43b77 (patch)
treea848f3e480c7b486a825c01b2cb6dd09ad79459e
parent72b54e7314a2e7a68567c92bbb32fe2598a3c783 (diff)
mm: memcontrol: report slab usage in cgroup2 memory.stat
Show how much memory is used for storing reclaimable and unreclaimable in-kernel data structures allocated from slab caches.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--Documentation/cgroup-v2.txt15
-rw-r--r--include/linux/memcontrol.h21
-rw-r--r--mm/memcontrol.c8
-rw-r--r--mm/slab.c8
-rw-r--r--mm/slab.h30
-rw-r--r--mm/slub.c3
6 files changed, 79 insertions, 6 deletions
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index ff49cf901148..e4e0c1d78cee 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -843,6 +843,11 @@ PAGE_SIZE multiple when read back.
843 Amount of memory used to cache filesystem data, 843 Amount of memory used to cache filesystem data,
844 including tmpfs and shared memory. 844 including tmpfs and shared memory.
845 845
846 slab
847
848 Amount of memory used for storing in-kernel data
849 structures.
850
846 sock 851 sock
847 852
848 Amount of memory used in network transmission buffers 853 Amount of memory used in network transmission buffers
@@ -871,6 +876,16 @@ PAGE_SIZE multiple when read back.
871 on the internal memory management lists used by the 876 on the internal memory management lists used by the
872 page reclaim algorithm 877 page reclaim algorithm
873 878
879 slab_reclaimable
880
881 Part of "slab" that might be reclaimed, such as
882 dentries and inodes.
883
884 slab_unreclaimable
885
886 Part of "slab" that cannot be reclaimed on memory
887 pressure.
888
874 pgfault 889 pgfault
875 890
876 Total number of page faults incurred 891 Total number of page faults incurred
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f0c4bec6565b..e7af4834ffea 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,6 +53,8 @@ enum mem_cgroup_stat_index {
53 MEM_CGROUP_STAT_NSTATS, 53 MEM_CGROUP_STAT_NSTATS,
54 /* default hierarchy stats */ 54 /* default hierarchy stats */
55 MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS, 55 MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
56 MEMCG_SLAB_RECLAIMABLE,
57 MEMCG_SLAB_UNRECLAIMABLE,
56 MEMCG_NR_STAT, 58 MEMCG_NR_STAT,
57}; 59};
58 60
@@ -883,6 +885,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
883 if (memcg_kmem_enabled()) 885 if (memcg_kmem_enabled())
884 __memcg_kmem_put_cache(cachep); 886 __memcg_kmem_put_cache(cachep);
885} 887}
888
889/**
890 * memcg_kmem_update_page_stat - update kmem page state statistics
891 * @page: the page
892 * @idx: page state item to account
893 * @val: number of pages (positive or negative)
894 */
895static inline void memcg_kmem_update_page_stat(struct page *page,
896 enum mem_cgroup_stat_index idx, int val)
897{
898 if (memcg_kmem_enabled() && page->mem_cgroup)
899 this_cpu_add(page->mem_cgroup->stat->count[idx], val);
900}
901
886#else 902#else
887#define for_each_memcg_cache_index(_idx) \ 903#define for_each_memcg_cache_index(_idx) \
888 for (; NULL; ) 904 for (; NULL; )
@@ -928,6 +944,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
928static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) 944static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
929{ 945{
930} 946}
947
948static inline void memcg_kmem_update_page_stat(struct page *page,
949 enum mem_cgroup_stat_index idx, int val)
950{
951}
931#endif /* CONFIG_MEMCG && !CONFIG_SLOB */ 952#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
932 953
933#endif /* _LINUX_MEMCONTROL_H */ 954#endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 430266071c36..3ad64bf464fd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5106,6 +5106,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
5106 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE); 5106 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5107 seq_printf(m, "file %llu\n", 5107 seq_printf(m, "file %llu\n",
5108 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE); 5108 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5109 seq_printf(m, "slab %llu\n",
5110 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5111 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5109 seq_printf(m, "sock %llu\n", 5112 seq_printf(m, "sock %llu\n",
5110 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5113 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5111 5114
@@ -5126,6 +5129,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
5126 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5129 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5127 } 5130 }
5128 5131
5132 seq_printf(m, "slab_reclaimable %llu\n",
5133 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5134 seq_printf(m, "slab_unreclaimable %llu\n",
5135 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5136
5129 /* Accumulated memory events */ 5137 /* Accumulated memory events */
5130 5138
5131 seq_printf(m, "pgfault %lu\n", 5139 seq_printf(m, "pgfault %lu\n",
diff --git a/mm/slab.c b/mm/slab.c
index 852fc5c79829..56dd0df2a8ce 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1442,9 +1442,10 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1442 */ 1442 */
1443static void kmem_freepages(struct kmem_cache *cachep, struct page *page) 1443static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1444{ 1444{
1445 const unsigned long nr_freed = (1 << cachep->gfporder); 1445 int order = cachep->gfporder;
1446 unsigned long nr_freed = (1 << order);
1446 1447
1447 kmemcheck_free_shadow(page, cachep->gfporder); 1448 kmemcheck_free_shadow(page, order);
1448 1449
1449 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1450 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1450 sub_zone_page_state(page_zone(page), 1451 sub_zone_page_state(page_zone(page),
@@ -1461,7 +1462,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1461 1462
1462 if (current->reclaim_state) 1463 if (current->reclaim_state)
1463 current->reclaim_state->reclaimed_slab += nr_freed; 1464 current->reclaim_state->reclaimed_slab += nr_freed;
1464 __free_kmem_pages(page, cachep->gfporder); 1465 memcg_uncharge_slab(page, order, cachep);
1466 __free_pages(page, order);
1465} 1467}
1466 1468
1467static void kmem_rcu_free(struct rcu_head *head) 1469static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slab.h b/mm/slab.h
index b7934361f026..ff39a8fc3b3f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -246,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
246 gfp_t gfp, int order, 246 gfp_t gfp, int order,
247 struct kmem_cache *s) 247 struct kmem_cache *s)
248{ 248{
249 int ret;
250
249 if (!memcg_kmem_enabled()) 251 if (!memcg_kmem_enabled())
250 return 0; 252 return 0;
251 if (is_root_cache(s)) 253 if (is_root_cache(s))
252 return 0; 254 return 0;
253 return __memcg_kmem_charge_memcg(page, gfp, order, 255
254 s->memcg_params.memcg); 256 ret = __memcg_kmem_charge_memcg(page, gfp, order,
257 s->memcg_params.memcg);
258 if (ret)
259 return ret;
260
261 memcg_kmem_update_page_stat(page,
262 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
263 MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
264 1 << order);
265 return 0;
266}
267
268static __always_inline void memcg_uncharge_slab(struct page *page, int order,
269 struct kmem_cache *s)
270{
271 memcg_kmem_update_page_stat(page,
272 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
273 MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
274 -(1 << order));
275 memcg_kmem_uncharge(page, order);
255} 276}
256 277
257extern void slab_init_memcg_params(struct kmem_cache *); 278extern void slab_init_memcg_params(struct kmem_cache *);
@@ -294,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
294 return 0; 315 return 0;
295} 316}
296 317
318static inline void memcg_uncharge_slab(struct page *page, int order,
319 struct kmem_cache *s)
320{
321}
322
297static inline void slab_init_memcg_params(struct kmem_cache *s) 323static inline void slab_init_memcg_params(struct kmem_cache *s)
298{ 324{
299} 325}
diff --git a/mm/slub.c b/mm/slub.c
index 6c91324f9370..712d53474082 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1540,7 +1540,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1540 page_mapcount_reset(page); 1540 page_mapcount_reset(page);
1541 if (current->reclaim_state) 1541 if (current->reclaim_state)
1542 current->reclaim_state->reclaimed_slab += pages; 1542 current->reclaim_state->reclaimed_slab += pages;
1543 __free_kmem_pages(page, order); 1543 memcg_uncharge_slab(page, order, s);
1544 __free_pages(page, order);
1544} 1545}
1545 1546
1546#define need_reserve_slab_rcu \ 1547#define need_reserve_slab_rcu \