author     Vladimir Davydov <vdavydov@parallels.com>   2014-12-10 18:44:19 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-10 20:41:07 -0500
commit     b047501cd9f11d5e1d54ea0f90e2b10754021a0e (patch)
tree       5532702b7921006bc6ffd02a04e481c04fa20481 /mm
parent     4ef461e8f4dd13a2e64c6c8f00c420d62294e2d4 (diff)
memcg: use generic slab iterators for showing slabinfo
Let's use generic slab_start/next/stop for showing memcg caches info.  In
contrast to the current implementation, this will work even if all memcg
caches' info doesn't fit into a seq buffer (a page), plus it simply looks
neater.

Actually, the main reason I do this isn't mere cleanup.  I'm going to zap
the memcg_slab_caches list, because I find it useless provided we have the
slab_caches list, and this patch is a step in this direction.

It should be noted that before this patch an attempt to read
memory.kmem.slabinfo of a cgroup that doesn't have kmem limit set resulted
in -EIO, while after this patch it will silently show nothing except the
header, but I don't think it will frustrate anyone.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/memcontrol.c    25
-rw-r--r--   mm/slab.h           1
-rw-r--r--   mm/slab_common.c   25
3 files changed, 24 insertions(+), 27 deletions(-)
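A minimal illustration of the seq_file start/next/stop/show contract that slab_start/slab_next/slab_stop implement (a sketch for context, not code from this commit; the example_* names and example_list/example_lock are hypothetical stand-ins for slab_caches and slab_mutex): each ->show() call emits a single entry and the walk can resume at any offset, so the seq_file core copes with output that doesn't fit into one seq buffer page, which the removed one-shot mem_cgroup_slabinfo_read() could not.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static LIST_HEAD(example_list);		/* list being walked (cf. slab_caches) */
static DEFINE_MUTEX(example_lock);	/* protects example_list (cf. slab_mutex) */

struct example_entry {
	struct list_head list;
	const char *name;
};

static void *example_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&example_lock);			/* pin the list for the walk */
	return seq_list_start(&example_list, *pos);	/* element at offset *pos */
}

static void *example_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &example_list, pos);	/* advance one element, bump *pos */
}

static void example_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&example_lock);			/* always paired with start() */
}

static int example_show(struct seq_file *m, void *p)
{
	struct example_entry *e = list_entry(p, struct example_entry, list);

	seq_printf(m, "%s\n", e->name);			/* emit exactly one record */
	return 0;
}

Wired into a cftype through .seq_start/.seq_next/.seq_stop/.seq_show, as the mm/memcontrol.c hunk below does with the real iterators, the seq_file core simply re-enters the walk until every cache has been shown, instead of requiring the whole listing to be built in a single callback.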
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 32e3b191857d..9d30129b0d4a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2547,26 +2547,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
-#ifdef CONFIG_SLABINFO
-static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
-{
-	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	struct memcg_cache_params *params;
-
-	if (!memcg_kmem_is_active(memcg))
-		return -EIO;
-
-	print_slabinfo_header(m);
-
-	mutex_lock(&memcg_slab_mutex);
-	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
-		cache_show(memcg_params_to_cache(params), m);
-	mutex_unlock(&memcg_slab_mutex);
-
-	return 0;
-}
-#endif
-
 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
 			     unsigned long nr_pages)
 {
@@ -4708,7 +4688,10 @@ static struct cftype mem_cgroup_files[] = {
 #ifdef CONFIG_SLABINFO
 	{
 		.name = "kmem.slabinfo",
-		.seq_show = mem_cgroup_slabinfo_read,
+		.seq_start = slab_start,
+		.seq_next = slab_next,
+		.seq_stop = slab_stop,
+		.seq_show = memcg_slab_show,
 	},
 #endif
 #endif
diff --git a/mm/slab.h b/mm/slab.h
index 078acbcf64e8..1cf4005482dd 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -360,5 +360,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 void *slab_start(struct seq_file *m, loff_t *pos);
 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 void slab_stop(struct seq_file *m, void *p);
+int memcg_slab_show(struct seq_file *m, void *p);
 
 #endif /* MM_SLAB_H */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2a3f5ff410cf..e03dd6f2a272 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -811,7 +811,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 #define SLABINFO_RIGHTS S_IRUSR
 #endif
 
-void print_slabinfo_header(struct seq_file *m)
+static void print_slabinfo_header(struct seq_file *m)
 {
 	/*
 	 * Output format version, so at least we can change it
@@ -876,7 +876,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
 	}
 }
 
-int cache_show(struct kmem_cache *s, struct seq_file *m)
+static void cache_show(struct kmem_cache *s, struct seq_file *m)
 {
 	struct slabinfo sinfo;
 
@@ -895,7 +895,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m)
 		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
 	slabinfo_show_stats(m, s);
 	seq_putc(m, '\n');
-	return 0;
 }
 
 static int slab_show(struct seq_file *m, void *p)
@@ -904,10 +903,24 @@ static int slab_show(struct seq_file *m, void *p)
 
 	if (p == slab_caches.next)
 		print_slabinfo_header(m);
-	if (!is_root_cache(s))
-		return 0;
-	return cache_show(s, m);
+	if (is_root_cache(s))
+		cache_show(s, m);
+	return 0;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+int memcg_slab_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
+	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+
+	if (p == slab_caches.next)
+		print_slabinfo_header(m);
+	if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
+		cache_show(s, m);
+	return 0;
 }
+#endif
 
 /*
  * slabinfo_op - iterator that generates /proc/slabinfo