-rw-r--r--   include/linux/memcontrol.h |  8
-rw-r--r--   include/linux/slab.h       |  4
-rw-r--r--   mm/memcontrol.c            | 30
-rw-r--r--   mm/slab.h                  | 27
-rw-r--r--   mm/slab_common.c           | 44
5 files changed, 108 insertions, 5 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e119f3ef793c..8dc7c746b44f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -420,6 +420,11 @@ static inline void sock_release_memcg(struct sock *sk)
 
 #ifdef CONFIG_MEMCG_KMEM
 extern struct static_key memcg_kmem_enabled_key;
+
+extern int memcg_limited_groups_array_size;
+#define for_each_memcg_cache_index(_idx)        \
+        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
+
 static inline bool memcg_kmem_enabled(void)
 {
         return static_key_false(&memcg_kmem_enabled_key);
@@ -557,6 +562,9 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
         return __memcg_kmem_get_cache(cachep, gfp);
 }
 #else
+#define for_each_memcg_cache_index(_idx)        \
+        for (; NULL; )
+
 static inline bool memcg_kmem_enabled(void)
 {
         return false;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 869efb8d2377..b9278663f22a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -220,6 +220,10 @@ struct memcg_cache_params {
 
 int memcg_update_all_caches(int num_memcgs);
 
+struct seq_file;
+int cache_show(struct kmem_cache *s, struct seq_file *m);
+void print_slabinfo_header(struct seq_file *m);
+
 /*
  * Common kmalloc functions provided by all allocators
  */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7633e0d429e0..a32d83c2e353 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -572,7 +572,8 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
  * increase it.
  */
 static DEFINE_IDA(kmem_limited_groups);
-static int memcg_limited_groups_array_size;
+int memcg_limited_groups_array_size;
+
 /*
  * MIN_SIZE is different than 1, because we would like to avoid going through
  * the alloc/free process all the time. In a small machine, 4 kmem-limited
@@ -2794,6 +2795,27 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
         return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
 }
 
+#ifdef CONFIG_SLABINFO
+static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
+                                    struct seq_file *m)
+{
+        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+        struct memcg_cache_params *params;
+
+        if (!memcg_can_account_kmem(memcg))
+                return -EIO;
+
+        print_slabinfo_header(m);
+
+        mutex_lock(&memcg->slab_caches_mutex);
+        list_for_each_entry(params, &memcg->memcg_slab_caches, list)
+                cache_show(memcg_params_to_cache(params), m);
+        mutex_unlock(&memcg->slab_caches_mutex);
+
+        return 0;
+}
+#endif
+
 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
         struct res_counter *fail_res;
@@ -5822,6 +5844,12 @@ static struct cftype mem_cgroup_files[] = {
                 .trigger = mem_cgroup_reset,
                 .read = mem_cgroup_read,
         },
+#ifdef CONFIG_SLABINFO
+        {
+                .name = "kmem.slabinfo",
+                .read_seq_string = mem_cgroup_slabinfo_read,
+        },
+#endif
 #endif
         { },    /* terminate */
 };
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -138,6 +138,23 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
         return (p == s) ||
                 (s->memcg_params && (p == s->memcg_params->root_cache));
 }
+
+/*
+ * We use suffixes to the name in memcg because we can't have caches
+ * created in the system with the same name. But when we print them
+ * locally, better refer to them with the base name
+ */
+static inline const char *cache_name(struct kmem_cache *s)
+{
+        if (!is_root_cache(s))
+                return s->memcg_params->root_cache->name;
+        return s->name;
+}
+
+static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+{
+        return s->memcg_params->memcg_caches[idx];
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
@@ -163,6 +180,16 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
 {
         return true;
 }
+
+static inline const char *cache_name(struct kmem_cache *s)
+{
+        return s->name;
+}
+
+static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+{
+        return NULL;
+}
 #endif
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 080a43804bf1..081f1b8d9a7b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -322,7 +322,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 
 
 #ifdef CONFIG_SLABINFO
-static void print_slabinfo_header(struct seq_file *m)
+void print_slabinfo_header(struct seq_file *m)
 {
         /*
          * Output format version, so at least we can change it
@@ -366,16 +366,43 @@ static void s_stop(struct seq_file *m, void *p)
         mutex_unlock(&slab_mutex);
 }
 
-static int s_show(struct seq_file *m, void *p)
+static void
+memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
+{
+        struct kmem_cache *c;
+        struct slabinfo sinfo;
+        int i;
+
+        if (!is_root_cache(s))
+                return;
+
+        for_each_memcg_cache_index(i) {
+                c = cache_from_memcg(s, i);
+                if (!c)
+                        continue;
+
+                memset(&sinfo, 0, sizeof(sinfo));
+                get_slabinfo(c, &sinfo);
+
+                info->active_slabs += sinfo.active_slabs;
+                info->num_slabs += sinfo.num_slabs;
+                info->shared_avail += sinfo.shared_avail;
+                info->active_objs += sinfo.active_objs;
+                info->num_objs += sinfo.num_objs;
+        }
+}
+
+int cache_show(struct kmem_cache *s, struct seq_file *m)
 {
-        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
         struct slabinfo sinfo;
 
         memset(&sinfo, 0, sizeof(sinfo));
         get_slabinfo(s, &sinfo);
 
+        memcg_accumulate_slabinfo(s, &sinfo);
+
         seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-                   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
+                   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
                    sinfo.objects_per_slab, (1 << sinfo.cache_order));
 
         seq_printf(m, " : tunables %4u %4u %4u",
@@ -387,6 +414,15 @@ static int s_show(struct seq_file *m, void *p)
         return 0;
 }
 
+static int s_show(struct seq_file *m, void *p)
+{
+        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
+
+        if (!is_root_cache(s))
+                return 0;
+        return cache_show(s, m);
+}
+
 /*
  * slabinfo_op - iterator that generates /proc/slabinfo
  *