Diffstat (limited to 'mm/slab_common.c')
 mm/slab_common.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 105 insertions(+), 13 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a8e76d79ee65..3f3cd97d3fdf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
+#include <linux/memcontrol.h>
 
 #include "slab.h"
 
@@ -27,7 +28,8 @@ DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(const char *name, size_t size)
+static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
+				   size_t size)
 {
 	struct kmem_cache *s = NULL;
 
@@ -53,7 +55,13 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
 			continue;
 		}
 
-		if (!strcmp(s->name, name)) {
+		/*
+		 * For simplicity, we won't check this in the list of memcg
+		 * caches. We have control over memcg naming, and if there
+		 * aren't duplicates in the global list, there won't be any
+		 * duplicates in the memcg lists as well.
+		 */
+		if (!memcg && !strcmp(s->name, name)) {
 			pr_err("%s (%s): Cache name already exists.\n",
 			       __func__, name);
 			dump_stack();
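
The comment above relies on memcg cache names being generated by the kernel rather than supplied by callers, so collisions can only come from the global list. The standalone sketch below (not kernel code: the real naming lives in mm/memcontrol.c, and the "%s(%d)" format is purely an assumption) shows why a child name derived from a unique parent name plus a per-memcg index is itself unique:

#include <stdio.h>

/*
 * Hypothetical child-name construction: if parent names are unique and
 * each memcg has a unique index, the (parent, index) pair -- and hence
 * the derived string -- is unique as well.
 */
static void memcg_child_name(char *buf, size_t len,
			     const char *parent, int memcg_id)
{
	snprintf(buf, len, "%s(%d)", parent, memcg_id);
}

int main(void)
{
	char buf[64];

	memcg_child_name(buf, sizeof(buf), "dentry", 3);
	printf("%s\n", buf);	/* prints "dentry(3)" */
	return 0;
}
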
@@ -66,12 +74,41 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
 	return 0;
 }
 #else
-static inline int kmem_cache_sanity_check(const char *name, size_t size)
+static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
+					  const char *name, size_t size)
 {
 	return 0;
 }
 #endif
 
+#ifdef CONFIG_MEMCG_KMEM
+int memcg_update_all_caches(int num_memcgs)
+{
+	struct kmem_cache *s;
+	int ret = 0;
+	mutex_lock(&slab_mutex);
+
+	list_for_each_entry(s, &slab_caches, list) {
+		if (!is_root_cache(s))
+			continue;
+
+		ret = memcg_update_cache_size(s, num_memcgs);
+		/*
+		 * See comment in memcontrol.c, memcg_update_cache_size:
+		 * Instead of freeing the memory, we'll just leave the caches
+		 * up to this point in an updated state.
+		 */
+		if (ret)
+			goto out;
+	}
+
+	memcg_update_array_size(num_memcgs);
+out:
+	mutex_unlock(&slab_mutex);
+	return ret;
+}
+#endif
+
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
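
The update loop in memcg_update_all_caches() deliberately does not roll back on failure. The standalone sketch below illustrates that policy under simplified assumptions (a singly-linked cache list, a calloc-backed per-memcg array; grow_one() and update_all() are invented names): each root cache's array is grown to the new memcg count, and a failure stops the walk while leaving the already-grown caches in their updated state.

#include <stdlib.h>
#include <string.h>

struct cache {
	void **memcg_caches;	/* one slot per memcg */
	int nr_slots;
	struct cache *next;
};

/* Grow one cache's per-memcg array to num_memcgs slots. */
static int grow_one(struct cache *c, int num_memcgs)
{
	void **bigger = calloc(num_memcgs, sizeof(*bigger));

	if (!bigger)
		return -1;
	if (c->nr_slots)
		memcpy(bigger, c->memcg_caches,
		       c->nr_slots * sizeof(*bigger));
	free(c->memcg_caches);
	c->memcg_caches = bigger;
	c->nr_slots = num_memcgs;
	return 0;
}

static int update_all(struct cache *head, int num_memcgs)
{
	struct cache *c;

	for (c = head; c; c = c->next)
		if (grow_one(c, num_memcgs))
			return -1;	/* caches grown so far stay updated */
	return 0;
}
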
@@ -125,8 +162,10 @@ unsigned long calculate_alignment(unsigned long flags,
  * as davem.
  */
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
-		unsigned long flags, void (*ctor)(void *))
+struct kmem_cache *
+kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
+			size_t align, unsigned long flags, void (*ctor)(void *),
+			struct kmem_cache *parent_cache)
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
@@ -134,7 +173,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 
-	if (!kmem_cache_sanity_check(name, size) == 0)
+	if (!kmem_cache_sanity_check(memcg, name, size) == 0)
 		goto out_locked;
 
 	/*
@@ -145,7 +184,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	 */
 	flags &= CACHE_CREATE_MASK;
 
-	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 	if (s)
 		goto out_locked;
 
@@ -154,6 +193,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		s->object_size = s->size = size;
 		s->align = calculate_alignment(flags, align, size);
 		s->ctor = ctor;
+
+		if (memcg_register_cache(memcg, s, parent_cache)) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
 		s->name = kstrdup(name, GFP_KERNEL);
 		if (!s->name) {
 			kmem_cache_free(kmem_cache, s);
@@ -163,10 +209,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 
 		err = __kmem_cache_create(s, flags);
 		if (!err) {
-
 			s->refcount = 1;
 			list_add(&s->list, &slab_caches);
-
+			memcg_cache_list_add(memcg, s);
 		} else {
 			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
@@ -194,10 +239,20 @@ out_locked:
 
 	return s;
 }
+
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t size, size_t align,
+		  unsigned long flags, void (*ctor)(void *))
+{
+	return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+}
 EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+	/* Destroy all the children caches if we aren't a memcg cache */
+	kmem_cache_destroy_memcg_children(s);
+
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
 	s->refcount--;
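
Existing callers are unaffected by the split: kmem_cache_create() keeps its old signature and simply forwards to kmem_cache_create_memcg() with a NULL memcg and parent. A minimal caller, sketched with made-up names (foo, foo_cachep, foo_init) and module boilerplate omitted, still looks as it always did:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int a;
	char payload[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	/* NULL ctor is allowed; the memcg plumbing is invisible here. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}
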
@@ -209,6 +264,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 
+		memcg_release_cache(s);
 		kfree(s->name);
 		kmem_cache_free(kmem_cache, s);
 	} else {
@@ -267,7 +323,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 
 
 #ifdef CONFIG_SLABINFO
-static void print_slabinfo_header(struct seq_file *m)
+void print_slabinfo_header(struct seq_file *m)
 {
 	/*
 	 * Output format version, so at least we can change it
@@ -311,16 +367,43 @@ static void s_stop(struct seq_file *m, void *p)
 	mutex_unlock(&slab_mutex);
 }
 
-static int s_show(struct seq_file *m, void *p)
+static void
+memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
+{
+	struct kmem_cache *c;
+	struct slabinfo sinfo;
+	int i;
+
+	if (!is_root_cache(s))
+		return;
+
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg(s, i);
+		if (!c)
+			continue;
+
+		memset(&sinfo, 0, sizeof(sinfo));
+		get_slabinfo(c, &sinfo);
+
+		info->active_slabs += sinfo.active_slabs;
+		info->num_slabs += sinfo.num_slabs;
+		info->shared_avail += sinfo.shared_avail;
+		info->active_objs += sinfo.active_objs;
+		info->num_objs += sinfo.num_objs;
+	}
+}
+
+int cache_show(struct kmem_cache *s, struct seq_file *m)
 {
-	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 	struct slabinfo sinfo;
 
 	memset(&sinfo, 0, sizeof(sinfo));
 	get_slabinfo(s, &sinfo);
 
+	memcg_accumulate_slabinfo(s, &sinfo);
+
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
+		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
 		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
 
 	seq_printf(m, " : tunables %4u %4u %4u",
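
cache_show() now folds child statistics into the root cache's line, so /proc/slabinfo totals stay meaningful once per-memcg allocations move into child caches. The standalone sketch below captures that accumulation (struct counts and accumulate() are invented for illustration; the child array is sparse, like the per-memcg index space):

#include <stddef.h>

struct counts {
	unsigned long active_objs;
	unsigned long num_objs;
};

/* Fold each live child's counters into the root's totals. */
static void accumulate(struct counts *root,
		       struct counts *const *children, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!children[i])	/* empty slot: no cache for this memcg */
			continue;
		root->active_objs += children[i]->active_objs;
		root->num_objs += children[i]->num_objs;
	}
}
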
@@ -332,6 +415,15 @@ static int s_show(struct seq_file *m, void *p)
 	return 0;
 }
 
+static int s_show(struct seq_file *m, void *p)
+{
+	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
+
+	if (!is_root_cache(s))
+		return 0;
+	return cache_show(s, m);
+}
+
 /*
  * slabinfo_op - iterator that generates /proc/slabinfo
  *