path: root/mm/slab_common.c
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c  250
1 file changed, 154 insertions(+), 96 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1ec3c619ba04..f3cfccf76dda 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -29,8 +29,7 @@ DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
-                                   size_t size)
+static int kmem_cache_sanity_check(const char *name, size_t size)
 {
         struct kmem_cache *s = NULL;
 
@@ -57,13 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                 }
 
 #if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
-                /*
-                 * For simplicity, we won't check this in the list of memcg
-                 * caches. We have control over memcg naming, and if there
-                 * aren't duplicates in the global list, there won't be any
-                 * duplicates in the memcg lists as well.
-                 */
-                if (!memcg && !strcmp(s->name, name)) {
+                if (!strcmp(s->name, name)) {
                         pr_err("%s (%s): Cache name already exists.\n",
                                __func__, name);
                         dump_stack();
@@ -77,8 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
         return 0;
 }
 #else
-static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
-                                          const char *name, size_t size)
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
 {
         return 0;
 }
@@ -139,6 +131,46 @@ unsigned long calculate_alignment(unsigned long flags,
         return ALIGN(align, sizeof(void *));
 }
 
+static struct kmem_cache *
+do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
+                     unsigned long flags, void (*ctor)(void *),
+                     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+        struct kmem_cache *s;
+        int err;
+
+        err = -ENOMEM;
+        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+        if (!s)
+                goto out;
+
+        s->name = name;
+        s->object_size = object_size;
+        s->size = size;
+        s->align = align;
+        s->ctor = ctor;
+
+        err = memcg_alloc_cache_params(memcg, s, root_cache);
+        if (err)
+                goto out_free_cache;
+
+        err = __kmem_cache_create(s, flags);
+        if (err)
+                goto out_free_cache;
+
+        s->refcount = 1;
+        list_add(&s->list, &slab_caches);
+        memcg_register_cache(s);
+out:
+        if (err)
+                return ERR_PTR(err);
+        return s;
+
+out_free_cache:
+        memcg_free_cache_params(s);
+        kfree(s);
+        goto out;
+}
 
 /*
  * kmem_cache_create - Create a cache.
@@ -164,34 +196,21 @@ unsigned long calculate_alignment(unsigned long flags,
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-
 struct kmem_cache *
-kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
-                        size_t align, unsigned long flags, void (*ctor)(void *),
-                        struct kmem_cache *parent_cache)
+kmem_cache_create(const char *name, size_t size, size_t align,
+                  unsigned long flags, void (*ctor)(void *))
 {
-        struct kmem_cache *s = NULL;
+        struct kmem_cache *s;
+        char *cache_name;
         int err;
 
         get_online_cpus();
         mutex_lock(&slab_mutex);
 
-        err = kmem_cache_sanity_check(memcg, name, size);
+        err = kmem_cache_sanity_check(name, size);
         if (err)
                 goto out_unlock;
 
-        if (memcg) {
-                /*
-                 * Since per-memcg caches are created asynchronously on first
-                 * allocation (see memcg_kmem_get_cache()), several threads can
-                 * try to create the same cache, but only one of them may
-                 * succeed. Therefore if we get here and see the cache has
-                 * already been created, we silently return NULL.
-                 */
-                if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
-                        goto out_unlock;
-        }
-
         /*
          * Some allocators will constraint the set of valid flags to a subset
          * of all flags. We expect them to define CACHE_CREATE_MASK in this
@@ -200,50 +219,29 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
          */
         flags &= CACHE_CREATE_MASK;
 
-        s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
+        s = __kmem_cache_alias(name, size, align, flags, ctor);
         if (s)
                 goto out_unlock;
 
-        err = -ENOMEM;
-        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-        if (!s)
-                goto out_unlock;
-
-        s->object_size = s->size = size;
-        s->align = calculate_alignment(flags, align, size);
-        s->ctor = ctor;
-
-        s->name = kstrdup(name, GFP_KERNEL);
-        if (!s->name)
-                goto out_free_cache;
-
-        err = memcg_alloc_cache_params(memcg, s, parent_cache);
-        if (err)
-                goto out_free_cache;
-
-        err = __kmem_cache_create(s, flags);
-        if (err)
-                goto out_free_cache;
-
-        s->refcount = 1;
-        list_add(&s->list, &slab_caches);
-        memcg_register_cache(s);
+        cache_name = kstrdup(name, GFP_KERNEL);
+        if (!cache_name) {
+                err = -ENOMEM;
+                goto out_unlock;
+        }
+
+        s = do_kmem_cache_create(cache_name, size, size,
+                                 calculate_alignment(flags, align, size),
+                                 flags, ctor, NULL, NULL);
+        if (IS_ERR(s)) {
+                err = PTR_ERR(s);
+                kfree(cache_name);
+        }
 
 out_unlock:
         mutex_unlock(&slab_mutex);
         put_online_cpus();
 
         if (err) {
-                /*
-                 * There is no point in flooding logs with warnings or
-                 * especially crashing the system if we fail to create a cache
-                 * for a memcg. In this case we will be accounting the memcg
-                 * allocation to the root cgroup until we succeed to create its
-                 * own cache, but it isn't that critical.
-                 */
-                if (!memcg)
-                        return NULL;
-
                 if (flags & SLAB_PANIC)
                         panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                 name, err);
@@ -255,52 +253,112 @@ out_unlock:
                 return NULL;
         }
         return s;
+}
+EXPORT_SYMBOL(kmem_cache_create);
 
-out_free_cache:
-        memcg_free_cache_params(s);
-        kfree(s->name);
-        kmem_cache_free(kmem_cache, s);
-        goto out_unlock;
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * kmem_cache_create_memcg - Create a cache for a memory cgroup.
+ * @memcg: The memory cgroup the new cache is for.
+ * @root_cache: The parent of the new cache.
+ *
+ * This function attempts to create a kmem cache that will serve allocation
+ * requests going from @memcg to @root_cache. The new cache inherits properties
+ * from its parent.
+ */
+void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+{
+        struct kmem_cache *s;
+        char *cache_name;
+
+        get_online_cpus();
+        mutex_lock(&slab_mutex);
+
+        /*
+         * Since per-memcg caches are created asynchronously on first
+         * allocation (see memcg_kmem_get_cache()), several threads can try to
+         * create the same cache, but only one of them may succeed.
+         */
+        if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg)))
+                goto out_unlock;
+
+        cache_name = memcg_create_cache_name(memcg, root_cache);
+        if (!cache_name)
+                goto out_unlock;
+
+        s = do_kmem_cache_create(cache_name, root_cache->object_size,
+                                 root_cache->size, root_cache->align,
+                                 root_cache->flags, root_cache->ctor,
+                                 memcg, root_cache);
+        if (IS_ERR(s)) {
+                kfree(cache_name);
+                goto out_unlock;
+        }
+
+        s->allocflags |= __GFP_KMEMCG;
+
+out_unlock:
+        mutex_unlock(&slab_mutex);
+        put_online_cpus();
 }
 
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t align,
-                  unsigned long flags, void (*ctor)(void *))
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 {
-        return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+        int rc;
+
+        if (!s->memcg_params ||
+            !s->memcg_params->is_root_cache)
+                return 0;
+
+        mutex_unlock(&slab_mutex);
+        rc = __kmem_cache_destroy_memcg_children(s);
+        mutex_lock(&slab_mutex);
+
+        return rc;
 }
-EXPORT_SYMBOL(kmem_cache_create);
+#else
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+        return 0;
+}
+#endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-        /* Destroy all the children caches if we aren't a memcg cache */
-        kmem_cache_destroy_memcg_children(s);
-
         get_online_cpus();
         mutex_lock(&slab_mutex);
+
         s->refcount--;
-        if (!s->refcount) {
-                list_del(&s->list);
+        if (s->refcount)
+                goto out_unlock;
 
-                if (!__kmem_cache_shutdown(s)) {
-                        memcg_unregister_cache(s);
-                        mutex_unlock(&slab_mutex);
-                        if (s->flags & SLAB_DESTROY_BY_RCU)
-                                rcu_barrier();
+        if (kmem_cache_destroy_memcg_children(s) != 0)
+                goto out_unlock;
+
+        list_del(&s->list);
+        memcg_unregister_cache(s);
 
-                        memcg_free_cache_params(s);
-                        kfree(s->name);
-                        kmem_cache_free(kmem_cache, s);
-                } else {
-                        list_add(&s->list, &slab_caches);
-                        mutex_unlock(&slab_mutex);
-                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
-                                s->name);
-                        dump_stack();
-                }
-        } else {
-                mutex_unlock(&slab_mutex);
+        if (__kmem_cache_shutdown(s) != 0) {
+                list_add(&s->list, &slab_caches);
+                memcg_register_cache(s);
+                printk(KERN_ERR "kmem_cache_destroy %s: "
+                       "Slab cache still has objects\n", s->name);
+                dump_stack();
+                goto out_unlock;
         }
+
+        mutex_unlock(&slab_mutex);
+        if (s->flags & SLAB_DESTROY_BY_RCU)
+                rcu_barrier();
+
+        memcg_free_cache_params(s);
+        kfree(s->name);
+        kmem_cache_free(kmem_cache, s);
+        goto out_put_cpus;
+
+out_unlock:
+        mutex_unlock(&slab_mutex);
+out_put_cpus:
         put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
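
Usage note (editor's sketch, not part of the patch): the refactor above does not change the externally visible slab API. kmem_cache_create() and kmem_cache_destroy() keep their signatures; per-memcg copies are created internally through kmem_cache_create_memcg() and the shared do_kmem_cache_create() helper. A minimal, hypothetical caller of the root-cache path might look like the following; the struct name, cache name, and init/exit hooks are illustrative only.

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical object type; any fixed-size structure works the same way. */
struct foo {
        int a;
        int b;
};

static struct kmem_cache *foo_cachep;

static int foo_cache_init(void)
{
        /*
         * Create one root cache. With CONFIG_MEMCG_KMEM, per-memcg copies
         * are created lazily by the kernel on first allocation (see
         * kmem_cache_create_memcg() in the patch), not by the caller.
         */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cachep)
                return -ENOMEM;
        return 0;
}

static void foo_cache_exit(void)
{
        /*
         * Drops the last reference; as in the reworked kmem_cache_destroy(),
         * the cache is kept (and an error is logged) if objects are still
         * allocated from it.
         */
        kmem_cache_destroy(foo_cachep);
}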