author		Vladimir Davydov <vdavydov@parallels.com>	2014-04-07 18:39:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-07 19:36:13 -0400
commit		b8529907ba35d625fa4b85d3e4dc8021be97c1f3 (patch)
tree		88ba9609669e3f82b475f7a8bcaa927d5e5928b5 /mm
parent		051dd46050f2a9bdfff8cc067f8987069eae1743 (diff)
memcg, slab: do not destroy children caches if parent has aliases
Currently we destroy children caches at the very beginning of
kmem_cache_destroy(). This is wrong, because the root cache will not
necessarily be destroyed in the end - if it has aliases (refcount > 0),
kmem_cache_destroy() will simply decrement its refcount and return. In
this case, at best we will get a bunch of warnings in dmesg, like this
one:
kmem_cache_destroy kmalloc-32:0: Slab cache still has objects
CPU: 1 PID: 7139 Comm: modprobe Tainted: G B W 3.13.0+ #117
Call Trace:
dump_stack+0x49/0x5b
kmem_cache_destroy+0xdf/0xf0
kmem_cache_destroy_memcg_children+0x97/0xc0
kmem_cache_destroy+0xf/0xf0
xfs_mru_cache_uninit+0x21/0x30 [xfs]
exit_xfs_fs+0x2e/0xc44 [xfs]
SyS_delete_module+0x198/0x1f0
system_call_fastpath+0x16/0x1b
At worst, if kmem_cache_destroy() races with an allocation from a memcg
cache, the kernel will panic.
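For context, a root cache ends up with aliases when the allocator merges
compatible caches: a later kmem_cache_create() may return an existing cache
and only bump its refcount. Below is a minimal sketch of how a module could
reach the problematic path; it assumes SLUB with cache merging enabled, and
the module and cache names (alias_demo, demo_a, demo_b) are hypothetical.

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *cache_a, *cache_b;

static int __init alias_demo_init(void)
{
        /*
         * With SLUB cache merging, the second create may return an alias
         * of the first cache and merely bump its refcount instead of
         * building a new cache.
         */
        cache_a = kmem_cache_create("demo_a", 32, 0, 0, NULL);
        if (!cache_a)
                return -ENOMEM;
        cache_b = kmem_cache_create("demo_b", 32, 0, 0, NULL);
        if (!cache_b) {
                kmem_cache_destroy(cache_a);
                return -ENOMEM;
        }
        return 0;
}

static void __exit alias_demo_exit(void)
{
        /*
         * If the caches were merged, this call only drops the refcount,
         * so the cache (and any memcg children) must stay fully usable.
         */
        kmem_cache_destroy(cache_b);
        /* Here the refcount reaches zero and the cache is really destroyed. */
        kmem_cache_destroy(cache_a);
}

module_init(alias_demo_init);
module_exit(alias_demo_exit);
MODULE_LICENSE("GPL");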
This patch fixes the problem by moving the destruction of children caches
to after the alias check. It also forbids destroying a root cache while it
still has children, because each child cache keeps a reference to its
parent.
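Condensed, the reordered flow in kmem_cache_destroy() looks like the sketch
below; this is a simplified rendering of the mm/slab_common.c hunk further
down, with the error printk and dump_stack() abbreviated to a comment.

void kmem_cache_destroy(struct kmem_cache *s)
{
        get_online_cpus();
        mutex_lock(&slab_mutex);

        s->refcount--;
        if (s->refcount)                /* an alias is still alive: just drop the ref */
                goto out_unlock;

        if (kmem_cache_destroy_memcg_children(s) != 0)
                goto out_unlock;        /* a child survived: keep the root cache */

        list_del(&s->list);
        memcg_unregister_cache(s);

        if (__kmem_cache_shutdown(s) != 0) {
                /* cache still has objects: restore it and warn (see diff below) */
                list_add(&s->list, &slab_caches);
                memcg_register_cache(s);
                goto out_unlock;
        }

        mutex_unlock(&slab_mutex);
        if (s->flags & SLAB_DESTROY_BY_RCU)
                rcu_barrier();

        memcg_free_cache_params(s);
        kfree(s->name);
        kmem_cache_free(kmem_cache, s);
        goto out_put_cpus;

out_unlock:
        mutex_unlock(&slab_mutex);
out_put_cpus:
        put_online_cpus();
}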
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c		13
-rw-r--r--	mm/slab_common.c	75
2 files changed, 56 insertions(+), 32 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c22d8bf42d9a..29501f040568 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3321,15 +3321,10 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
         schedule_work(&cachep->memcg_params->destroy);
 }
 
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 {
         struct kmem_cache *c;
-        int i;
-
-        if (!s->memcg_params)
-                return;
-        if (!s->memcg_params->is_root_cache)
-                return;
+        int i, failed = 0;
 
         /*
          * If the cache is being destroyed, we trust that there is no one else
@@ -3363,8 +3358,12 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
                 c->memcg_params->dead = false;
                 cancel_work_sync(&c->memcg_params->destroy);
                 kmem_cache_destroy(c);
+
+                if (cache_from_memcg_idx(s, i))
+                        failed++;
         }
         mutex_unlock(&activate_kmem_mutex);
+        return failed;
 }
 
 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0c2879ff414c..f3cfccf76dda 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -301,39 +301,64 @@ out_unlock:
         mutex_unlock(&slab_mutex);
         put_online_cpus();
 }
+
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+        int rc;
+
+        if (!s->memcg_params ||
+            !s->memcg_params->is_root_cache)
+                return 0;
+
+        mutex_unlock(&slab_mutex);
+        rc = __kmem_cache_destroy_memcg_children(s);
+        mutex_lock(&slab_mutex);
+
+        return rc;
+}
+#else
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+        return 0;
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-        /* Destroy all the children caches if we aren't a memcg cache */
-        kmem_cache_destroy_memcg_children(s);
-
         get_online_cpus();
         mutex_lock(&slab_mutex);
+
         s->refcount--;
-        if (!s->refcount) {
-                list_del(&s->list);
-                memcg_unregister_cache(s);
-
-                if (!__kmem_cache_shutdown(s)) {
-                        mutex_unlock(&slab_mutex);
-                        if (s->flags & SLAB_DESTROY_BY_RCU)
-                                rcu_barrier();
-
-                        memcg_free_cache_params(s);
-                        kfree(s->name);
-                        kmem_cache_free(kmem_cache, s);
-                } else {
-                        list_add(&s->list, &slab_caches);
-                        memcg_register_cache(s);
-                        mutex_unlock(&slab_mutex);
-                        printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
-                               s->name);
-                        dump_stack();
-                }
-        } else {
-                mutex_unlock(&slab_mutex);
+        if (s->refcount)
+                goto out_unlock;
+
+        if (kmem_cache_destroy_memcg_children(s) != 0)
+                goto out_unlock;
+
+        list_del(&s->list);
+        memcg_unregister_cache(s);
+
+        if (__kmem_cache_shutdown(s) != 0) {
+                list_add(&s->list, &slab_caches);
+                memcg_register_cache(s);
+                printk(KERN_ERR "kmem_cache_destroy %s: "
+                       "Slab cache still has objects\n", s->name);
+                dump_stack();
+                goto out_unlock;
         }
+
+        mutex_unlock(&slab_mutex);
+        if (s->flags & SLAB_DESTROY_BY_RCU)
+                rcu_barrier();
+
+        memcg_free_cache_params(s);
+        kfree(s->name);
+        kmem_cache_free(kmem_cache, s);
+        goto out_put_cpus;
+
+out_unlock:
+        mutex_unlock(&slab_mutex);
+out_put_cpus:
         put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);