author     Vladimir Davydov <vdavydov@parallels.com>    2014-01-23 18:53:02 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-01-23 19:36:51 -0500
commit     2edefe1155b3ad3dc92065f6e1018d363525296e (patch)
tree       893832c3693fab8cb849fdbfc1a264e376154860 /mm
parent     96403da244443d9842dbf290c2a02390b78a158e (diff)
memcg, slab: fix races in per-memcg cache creation/destruction
We obtain a per-memcg cache from a root kmem_cache by dereferencing an
entry of the root cache's memcg_params::memcg_caches array. If we find
no cache for a memcg there on allocation, we initiate the memcg cache
creation (see memcg_kmem_get_cache()). The cache creation proceeds
asynchronously in memcg_create_kmem_cache() in order to avoid lock
clashes, so there can be several threads trying to create the same
kmem_cache concurrently, but only one of them may succeed. However,
due to a race in the code, this does not always hold. The point is
that the memcg_caches array can be relocated when we activate kmem
accounting for a memcg (see memcg_update_all_caches(),
memcg_update_cache_size()). If memcg_update_cache_size() and
memcg_create_kmem_cache() proceed concurrently as described below, we
can leak a kmem_cache.

Assume two threads schedule creation of the same kmem_cache. One of
them successfully creates it. The other one should then fail, but if
memcg_create_kmem_cache() interleaves with memcg_update_cache_size()
as follows, it won't:

  memcg_create_kmem_cache()                  memcg_update_cache_size()
  (called w/o mutexes held)                  (called with slab_mutex and
                                              set_limit_mutex held)
  -------------------------                  -------------------------

  mutex_lock(&memcg_cache_mutex)

                                             s->memcg_params = kzalloc(...)

  new_cachep = cache_from_memcg_idx(cachep, idx)
  // new_cachep == NULL => proceed to creation

                                             s->memcg_params->memcg_caches[i] =
                                                 cur_params->memcg_caches[i]

  // kmem_cache_create_memcg() takes slab_mutex,
  // so we will hang around until
  // memcg_update_cache_size() finishes, but
  // nothing will prevent it from succeeding, so
  // memcg_caches[idx] will be overwritten in
  // memcg_register_cache!

  new_cachep = kmem_cache_create_memcg(...)

  mutex_unlock(&memcg_cache_mutex)

Let's fix this by moving the check for existence of the memcg cache
into kmem_cache_create_memcg(), where it is performed under the
slab_mutex, and by making kmem_cache_create_memcg() return NULL if the
cache already exists.

A similar race is possible when destroying a memcg cache (see
kmem_cache_destroy()). Since memcg_unregister_cache(), which clears
the pointer in the memcg_caches array, is called w/o protection, we
can race with memcg_update_cache_size() and omit clearing the pointer.
Therefore memcg_unregister_cache() should be moved before we release
the slab_mutex.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
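To make the failure mode concrete, below is a minimal user-space model
of the same race class: a check-then-create performed outside the lock
that serializes the store. It is a sketch only, not kernel code;
table[], table_lock and the function names are hypothetical stand-ins
for memcg_caches[], slab_mutex and the creation path.

        /*
         * Minimal user-space model of the race fixed by this patch.
         * Hypothetical names: table[] plays the role of memcg_caches[],
         * table_lock plays the role of slab_mutex.
         */
        #include <pthread.h>
        #include <stdlib.h>

        static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
        static void *table[16];

        /*
         * Buggy variant: the existence check happens outside table_lock,
         * so two threads can both see NULL, both allocate, and the first
         * object is leaked when the second store overwrites the slot.
         */
        static void *get_or_create_racy(int idx)
        {
                if (table[idx])
                        return table[idx];
                void *obj = malloc(64);         /* "create the cache" */
                pthread_mutex_lock(&table_lock);
                table[idx] = obj;               /* may overwrite a winner */
                pthread_mutex_unlock(&table_lock);
                return obj;
        }

        /*
         * Fixed variant: re-check under the same lock that protects the
         * store, the analogue of checking cache_from_memcg_idx() under
         * slab_mutex.
         */
        static void *get_or_create(int idx)
        {
                pthread_mutex_lock(&table_lock);
                void *obj = table[idx];
                if (!obj) {
                        obj = malloc(64);
                        table[idx] = obj;
                }
                pthread_mutex_unlock(&table_lock);
                return obj;
        }

        int main(void)
        {
                void *a = get_or_create(3);
                void *b = get_or_create(3);     /* same object: race closed */
                return a == b ? 0 : 1;
        }

The patch implements the second variant: the existence check is done
under the same mutex that guards the array update, so a losing creator
observes the winner's store and backs off.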
Diffstat (limited to 'mm')
-rw-r--r--    mm/memcontrol.c     23
-rw-r--r--    mm/slab_common.c    14
2 files changed, 27 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 014a4f1acf1c..d2da65c4cd84 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3264,6 +3264,12 @@ void memcg_register_cache(struct kmem_cache *s)
 	if (is_root_cache(s))
 		return;
 
+	/*
+	 * Holding the slab_mutex assures nobody will touch the memcg_caches
+	 * array while we are modifying it.
+	 */
+	lockdep_assert_held(&slab_mutex);
+
 	root = s->memcg_params->root_cache;
 	memcg = s->memcg_params->memcg;
 	id = memcg_cache_id(memcg);
@@ -3283,6 +3289,7 @@ void memcg_register_cache(struct kmem_cache *s)
 	 * before adding it to the memcg_slab_caches list, otherwise we can
 	 * fail to convert memcg_params_to_cache() while traversing the list.
 	 */
+	VM_BUG_ON(root->memcg_params->memcg_caches[id]);
 	root->memcg_params->memcg_caches[id] = s;
 
 	mutex_lock(&memcg->slab_caches_mutex);
@@ -3299,6 +3306,12 @@ void memcg_unregister_cache(struct kmem_cache *s)
 	if (is_root_cache(s))
 		return;
 
+	/*
+	 * Holding the slab_mutex assures nobody will touch the memcg_caches
+	 * array while we are modifying it.
+	 */
+	lockdep_assert_held(&slab_mutex);
+
 	root = s->memcg_params->root_cache;
 	memcg = s->memcg_params->memcg;
 	id = memcg_cache_id(memcg);
@@ -3312,6 +3325,7 @@ void memcg_unregister_cache(struct kmem_cache *s)
 	 * after removing it from the memcg_slab_caches list, otherwise we can
 	 * fail to convert memcg_params_to_cache() while traversing the list.
 	 */
+	VM_BUG_ON(!root->memcg_params->memcg_caches[id]);
 	root->memcg_params->memcg_caches[id] = NULL;
 
 	css_put(&memcg->css);
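Taken together, the four hunks above give memcg_register_cache() and
memcg_unregister_cache() a symmetric contract: both must be called
with slab_mutex held, and each slot transition is asserted (empty to
cache on register, cache to empty on unregister). Condensed into a
sketch, where slot_install(), slot_remove() and arr[] are hypothetical
names and only lockdep_assert_held() and VM_BUG_ON() are real kernel
macros:

        /* Sketch of the locking/assertion idiom added above. */
        static void slot_install(void **arr, int id, void *obj)
        {
                lockdep_assert_held(&slab_mutex); /* no-op without lockdep */
                VM_BUG_ON(arr[id]);               /* slot must be empty */
                arr[id] = obj;
        }

        static void slot_remove(void **arr, int id)
        {
                lockdep_assert_held(&slab_mutex);
                VM_BUG_ON(!arr[id]);              /* slot must be occupied */
                arr[id] = NULL;
        }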
@@ -3464,22 +3478,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 					  struct kmem_cache *cachep)
 {
 	struct kmem_cache *new_cachep;
-	int idx;
 
 	BUG_ON(!memcg_can_account_kmem(memcg));
 
-	idx = memcg_cache_id(memcg);
-
 	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cache_from_memcg_idx(cachep, idx);
-	if (new_cachep)
-		goto out;
-
 	new_cachep = kmem_cache_dup(memcg, cachep);
 	if (new_cachep == NULL)
 		new_cachep = cachep;
-
-out:
 	mutex_unlock(&memcg_cache_mutex);
 	return new_cachep;
 }
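For reference, with the diff markup stripped, memcg_create_kmem_cache()
after this hunk reduces to the following (reconstructed from the hunk
above; the duplicate check it used to perform now lives in
kmem_cache_create_memcg(), under slab_mutex):

        static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
                                                          struct kmem_cache *cachep)
        {
                struct kmem_cache *new_cachep;

                BUG_ON(!memcg_can_account_kmem(memcg));

                mutex_lock(&memcg_cache_mutex);
                new_cachep = kmem_cache_dup(memcg, cachep);
                if (new_cachep == NULL)
                        new_cachep = cachep;    /* fall back to the root cache */
                mutex_unlock(&memcg_cache_mutex);
                return new_cachep;
        }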
diff --git a/mm/slab_common.c b/mm/slab_common.c
index db24ec48b946..f34707eeacc7 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -180,6 +180,18 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 	if (err)
 		goto out_unlock;
 
+	if (memcg) {
+		/*
+		 * Since per-memcg caches are created asynchronously on first
+		 * allocation (see memcg_kmem_get_cache()), several threads can
+		 * try to create the same cache, but only one of them may
+		 * succeed. Therefore if we get here and see the cache has
+		 * already been created, we silently return NULL.
+		 */
+		if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
+			goto out_unlock;
+	}
+
 	/*
 	 * Some allocators will constraint the set of valid flags to a subset
 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
@@ -261,11 +273,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	list_del(&s->list);
 
 	if (!__kmem_cache_shutdown(s)) {
+		memcg_unregister_cache(s);
 		mutex_unlock(&slab_mutex);
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 
-		memcg_unregister_cache(s);
 		memcg_free_cache_params(s);
 		kfree(s->name);
 		kmem_cache_free(kmem_cache, s);
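Stripped of markup, the destroy path from this hunk now clears the
memcg_caches entry while slab_mutex is still held, so it can no longer
race with memcg_update_cache_size() relocating the array. Only the
lines visible in the hunk are shown; the remainder of
kmem_cache_destroy() is unchanged:

        if (!__kmem_cache_shutdown(s)) {
                memcg_unregister_cache(s);      /* still under slab_mutex */
                mutex_unlock(&slab_mutex);
                if (s->flags & SLAB_DESTROY_BY_RCU)
                        rcu_barrier();

                memcg_free_cache_params(s);
                kfree(s->name);
                kmem_cache_free(kmem_cache, s);
                /* ... rest of the function unchanged ... */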