Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	39
1 file changed, 8 insertions(+), 31 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d2da65c4cd84..80197e544764 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3427,27 +3427,16 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 	schedule_work(&cachep->memcg_params->destroy);
 }
 
-/*
- * This lock protects updaters, not readers. We want readers to be as fast as
- * they can, and they will either see NULL or a valid cache value. Our model
- * allow them to see NULL, in which case the root memcg will be selected.
- *
- * We need this lock because multiple allocations to the same cache from a non
- * will span more than one worker. Only one of them can create the cache.
- */
-static DEFINE_MUTEX(memcg_cache_mutex);
-
-/*
- * Called with memcg_cache_mutex held
- */
-static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
-					 struct kmem_cache *s)
+static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
+						  struct kmem_cache *s)
 {
 	struct kmem_cache *new;
 	static char *tmp_name = NULL;
+	static DEFINE_MUTEX(mutex);	/* protects tmp_name */
 
-	lockdep_assert_held(&memcg_cache_mutex);
+	BUG_ON(!memcg_can_account_kmem(memcg));
 
+	mutex_lock(&mutex);
 	/*
 	 * kmem_cache_create_memcg duplicates the given name and
 	 * cgroup_name for this name requires RCU context.
@@ -3470,25 +3459,13 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
 
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
+	else
+		new = s;
 
+	mutex_unlock(&mutex);
 	return new;
 }
 
-static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
-						  struct kmem_cache *cachep)
-{
-	struct kmem_cache *new_cachep;
-
-	BUG_ON(!memcg_can_account_kmem(memcg));
-
-	mutex_lock(&memcg_cache_mutex);
-	new_cachep = kmem_cache_dup(memcg, cachep);
-	if (new_cachep == NULL)
-		new_cachep = cachep;
-	mutex_unlock(&memcg_cache_mutex);
-	return new_cachep;
-}
-
 void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 {
 	struct kmem_cache *c;
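
The locking change above boils down to one pattern: instead of a file-scope memcg_cache_mutex held across the whole cache-creation path, the merged memcg_create_kmem_cache() uses a function-local static mutex that only serializes users of the shared tmp_name scratch buffer. The userspace sketch below illustrates that pattern under stated assumptions: make_cache_name(), NAME_MAX_LEN and the pthread-based locking are illustrative stand-ins, not code from memcontrol.c.

/*
 * Minimal sketch of a function-local static mutex guarding a lazily
 * allocated static scratch buffer, analogous to the patch's
 * "static DEFINE_MUTEX(mutex);  /+ protects tmp_name +/" narrowing.
 * Names and sizes here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LEN 256

static char *make_cache_name(const char *base, int id)
{
	static char *tmp_name;
	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /* protects tmp_name */
	char *name = NULL;

	pthread_mutex_lock(&mutex);

	/* Allocate the shared scratch buffer on first use only. */
	if (!tmp_name) {
		tmp_name = malloc(NAME_MAX_LEN);
		if (!tmp_name)
			goto out;
	}

	/* Format into the shared buffer, then hand back a private copy. */
	snprintf(tmp_name, NAME_MAX_LEN, "%s(%d)", base, id);
	name = strdup(tmp_name);
out:
	pthread_mutex_unlock(&mutex);
	return name;
}

int main(void)
{
	char *name = make_cache_name("dentry", 3);

	if (name) {
		printf("%s\n", name);
		free(name);
	}
	return 0;
}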