Diffstat (limited to 'mm')

-rw-r--r-- | mm/memcontrol.c | 63 | +++++++++++++++++++++++++++++++++------------------------------
1 file changed, 33 insertions(+), 30 deletions(-)
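The patch body follows. Read together, the hunks make one change: __memcg_kmem_get_cache() used to call rcu_read_unlock() right after looking up the memcg and only took a css reference later, leaving a window in which the memcg could be freed. The patch keeps the RCU read lock held until css_tryget() has succeeded, funnels the early exits through an out: label that drops the lock, and moves the failure-path css_put() into __memcg_create_cache_enqueue(), which now owns the reference taken by its caller.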
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 47b36fea7e1f..b8dc8e4cbf6a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3483,7 +3483,6 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
- * Called with rcu_read_lock.
  */
 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
 					 struct kmem_cache *cachep)
@@ -3491,12 +3490,8 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
 	struct create_work *cw;
 
 	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
-	if (cw == NULL)
-		return;
-
-	/* The corresponding put will be done in the workqueue. */
-	if (!css_tryget(&memcg->css)) {
-		kfree(cw);
+	if (cw == NULL) {
+		css_put(&memcg->css);
 		return;
 	}
 
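The hunk above changes the ownership contract: the caller of __memcg_create_cache_enqueue() now takes the css reference up front, so on failure the function must drop a reference it was handed rather than clean up one it never managed to take. Below is a minimal userspace sketch of that contract, not the kernel code; the names obj_get(), obj_put() and enqueue_work() are hypothetical stand-ins for the css and workqueue machinery.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);		/* last reference: destroy */
}

struct work {
	struct obj *o;
};

/* The caller has already taken a reference on @o; we own it from here on,
 * so every exit path, including the allocation failure, must release it. */
static int enqueue_work(struct obj *o)
{
	struct work *w = malloc(sizeof(*w));

	if (w == NULL) {
		obj_put(o);	/* mirrors the css_put() in the error path above */
		return -1;
	}
	w->o = o;
	/* ... hand @w to a worker, which calls obj_put(w->o) when done ... */
	free(w);		/* stand-in for the worker consuming the item */
	obj_put(o);
	return 0;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o == NULL)
		return 1;
	atomic_init(&o->refcnt, 1);
	obj_get(o);		/* the reference enqueue_work() will own */
	enqueue_work(o);
	obj_put(o);		/* drop our own initial reference */
	return 0;
}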
@@ -3552,10 +3547,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-	rcu_read_unlock();
 
 	if (!memcg_can_account_kmem(memcg))
-		return cachep;
+		goto out;
 
 	idx = memcg_cache_id(memcg);
 
@@ -3564,29 +3558,38 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 	 * code updating memcg_caches will issue a write barrier to match this.
 	 */
 	read_barrier_depends();
-	if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
-		/*
-		 * If we are in a safe context (can wait, and not in interrupt
-		 * context), we could be be predictable and return right away.
-		 * This would guarantee that the allocation being performed
-		 * already belongs in the new cache.
-		 *
-		 * However, there are some clashes that can arrive from locking.
-		 * For instance, because we acquire the slab_mutex while doing
-		 * kmem_cache_dup, this means no further allocation could happen
-		 * with the slab_mutex held.
-		 *
-		 * Also, because cache creation issue get_online_cpus(), this
-		 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
-		 * that ends up reversed during cpu hotplug. (cpuset allocates
-		 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
-		 * better to defer everything.
-		 */
-		memcg_create_cache_enqueue(memcg, cachep);
-		return cachep;
+	if (likely(cachep->memcg_params->memcg_caches[idx])) {
+		cachep = cachep->memcg_params->memcg_caches[idx];
+		goto out;
 	}
 
-	return cachep->memcg_params->memcg_caches[idx];
+	/* The corresponding put will be done in the workqueue. */
+	if (!css_tryget(&memcg->css))
+		goto out;
+	rcu_read_unlock();
+
+	/*
+	 * If we are in a safe context (can wait, and not in interrupt
+	 * context), we could be be predictable and return right away.
+	 * This would guarantee that the allocation being performed
+	 * already belongs in the new cache.
+	 *
+	 * However, there are some clashes that can arrive from locking.
+	 * For instance, because we acquire the slab_mutex while doing
+	 * kmem_cache_dup, this means no further allocation could happen
+	 * with the slab_mutex held.
+	 *
+	 * Also, because cache creation issue get_online_cpus(), this
+	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+	 * that ends up reversed during cpu hotplug. (cpuset allocates
+	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+	 * better to defer everything.
+	 */
+	memcg_create_cache_enqueue(memcg, cachep);
+	return cachep;
+out:
+	rcu_read_unlock();
+	return cachep;
 }
 EXPORT_SYMBOL(__memcg_kmem_get_cache);
 
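The last two hunks close a use-after-free window: calling rcu_read_unlock() before css_tryget() meant the memcg could be freed between the two calls. Below is a self-contained userspace sketch of the same ordering rule, with a pthread mutex standing in for rcu_read_lock() and an atomic refcount for css_tryget(); all names here (registry, lookup_and_get) are hypothetical, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;	/* 0 means: being torn down, tryget must fail */
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *registry;	/* protected by registry_lock */

/* Succeed only while at least one reference is still live. */
static int obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return 1;
	return 0;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

/* Correct ordering: take the reference *before* dropping the lookup lock,
 * mirroring how the patch keeps rcu_read_lock() held across css_tryget().
 * Unlocking first would let another thread drop the last reference and
 * free the object before our tryget runs. */
static struct obj *lookup_and_get(void)
{
	struct obj *o;

	pthread_mutex_lock(&registry_lock);
	o = registry;
	if (o && !obj_tryget(o))
		o = NULL;
	pthread_mutex_unlock(&registry_lock);
	return o;		/* caller must obj_put() when done */
}

int main(void)
{
	registry = malloc(sizeof(*registry));
	if (registry == NULL)
		return 1;
	atomic_init(&registry->refcnt, 1);

	struct obj *o = lookup_and_get();
	if (o)
		obj_put(o);

	obj_put(registry);	/* drop the registry's own reference */
	registry = NULL;
	return 0;
}

The design point is the one the patch makes: acquire the reference while the structure is still guaranteed live, and only then release the read-side protection.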