author      Vladimir Davydov <vdavydov.dev@gmail.com>        2016-12-12 19:41:32 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>   2016-12-12 21:55:06 -0500
commit      89e364db71fb5e7fc8d93228152abfa67daf35fa
tree        7e70cba61d27fc6e7c7ebd21ec498b808ba2e132 /mm/slab_common.c
parent      13583c3d3224508582ec03d881d0b68dd3ee8e10
slub: move synchronize_sched out of slab_mutex on shrink
synchronize_sched() is a heavy operation, and calling it for each cache
owned by a memory cgroup being destroyed may take quite some time. What
is worse, it's currently called under the slab_mutex, stalling all work
items doing cache creation/destruction.
Actually, there isn't much point in calling synchronize_sched() for each
cache - it's enough to call it just once - after setting cpu_partial for
all caches and before shrinking them. This way, we can also move it out
of the slab_mutex, which we have to hold for iterating over the slab
cache list.
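
To see why batching matters, here is a small stand-alone C model of the two
schemes (illustrative only: the fake_cache struct, the fake_* helpers, NCACHES
and the grace_periods counter are invented for this sketch and are not kernel
code; the real change is the mm/slab_common.c diff below):

/* Stand-alone model of the batching idea, not kernel code: it only
 * counts how many "grace periods" each scheme has to pay for. */
#include <stdio.h>

#define NCACHES 100			/* pretend the memcg owns 100 caches */

struct fake_cache {
	unsigned int cpu_partial;
	unsigned int min_partial;
};

static struct fake_cache caches[NCACHES];
static int grace_periods;		/* stand-in for the cost of synchronize_sched() */

static void fake_synchronize_sched(void)
{
	grace_periods++;		/* the real call blocks for a full RCU-sched grace period */
}

static void fake_shrink(struct fake_cache *c)
{
	(void)c;			/* would release the cache's empty slabs */
}

/* Old scheme: one grace period per cache, all while "slab_mutex" is held. */
static void deactivate_per_cache(void)
{
	for (int i = 0; i < NCACHES; i++) {
		caches[i].cpu_partial = 0;
		caches[i].min_partial = 0;
		fake_synchronize_sched();
		fake_shrink(&caches[i]);
	}
}

/* New scheme: flip every cache first, wait once outside the lock, then shrink. */
static void deactivate_batched(void)
{
	for (int i = 0; i < NCACHES; i++) {
		caches[i].cpu_partial = 0;
		caches[i].min_partial = 0;
	}
	fake_synchronize_sched();
	for (int i = 0; i < NCACHES; i++)
		fake_shrink(&caches[i]);
}

int main(void)
{
	grace_periods = 0;
	deactivate_per_cache();
	printf("per-cache scheme: %d grace periods\n", grace_periods);

	grace_periods = 0;
	deactivate_batched();
	printf("batched scheme:   %d grace periods\n", grace_periods);
	return 0;
}

With N child caches the old scheme pays N grace periods while slab_mutex is
held; the new one pays a single grace period with the mutex dropped, which is
exactly what the patch below does with synchronize_sched().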
Link: https://bugzilla.kernel.org/show_bug.cgi?id=172991
Link: http://lkml.kernel.org/r/0a10d71ecae3db00fb4421bcd3f82bcc911f4be4.1475329751.git.vdavydov.dev@gmail.com
Signed-off-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Reported-by: Doug Smythies <dsmythies@telus.net>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--   mm/slab_common.c   27
1 file changed, 25 insertions, 2 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 329b03843863..5d2f24fbafc5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -573,6 +573,29 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 	get_online_cpus();
 	get_online_mems();
 
+#ifdef CONFIG_SLUB
+	/*
+	 * In case of SLUB, we need to disable empty slab caching to
+	 * avoid pinning the offline memory cgroup by freeable kmem
+	 * pages charged to it. SLAB doesn't need this, as it
+	 * periodically purges unused slabs.
+	 */
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL;
+		if (c) {
+			c->cpu_partial = 0;
+			c->min_partial = 0;
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	/*
+	 * kmem_cache->cpu_partial is checked locklessly (see
+	 * put_cpu_partial()). Make sure the change is visible.
+	 */
+	synchronize_sched();
+#endif
+
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		if (!is_root_cache(s))
@@ -584,7 +607,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmem_cache_shrink(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -755,7 +778,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
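
The single synchronize_sched() is what keeps the lockless s->cpu_partial check
in put_cpu_partial() safe: that check runs in a non-preemptible section, and an
RCU-sched grace period only ends once every such section that was running when
it started has finished. A rough user-space analogy (purely illustrative: the
fake_synchronize_sched() helper, sched_lock and reader() are invented for this
sketch; the read lock stands in for the preempt-disabled reader, a
write-lock/unlock pair for the grace period):

/* Toy analogy, not kernel code: model the "set cpu_partial to zero,
 * then wait until nobody can still see the old value" step. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t sched_lock = PTHREAD_RWLOCK_INITIALIZER;
static _Atomic unsigned int cpu_partial = 8;	/* the knob readers check locklessly */

static void *reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		pthread_rwlock_rdlock(&sched_lock);	/* "preemption disabled" */
		unsigned int n = cpu_partial;		/* lockless read of the knob */
		(void)n;	/* ...would stash a slab on the per-cpu partial list if n > 0... */
		pthread_rwlock_unlock(&sched_lock);	/* "preemption enabled" */
	}
	return NULL;
}

static void fake_synchronize_sched(void)
{
	/* Cannot return before every reader that might have seen the old
	 * value has left its critical section. */
	pthread_rwlock_wrlock(&sched_lock);
	pthread_rwlock_unlock(&sched_lock);
}

int main(void)
{
	pthread_t readers[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&readers[i], NULL, reader, NULL);

	cpu_partial = 0;		/* publish the new value once for all caches... */
	fake_synchronize_sched();	/* ...then wait out the old readers, once */
	/* From here on it is safe to shrink: nobody is still acting on the
	 * old cpu_partial value. */

	for (int i = 0; i < 4; i++)
		pthread_join(readers[i], NULL);
	printf("cpu_partial is now %u\n", (unsigned int)cpu_partial);
	return 0;
}

The write-lock trick is only a teaching analogy for a grace period; in the
kernel the readers never take a lock at all, which is why the cheap read in
put_cpu_partial() has to be paired with the heavyweight synchronize_sched() on
the update side, and why it pays to issue that wait once rather than per cache.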