author		Vladimir Davydov <vdavydov.dev@gmail.com>	2016-12-12 19:41:32 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 21:55:06 -0500
commit		89e364db71fb5e7fc8d93228152abfa67daf35fa (patch)
tree		7e70cba61d27fc6e7c7ebd21ec498b808ba2e132 /mm/slub.c
parent		13583c3d3224508582ec03d881d0b68dd3ee8e10 (diff)
slub: move synchronize_sched out of slab_mutex on shrink
synchronize_sched() is a heavy operation, and calling it for each cache
owned by a memory cgroup being destroyed may take quite some time.  What
is worse, it's currently called under the slab_mutex, stalling all work
items doing cache creation/destruction.

Actually, there isn't much point in calling synchronize_sched() for each
cache - it's enough to call it just once, after setting cpu_partial for
all caches and before shrinking them.  This way, we can also move it out
of the slab_mutex, which we have to hold for iterating over the slab
cache list.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=172991
Link: http://lkml.kernel.org/r/0a10d71ecae3db00fb4421bcd3f82bcc911f4be4.1475329751.git.vdavydov.dev@gmail.com
Signed-off-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Reported-by: Doug Smythies <dsmythies@telus.net>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
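The shape of the fix the message describes can be sketched as two passes
over the cache list with a single grace period in between.  The
caller-side rework lives in mm/slab_common.c and is outside this
diffstat; the function name and the flat walk over slab_caches below are
simplified assumptions for illustration, not the committed code.

	/*
	 * Illustrative sketch only: zero the per-cache limits for every
	 * cache under slab_mutex, wait for one RCU-sched grace period
	 * with the mutex dropped, then shrink.  (The real caller walks
	 * only the caches belonging to the dying memory cgroup.)
	 */
	static void deactivate_and_shrink_sketch(void)
	{
		struct kmem_cache *s;

		/* Pass 1: disable empty-slab caching for all caches. */
		mutex_lock(&slab_mutex);
		list_for_each_entry(s, &slab_caches, list) {
			s->cpu_partial = 0;
			s->min_partial = 0;
		}
		mutex_unlock(&slab_mutex);

		/*
		 * One synchronize_sched() for the whole set, issued
		 * outside slab_mutex: put_cpu_partial() reads
		 * s->cpu_partial locklessly, so every CPU must observe
		 * the zeroed limits before shrinking starts.
		 */
		synchronize_sched();

		/* Pass 2: shrink each cache; no per-cache grace period. */
		mutex_lock(&slab_mutex);
		list_for_each_entry(s, &slab_caches, list)
			__kmem_cache_shrink(s);
		mutex_unlock(&slab_mutex);
	}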
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2b3e740609e9..4a861f265cd7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3883,7 +3883,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3895,21 +3895,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3966,7 +3951,7 @@ static int slab_mem_going_offline_callback(void *arg)
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;
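The comment removed above points at put_cpu_partial() as the lockless
reader of s->cpu_partial.  A condensed sketch of that check, based on
the 4.9-era mm/slub.c (the real function also links the page into the
per-cpu partial list via a cmpxchg loop, elided here; treat this as
illustrative, not the exact source):

	static void put_cpu_partial_sketch(struct kmem_cache *s,
					   struct page *page, int drain)
	{
		struct page *oldpage = this_cpu_read(s->cpu_slab->partial);
		int pobjects = oldpage ? oldpage->pobjects : 0;

		/*
		 * Lockless read of s->cpu_partial: no slab_mutex is held
		 * here, and the section runs with preemption disabled.
		 * After the shrink path zeroes s->cpu_partial, a single
		 * synchronize_sched() guarantees every CPU has left this
		 * section and will see the new limit.
		 */
		if (drain && pobjects > s->cpu_partial)
			unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
	}

This is what makes the single grace period sufficient: once it elapses,
no CPU can still be comparing against a stale cpu_partial value, so all
caches can then be shrunk without further synchronization.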