author     Tejun Heo <tj@kernel.org>                       2017-02-22 18:41:27 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 19:41:27 -0500
commit     c9fc586403e7c85eee06b2d5dea14ce71c00fcd8 (patch)
tree       4edb0cd2ebf8b0439239fb1ebe511e3def80e39a
parent     510ded33e075c2bd662b1efab0110f4240325fc9 (diff)
slab: introduce __kmemcg_cache_deactivate()
__kmem_cache_shrink() is called with %true @deactivate only for memcg caches. Remove @deactivate from __kmem_cache_shrink() and introduce __kmemcg_cache_deactivate() instead. Each memcg-supporting allocator should implement it and it should deactivate and drain the cache.

This is to allow memcg cache deactivation behavior to further deviate from simple shrinking without messing up __kmem_cache_shrink().

This is pure reorganization and doesn't introduce any observable behavior changes.

v2: Dropped unnecessary ifdef in mm/slab.h as suggested by Vladimir.

Link: http://lkml.kernel.org/r/20170117235411.9408-8-tj@kernel.org
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/slab.c          11
-rw-r--r--  mm/slab.h           3
-rw-r--r--  mm/slab_common.c    4
-rw-r--r--  mm/slob.c           2
-rw-r--r--  mm/slub.c          39
5 files changed, 36 insertions(+), 23 deletions(-)
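For quick orientation before the hunks: below is a minimal sketch of the interface that results from this patch, drawn from the mm/slab.h change. The forward declaration and the comments are illustrative annotations added here, not text from the patch itself.

/* Resulting interface (sketch; see the mm/slab.h hunk below). */
struct kmem_cache;

/*
 * Plain shrinking: the @deactivate flag is gone.  kmem_cache_shrink(),
 * cache shutdown and the memory-offline callback all call this directly.
 */
int __kmem_cache_shrink(struct kmem_cache *s);

#ifdef CONFIG_MEMCG
/*
 * Per-allocator memcg cache deactivation: SLAB simply forwards to
 * __kmem_cache_shrink(); SLUB first disables caching of empty slabs
 * (cpu_partial = min_partial = 0, then synchronize_sched()) and then
 * shrinks.  SLOB is only touched for the signature change.
 */
void __kmemcg_cache_deactivate(struct kmem_cache *s);
#endif

In this patch the only caller of the new hook is memcg_deactivate_kmem_caches(), which is what lets deactivation keep diverging from plain shrinking without touching __kmem_cache_shrink().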
diff --git a/mm/slab.c b/mm/slab.c
index 8a0e3392f181..bd63450a9b16 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2315,7 +2315,7 @@ out:
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2333,9 +2333,16 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 	return (ret ? 1 : 0);
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
+{
+	__kmem_cache_shrink(cachep);
+}
+#endif
+
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
index 9631bb27c772..7bff1ee513c2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -167,7 +167,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
+void __kmemcg_cache_deactivate(struct kmem_cache *s);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 274697e1a42a..59e41bb81575 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -646,7 +646,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmemcg_cache_deactivate(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -794,7 +794,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
diff --git a/mm/slob.c b/mm/slob.c
index 5ec158054ffe..eac04d4357ec 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c)
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 03b012bcb5fa..8a4591526f37 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3894,7 +3894,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3906,21 +3906,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3971,13 +3956,33 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	return ret;
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *s)
+{
+	/*
+	 * Disable empty slabs caching. Used to avoid pinning offline
+	 * memory cgroups by kmem pages that can be freed.
+	 */
+	s->cpu_partial = 0;
+	s->min_partial = 0;
+
+	/*
+	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
+	 * we have to make sure the change is visible.
+	 */
+	synchronize_sched();
+
+	__kmem_cache_shrink(s);
+}
+#endif
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;