author     Tejun Heo <tj@kernel.org>                        2017-02-22 18:41:27 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-02-22 19:41:27 -0500
commit     c9fc586403e7c85eee06b2d5dea14ce71c00fcd8
tree       4edb0cd2ebf8b0439239fb1ebe511e3def80e39a
parent     510ded33e075c2bd662b1efab0110f4240325fc9
slab: introduce __kmemcg_cache_deactivate()
__kmem_cache_shrink() is called with %true @deactivate only for memcg
caches. Remove @deactivate from __kmem_cache_shrink() and introduce
__kmemcg_cache_deactivate() instead. Each memcg-supporting allocator
should implement it and it should deactivate and drain the cache.
This is to allow memcg cache deactivation behavior to further deviate
from simple shrinking without messing up __kmem_cache_shrink().
This is pure reorganization and doesn't introduce any observable
behavior changes.
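
For orientation, the resulting split looks roughly like this (a non-buildable sketch condensed from the mm/slub.c hunks below; the function and field names are taken from the diff, the bodies are abbreviated):

int __kmem_cache_shrink(struct kmem_cache *s)
{
	/* Flush per-cpu slabs and free empty ones; no @deactivate flag anymore. */
	flush_all(s);
	/* ... walk the per-node partial lists and discard empty slabs ... */
	return 0;
}

#ifdef CONFIG_MEMCG
void __kmemcg_cache_deactivate(struct kmem_cache *s)
{
	/* Stop caching empty slabs so offline memcgs are not pinned. */
	s->cpu_partial = 0;
	s->min_partial = 0;

	/* cpu_partial is read locklessly (put_cpu_partial), so publish the change. */
	synchronize_sched();

	__kmem_cache_shrink(s);
}
#endif

memcg_deactivate_kmem_caches() then calls __kmemcg_cache_deactivate(c) instead of __kmem_cache_shrink(c, true), so memcg-specific deactivation can grow without touching the generic shrink path.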
v2: Dropped unnecessary ifdef in mm/slab.h as suggested by Vladimir.
Link: http://lkml.kernel.org/r/20170117235411.9408-8-tj@kernel.org
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/slab.c        | 11
-rw-r--r--  mm/slab.h        |  3
-rw-r--r--  mm/slab_common.c |  4
-rw-r--r--  mm/slob.c        |  2
-rw-r--r--  mm/slub.c        | 39
5 files changed, 36 insertions, 23 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2315,7 +2315,7 @@ out:
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2333,9 +2333,16 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 	return (ret ? 1 : 0);
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
+{
+	__kmem_cache_shrink(cachep);
+}
+#endif
+
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -167,7 +167,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
+void __kmemcg_cache_deactivate(struct kmem_cache *s);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 274697e1a42a..59e41bb81575 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -646,7 +646,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmemcg_cache_deactivate(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -794,7 +794,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c)
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3894,7 +3894,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3906,21 +3906,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3971,13 +3956,33 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	return ret;
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *s)
+{
+	/*
+	 * Disable empty slabs caching. Used to avoid pinning offline
+	 * memory cgroups by kmem pages that can be freed.
+	 */
+	s->cpu_partial = 0;
+	s->min_partial = 0;
+
+	/*
+	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
+	 * we have to make sure the change is visible.
+	 */
+	synchronize_sched();
+
+	__kmem_cache_shrink(s);
+}
+#endif
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;