Diffstat (limited to 'mm')
 mm/slab.c        |  4 ++--
 mm/slab.h        |  2 +-
 mm/slab_common.c | 27 +++++++++++++++++++++++++--
 mm/slob.c        |  2 +-
 mm/slub.c        | 19 ++-----------------
 5 files changed, 31 insertions(+), 23 deletions(-)
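In short: __kmem_cache_shrink() loses its bool deactivate parameter. The SLUB-only deactivation work (zeroing cpu_partial and min_partial, then waiting for a grace period) moves out of __kmem_cache_shrink() and into memcg_deactivate_kmem_caches() in mm/slab_common.c, where the stores for all of the dying cgroup's caches are batched under one slab_mutex hold and a single synchronize_sched() runs outside the mutex, rather than one grace-period wait per cache.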
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2332,7 +2332,7 @@ out:
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2352,7 +2352,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -146,7 +146,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
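As a quick illustration of the narrowed interface (a sketch, not part of the patch; my_reclaim() and my_cache are hypothetical), callers now shrink a cache with no second argument:

```c
#include <linux/slab.h>

/*
 * Hypothetical caller: after this patch, shrinking only discards
 * empty slabs; cache deactivation can no longer be requested
 * through this path.
 */
static void my_reclaim(struct kmem_cache *my_cache)
{
	/* kmem_cache_shrink() wraps __kmem_cache_shrink() with cpu/mem
	 * hotplug protection, as shown in mm/slab_common.c below. */
	kmem_cache_shrink(my_cache);
}
```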
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 329b03843863..5d2f24fbafc5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -573,6 +573,29 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 	get_online_cpus();
 	get_online_mems();
 
+#ifdef CONFIG_SLUB
+	/*
+	 * In case of SLUB, we need to disable empty slab caching to
+	 * avoid pinning the offline memory cgroup by freeable kmem
+	 * pages charged to it. SLAB doesn't need this, as it
+	 * periodically purges unused slabs.
+	 */
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL;
+		if (c) {
+			c->cpu_partial = 0;
+			c->min_partial = 0;
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	/*
+	 * kmem_cache->cpu_partial is checked locklessly (see
+	 * put_cpu_partial()). Make sure the change is visible.
+	 */
+	synchronize_sched();
+#endif
+
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		if (!is_root_cache(s))
@@ -584,7 +607,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmem_cache_shrink(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -755,7 +778,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
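The store-then-wait pattern in the new block above deserves spelling out: kmem_cache->cpu_partial is read locklessly by code running with preemption disabled, so after zeroing it the writer must wait out every such section before it may rely on the new value. A minimal sketch of the idea (my names, heavily simplified; not the real SLUB code):

```c
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static int shared_limit = 16;	/* stands in for kmem_cache->cpu_partial */

/* Reader, analogous to put_cpu_partial(): checks the limit locklessly
 * inside a preemption-disabled (sched-RCU) section. */
static void reader(void)
{
	preempt_disable();
	if (READ_ONCE(shared_limit)) {
		/* ... stash the freed object on a per-cpu list ... */
	}
	preempt_enable();
}

/* Writer, analogous to the deactivation loop above. */
static void deactivate(void)
{
	WRITE_ONCE(shared_limit, 0);
	/*
	 * synchronize_sched() returns only after every preemption-disabled
	 * section that might have seen the old value has completed; from
	 * here on, no reader can still be caching based on the old limit.
	 */
	synchronize_sched();
}
```

Note what the relocation buys: a single synchronize_sched() now covers every cache of the dying cgroup and runs outside slab_mutex, whereas the code removed from mm/slub.c below paid for one grace period per cache while holding the mutex.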
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c)
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3883,7 +3883,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3895,21 +3895,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3966,7 +3951,7 @@ static int slab_mem_going_offline_callback(void *arg)
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;
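The comment retained above __kmem_cache_shrink() ("being allocated from last increasing the chance that the last objects are freed in them") describes what the function still does after this patch: free empty slabs and reorder the partial lists fullest-first. A rough sketch of that bucketing step (my types and names; the real code works on struct page and caps its buckets with SHRINK_PROMOTE_MAX):

```c
#include <linux/kernel.h>
#include <linux/list.h>

#define MAX_INUSE 32	/* assumed cap, in the spirit of SHRINK_PROMOTE_MAX */

/* Hypothetical stand-in for the per-slab bookkeeping. */
struct slab_info {
	struct list_head lru;
	int inuse;		/* objects still allocated from this slab */
};

static void shrink_sketch(struct list_head *partial, struct list_head *discard)
{
	struct list_head buckets[MAX_INUSE + 1];
	struct slab_info *s, *tmp;
	int i;

	for (i = 0; i <= MAX_INUSE; i++)
		INIT_LIST_HEAD(&buckets[i]);

	/* Bucket each partial slab by its live-object count; fully empty
	 * slabs go on the discard list to be returned to the page
	 * allocator. */
	list_for_each_entry_safe(s, tmp, partial, lru) {
		if (!s->inuse)
			list_move(&s->lru, discard);
		else
			list_move(&s->lru, &buckets[min(s->inuse, MAX_INUSE)]);
	}

	/* Rebuild the list fullest-first: the emptiest slabs end up at the
	 * tail, are allocated from last, and so get the best chance to
	 * drain completely and be freed by a later shrink. */
	for (i = MAX_INUSE; i >= 1; i--)
		list_splice_tail(&buckets[i], partial);
}
```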
