summaryrefslogtreecommitdiffstats
path: root/mm/slab_common.c
diff options
context:
space:
mode:
authorRoman Gushchin <guro@fb.com>2019-07-11 23:56:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-07-12 14:05:44 -0400
commit0b14e8aa68223c2c124d408aa4b110b364d13c53 (patch)
treececbf49a174c630e88347f874ce05afd1d3bc630 /mm/slab_common.c
parentc03914b7aa319fb2b6701a6427c13752c7418b9b (diff)
mm: memcg/slab: rename slab delayed deactivation functions and fields
The delayed work/rcu deactivation infrastructure of non-root kmem_caches can be also used for asynchronous release of these objects. Let's get rid of the word "deactivation" in corresponding names to make the code look better after generalization.

It's easier to make the renaming first, so that the generalized code will look consistent from scratch.

Let's rename struct memcg_cache_params fields:
  deact_fn -> work_fn
  deact_rcu_head -> rcu_head
  deact_work -> work

And RCU/delayed work callbacks in slab common code:
  kmemcg_deactivate_rcufn -> kmemcg_rcufn
  kmemcg_deactivate_workfn -> kmemcg_workfn

This patch contains no functional changes, only renamings.

Link: http://lkml.kernel.org/r/20190611231813.3148843-3-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--mm/slab_common.c30
1 files changed, 15 insertions, 15 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 07ee4189b40c..f4dd9f75751c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -691,17 +691,17 @@ out_unlock:
691 put_online_cpus(); 691 put_online_cpus();
692} 692}
693 693
694static void kmemcg_deactivate_workfn(struct work_struct *work) 694static void kmemcg_workfn(struct work_struct *work)
695{ 695{
696 struct kmem_cache *s = container_of(work, struct kmem_cache, 696 struct kmem_cache *s = container_of(work, struct kmem_cache,
697 memcg_params.deact_work); 697 memcg_params.work);
698 698
699 get_online_cpus(); 699 get_online_cpus();
700 get_online_mems(); 700 get_online_mems();
701 701
702 mutex_lock(&slab_mutex); 702 mutex_lock(&slab_mutex);
703 703
704 s->memcg_params.deact_fn(s); 704 s->memcg_params.work_fn(s);
705 705
706 mutex_unlock(&slab_mutex); 706 mutex_unlock(&slab_mutex);
707 707
@@ -712,36 +712,36 @@ static void kmemcg_deactivate_workfn(struct work_struct *work)
712 css_put(&s->memcg_params.memcg->css); 712 css_put(&s->memcg_params.memcg->css);
713} 713}
714 714
715static void kmemcg_deactivate_rcufn(struct rcu_head *head) 715static void kmemcg_rcufn(struct rcu_head *head)
716{ 716{
717 struct kmem_cache *s = container_of(head, struct kmem_cache, 717 struct kmem_cache *s = container_of(head, struct kmem_cache,
718 memcg_params.deact_rcu_head); 718 memcg_params.rcu_head);
719 719
720 /* 720 /*
721 * We need to grab blocking locks. Bounce to ->deact_work. The 721 * We need to grab blocking locks. Bounce to ->work. The
722 * work item shares the space with the RCU head and can't be 722 * work item shares the space with the RCU head and can't be
723 * initialized eariler. 723 * initialized eariler.
724 */ 724 */
725 INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn); 725 INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
726 queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work); 726 queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
727} 727}
728 728
729/** 729/**
730 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a 730 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
731 * sched RCU grace period 731 * sched RCU grace period
732 * @s: target kmem_cache 732 * @s: target kmem_cache
733 * @deact_fn: deactivation function to call 733 * @work_fn: deactivation function to call
734 * 734 *
735 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex 735 * Schedule @work_fn to be invoked with online cpus, mems and slab_mutex
736 * held after a sched RCU grace period. The slab is guaranteed to stay 736 * held after a sched RCU grace period. The slab is guaranteed to stay
737 * alive until @deact_fn is finished. This is to be used from 737 * alive until @work_fn is finished. This is to be used from
738 * __kmemcg_cache_deactivate(). 738 * __kmemcg_cache_deactivate().
739 */ 739 */
740void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s, 740void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
741 void (*deact_fn)(struct kmem_cache *)) 741 void (*work_fn)(struct kmem_cache *))
742{ 742{
743 if (WARN_ON_ONCE(is_root_cache(s)) || 743 if (WARN_ON_ONCE(is_root_cache(s)) ||
744 WARN_ON_ONCE(s->memcg_params.deact_fn)) 744 WARN_ON_ONCE(s->memcg_params.work_fn))
745 return; 745 return;
746 746
747 if (s->memcg_params.root_cache->memcg_params.dying) 747 if (s->memcg_params.root_cache->memcg_params.dying)
@@ -750,8 +750,8 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
750 /* pin memcg so that @s doesn't get destroyed in the middle */ 750 /* pin memcg so that @s doesn't get destroyed in the middle */
751 css_get(&s->memcg_params.memcg->css); 751 css_get(&s->memcg_params.memcg->css);
752 752
753 s->memcg_params.deact_fn = deact_fn; 753 s->memcg_params.work_fn = work_fn;
754 call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn); 754 call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
755} 755}
756 756
757void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) 757void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)