summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/slab.h6
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slab_common.c30
3 files changed, 19 insertions, 19 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 98c3d12b7275..6008d884e621 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -643,10 +643,10 @@ struct memcg_cache_params {
643 struct list_head children_node; 643 struct list_head children_node;
644 struct list_head kmem_caches_node; 644 struct list_head kmem_caches_node;
645 645
646 void (*deact_fn)(struct kmem_cache *); 646 void (*work_fn)(struct kmem_cache *);
647 union { 647 union {
648 struct rcu_head deact_rcu_head; 648 struct rcu_head rcu_head;
649 struct work_struct deact_work; 649 struct work_struct work;
650 }; 650 };
651 }; 651 };
652 }; 652 };
diff --git a/mm/slab.h b/mm/slab.h
index 86f7ede21203..7ef695b91919 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -291,7 +291,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
291extern void slab_init_memcg_params(struct kmem_cache *); 291extern void slab_init_memcg_params(struct kmem_cache *);
292extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); 292extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
293extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s, 293extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
294 void (*deact_fn)(struct kmem_cache *)); 294 void (*work_fn)(struct kmem_cache *));
295 295
296#else /* CONFIG_MEMCG_KMEM */ 296#else /* CONFIG_MEMCG_KMEM */
297 297
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 07ee4189b40c..f4dd9f75751c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -691,17 +691,17 @@ out_unlock:
691 put_online_cpus(); 691 put_online_cpus();
692} 692}
693 693
694static void kmemcg_deactivate_workfn(struct work_struct *work) 694static void kmemcg_workfn(struct work_struct *work)
695{ 695{
696 struct kmem_cache *s = container_of(work, struct kmem_cache, 696 struct kmem_cache *s = container_of(work, struct kmem_cache,
697 memcg_params.deact_work); 697 memcg_params.work);
698 698
699 get_online_cpus(); 699 get_online_cpus();
700 get_online_mems(); 700 get_online_mems();
701 701
702 mutex_lock(&slab_mutex); 702 mutex_lock(&slab_mutex);
703 703
704 s->memcg_params.deact_fn(s); 704 s->memcg_params.work_fn(s);
705 705
706 mutex_unlock(&slab_mutex); 706 mutex_unlock(&slab_mutex);
707 707
@@ -712,36 +712,36 @@ static void kmemcg_deactivate_workfn(struct work_struct *work)
712 css_put(&s->memcg_params.memcg->css); 712 css_put(&s->memcg_params.memcg->css);
713} 713}
714 714
715static void kmemcg_deactivate_rcufn(struct rcu_head *head) 715static void kmemcg_rcufn(struct rcu_head *head)
716{ 716{
717 struct kmem_cache *s = container_of(head, struct kmem_cache, 717 struct kmem_cache *s = container_of(head, struct kmem_cache,
718 memcg_params.deact_rcu_head); 718 memcg_params.rcu_head);
719 719
720 /* 720 /*
721 * We need to grab blocking locks. Bounce to ->deact_work. The 721 * We need to grab blocking locks. Bounce to ->work. The
722 * work item shares the space with the RCU head and can't be 722 * work item shares the space with the RCU head and can't be
723 * initialized earlier. 723 * initialized earlier.
724 */ 724 */
725 INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn); 725 INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
726 queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work); 726 queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
727} 727}
728 728
729/** 729/**
730 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a 730 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
731 * sched RCU grace period 731 * sched RCU grace period
732 * @s: target kmem_cache 732 * @s: target kmem_cache
733 * @deact_fn: deactivation function to call 733 * @work_fn: deactivation function to call
734 * 734 *
735 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex 735 * Schedule @work_fn to be invoked with online cpus, mems and slab_mutex
736 * held after a sched RCU grace period. The slab is guaranteed to stay 736 * held after a sched RCU grace period. The slab is guaranteed to stay
737 * alive until @deact_fn is finished. This is to be used from 737 * alive until @work_fn is finished. This is to be used from
738 * __kmemcg_cache_deactivate(). 738 * __kmemcg_cache_deactivate().
739 */ 739 */
740void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s, 740void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
741 void (*deact_fn)(struct kmem_cache *)) 741 void (*work_fn)(struct kmem_cache *))
742{ 742{
743 if (WARN_ON_ONCE(is_root_cache(s)) || 743 if (WARN_ON_ONCE(is_root_cache(s)) ||
744 WARN_ON_ONCE(s->memcg_params.deact_fn)) 744 WARN_ON_ONCE(s->memcg_params.work_fn))
745 return; 745 return;
746 746
747 if (s->memcg_params.root_cache->memcg_params.dying) 747 if (s->memcg_params.root_cache->memcg_params.dying)
@@ -750,8 +750,8 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
750 /* pin memcg so that @s doesn't get destroyed in the middle */ 750 /* pin memcg so that @s doesn't get destroyed in the middle */
751 css_get(&s->memcg_params.memcg->css); 751 css_get(&s->memcg_params.memcg->css);
752 752
753 s->memcg_params.deact_fn = deact_fn; 753 s->memcg_params.work_fn = work_fn;
754 call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn); 754 call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
755} 755}
756 756
757void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) 757void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)