author     Tejun Heo <tj@kernel.org>    2013-12-06 15:11:56 -0500
committer  Tejun Heo <tj@kernel.org>    2013-12-06 15:11:56 -0500
commit     780cd8b347b52584704438e599274f25c0a0fd13 (patch)
tree       ff2a27330b53429234d45db384803f0783975fd3
parent     87fb54f1b5a447662854f176eeb1ba92d5ffc1d5 (diff)
cgroup: make for_each_subsys() useable under cgroup_root_mutex
We want to use for_each_subsys() in cgroupfs_root handling where only
cgroup_root_mutex is held. The only way cgroup_subsys[] can change is
through module load/unload, so make cgroup_[un]load_subsys() grab
cgroup_root_mutex too, and update the lockdep annotation in
for_each_subsys() to allow either cgroup_mutex or cgroup_root_mutex.
* Lockdep annotation is moved from the inner 'if' condition to the outer
  'for' init clause. There's no reason to execute the assertion on every
  iteration.
* Loop index @i is renamed to @ssid. Indices iterating through subsys
  will be [re]named to @ssid gradually.
v2: cgroup_assert_mutex_or_root_locked() caused a build failure if
    !CONFIG_LOCKDEP. Conditionalize its definition. The build failure
    was reported by the kbuild test robot.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: kbuild test robot <fengguang.wu@intel.com>
-rw-r--r--  kernel/cgroup.c  26
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c22eecbbbc1c..4a7fb4043cff 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -98,6 +98,14 @@ static DEFINE_MUTEX(cgroup_root_mutex);
 			   lockdep_is_held(&cgroup_mutex),		\
 			   "cgroup_mutex or RCU read lock required");
 
+#ifdef CONFIG_LOCKDEP
+#define cgroup_assert_mutex_or_root_locked()				\
+	WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) &&	\
+				     !lockdep_is_held(&cgroup_root_mutex)))
+#else
+#define cgroup_assert_mutex_or_root_locked()	do { } while (0)
+#endif
+
 /*
  * cgroup destruction makes heavy use of work items and there can be a lot
  * of concurrent destructions.  Use a separate workqueue so that cgroup
@@ -237,14 +245,15 @@ static int notify_on_release(const struct cgroup *cgrp)
 /**
  * for_each_subsys - iterate all loaded cgroup subsystems
  * @ss: the iteration cursor
- * @i: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
+ * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
  *
- * Should be called under cgroup_mutex.
+ * Iterates through all loaded subsystems.  Should be called under
+ * cgroup_mutex or cgroup_root_mutex.
  */
-#define for_each_subsys(ss, i)						\
-	for ((i) = 0; (i) < CGROUP_SUBSYS_COUNT; (i)++)			\
-		if (({ lockdep_assert_held(&cgroup_mutex);		\
-		       !((ss) = cgroup_subsys[i]); })) { }		\
+#define for_each_subsys(ss, ssid)					\
+	for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; });	\
+	     (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)			\
+		if (!((ss) = cgroup_subsys[(ssid)])) { }		\
 		else
 
 /**
@@ -4592,6 +4601,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	cgroup_init_cftsets(ss);
 
 	mutex_lock(&cgroup_mutex);
+	mutex_lock(&cgroup_root_mutex);
 	cgroup_subsys[ss->subsys_id] = ss;
 
 	/*
@@ -4641,10 +4651,12 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		goto err_unload;
 
 	/* success! */
+	mutex_unlock(&cgroup_root_mutex);
 	mutex_unlock(&cgroup_mutex);
 	return 0;
 
 err_unload:
+	mutex_unlock(&cgroup_root_mutex);
 	mutex_unlock(&cgroup_mutex);
 	/* @ss can't be mounted here as try_module_get() would fail */
 	cgroup_unload_subsys(ss);
@@ -4674,6 +4686,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	BUG_ON(ss->root != &cgroup_dummy_root);
 
 	mutex_lock(&cgroup_mutex);
+	mutex_lock(&cgroup_root_mutex);
 
 	offline_css(cgroup_css(cgroup_dummy_top, ss));
 
@@ -4708,6 +4721,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	ss->css_free(cgroup_css(cgroup_dummy_top, ss));
 	RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
 
+	mutex_unlock(&cgroup_root_mutex);
 	mutex_unlock(&cgroup_mutex);
 }
 EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
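
For readers less familiar with the macro trick above, here is a minimal
userspace sketch of the same pattern, assuming nothing from the kernel:
the lock assertion sits in the 'for' init clause so it runs once per walk
(not once per iteration), it compiles away to a no-op when the debug
option is off, and the trailing if/else skips empty (unloaded) slots while
still binding the caller's loop body. All names in the sketch
(DEBUG_LOCKS, table_locked, subsys_table, for_each_loaded) are illustrative
stand-ins, not kernel identifiers.

/* sketch.c -- illustrative stand-in for the for_each_subsys() pattern */
#include <assert.h>
#include <stdio.h>

#define SUBSYS_COUNT 4

/* NULL slot mimics a subsystem whose module is not loaded. */
static const char *subsys_table[SUBSYS_COUNT] = {
	"cpu", NULL, "memory", "blkio",
};

static int table_locked;	/* stands in for cgroup_mutex / cgroup_root_mutex */

#ifdef DEBUG_LOCKS
#define assert_table_locked()	assert(table_locked)
#else
#define assert_table_locked()	do { } while (0)	/* compiled out, like !CONFIG_LOCKDEP */
#endif

/*
 * The assertion runs once, in the for-init clause (a GCC/Clang statement
 * expression); the if/else skips NULL slots but hands the 'else' branch
 * to whatever body the caller writes after the macro.
 */
#define for_each_loaded(name, id)					\
	for (({ assert_table_locked(); (id) = 0; });			\
	     (id) < SUBSYS_COUNT; (id)++)				\
		if (!((name) = subsys_table[(id)])) { }			\
		else

int main(void)
{
	const char *name;
	int id;

	table_locked = 1;		/* "take" the lock */
	for_each_loaded(name, id)
		printf("%d: %s\n", id, name);
	table_locked = 0;
	return 0;
}

Built with cc -DDEBUG_LOCKS sketch.c the walk asserts that the lock is
held; built without the define the check disappears entirely, which is the
same trade the CONFIG_LOCKDEP conditional above is making.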