about summary refs log tree commit diff stats
path: root/kernel/cgroup.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-12-06 15:11:56 -0500
committerTejun Heo <tj@kernel.org>2013-12-06 15:11:56 -0500
commit1c6727af4b495a9ec74c46d1fc08e508e675899d (patch)
tree83031295e5dc2d7fab009cd1ae5eb155c116bd72 /kernel/cgroup.c
parentc81c925ad9b0460a14ec35b52c61158da0733d51 (diff)
cgroup: implement for_each_css()
There are enough places where css's of a cgroup are iterated, which currently uses for_each_root_subsys() + explicit cgroup_css(). This patch implements for_each_css() and replaces the above combination with it. This patch doesn't introduce any behavior changes. v2: Updated to apply cleanly on top of v2 of "cgroup: fix css leaks on online_css() failure" Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--kernel/cgroup.c57
1 file changed, 31 insertions(+), 26 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d12c29f42feb..329fde82ef7c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -243,6 +243,21 @@ static int notify_on_release(const struct cgroup *cgrp)
243} 243}
244 244
245/** 245/**
246 * for_each_css - iterate all css's of a cgroup
247 * @css: the iteration cursor
248 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
249 * @cgrp: the target cgroup to iterate css's of
250 *
251 * Should be called under cgroup_mutex.
252 */
253#define for_each_css(css, ssid, cgrp) \
254 for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
255 if (!((css) = rcu_dereference_check( \
256 (cgrp)->subsys[(ssid)], \
257 lockdep_is_held(&cgroup_mutex)))) { } \
258 else
259
260/**
246 * for_each_subsys - iterate all loaded cgroup subsystems 261 * for_each_subsys - iterate all loaded cgroup subsystems
247 * @ss: the iteration cursor 262 * @ss: the iteration cursor
248 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end 263 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
@@ -1942,8 +1957,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
1942 bool threadgroup) 1957 bool threadgroup)
1943{ 1958{
1944 int retval, i, group_size; 1959 int retval, i, group_size;
1945 struct cgroup_subsys *ss, *failed_ss = NULL;
1946 struct cgroupfs_root *root = cgrp->root; 1960 struct cgroupfs_root *root = cgrp->root;
1961 struct cgroup_subsys_state *css, *failed_css = NULL;
1947 /* threadgroup list cursor and array */ 1962 /* threadgroup list cursor and array */
1948 struct task_struct *leader = tsk; 1963 struct task_struct *leader = tsk;
1949 struct task_and_cgroup *tc; 1964 struct task_and_cgroup *tc;
@@ -2016,13 +2031,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2016 /* 2031 /*
2017 * step 1: check that we can legitimately attach to the cgroup. 2032 * step 1: check that we can legitimately attach to the cgroup.
2018 */ 2033 */
2019 for_each_root_subsys(root, ss) { 2034 for_each_css(css, i, cgrp) {
2020 struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); 2035 if (css->ss->can_attach) {
2021 2036 retval = css->ss->can_attach(css, &tset);
2022 if (ss->can_attach) {
2023 retval = ss->can_attach(css, &tset);
2024 if (retval) { 2037 if (retval) {
2025 failed_ss = ss; 2038 failed_css = css;
2026 goto out_cancel_attach; 2039 goto out_cancel_attach;
2027 } 2040 }
2028 } 2041 }
@@ -2058,12 +2071,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2058 /* 2071 /*
2059 * step 4: do subsystem attach callbacks. 2072 * step 4: do subsystem attach callbacks.
2060 */ 2073 */
2061 for_each_root_subsys(root, ss) { 2074 for_each_css(css, i, cgrp)
2062 struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); 2075 if (css->ss->attach)
2063 2076 css->ss->attach(css, &tset);
2064 if (ss->attach)
2065 ss->attach(css, &tset);
2066 }
2067 2077
2068 /* 2078 /*
2069 * step 5: success! and cleanup 2079 * step 5: success! and cleanup
@@ -2080,13 +2090,11 @@ out_put_css_set_refs:
2080 } 2090 }
2081out_cancel_attach: 2091out_cancel_attach:
2082 if (retval) { 2092 if (retval) {
2083 for_each_root_subsys(root, ss) { 2093 for_each_css(css, i, cgrp) {
2084 struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); 2094 if (css == failed_css)
2085
2086 if (ss == failed_ss)
2087 break; 2095 break;
2088 if (ss->cancel_attach) 2096 if (css->ss->cancel_attach)
2089 ss->cancel_attach(css, &tset); 2097 css->ss->cancel_attach(css, &tset);
2090 } 2098 }
2091 } 2099 }
2092out_free_group_list: 2100out_free_group_list:
@@ -4375,9 +4383,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4375 __releases(&cgroup_mutex) __acquires(&cgroup_mutex) 4383 __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
4376{ 4384{
4377 struct dentry *d = cgrp->dentry; 4385 struct dentry *d = cgrp->dentry;
4378 struct cgroup_subsys *ss; 4386 struct cgroup_subsys_state *css;
4379 struct cgroup *child; 4387 struct cgroup *child;
4380 bool empty; 4388 bool empty;
4389 int ssid;
4381 4390
4382 lockdep_assert_held(&d->d_inode->i_mutex); 4391 lockdep_assert_held(&d->d_inode->i_mutex);
4383 lockdep_assert_held(&cgroup_mutex); 4392 lockdep_assert_held(&cgroup_mutex);
@@ -4413,12 +4422,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4413 * will be invoked to perform the rest of destruction once the 4422 * will be invoked to perform the rest of destruction once the
4414 * percpu refs of all css's are confirmed to be killed. 4423 * percpu refs of all css's are confirmed to be killed.
4415 */ 4424 */
4416 for_each_root_subsys(cgrp->root, ss) { 4425 for_each_css(css, ssid, cgrp)
4417 struct cgroup_subsys_state *css = cgroup_css(cgrp, ss); 4426 kill_css(css);
4418
4419 if (css)
4420 kill_css(css);
4421 }
4422 4427
4423 /* 4428 /*
4424 * Mark @cgrp dead. This prevents further task migration and child 4429 * Mark @cgrp dead. This prevents further task migration and child