author     Tejun Heo <tj@kernel.org>  2014-05-13 12:19:22 -0400
committer  Tejun Heo <tj@kernel.org>  2014-05-13 12:19:22 -0400
commit     a9746d8da786bc79b3b4ae1baa0fbbc4b795c1b7 (patch)
tree       4bc006ff8f7df66d8df51f51cd0e285797712ae9 /kernel/cgroup.c
parent     cfc79d5bec04cdf26cd207d3e73d8bd59fd780a8 (diff)
cgroup: factor out cgroup_kn_lock_live() and cgroup_kn_unlock()

cgroup_mkdir(), cgroup_rmdir() and cgroup_subtree_control_write() share
the logic to break active protection so that they can grab
cgroup_tree_mutex, which nests above active protection, and/or remove
self.  Factor out this logic into cgroup_kn_lock_live() and
cgroup_kn_unlock().

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
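For illustration, here is the caller pattern that each of these methods
reduces to after the refactor (a minimal sketch distilled from the patch
below; example_cgroup_op() is a hypothetical method name, not part of the
patch):

	static int example_cgroup_op(struct kernfs_node *kn)
	{
		struct cgroup *cgrp;
		int ret = 0;

		/* break active protection, take the mutexes, verify liveness */
		cgrp = cgroup_kn_lock_live(kn);
		if (!cgrp)
			return -ENODEV;

		/* ... operate on @cgrp under cgroup_mutex ... */

		/* drop the mutexes, restore active protection, put @cgrp */
		cgroup_kn_unlock(kn);
		return ret;
	}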
Diffstat (limited to 'kernel/cgroup.c')
 -rw-r--r--  kernel/cgroup.c | 157
 1 file changed, 90 insertions(+), 67 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b49e63d5386b..21739e481006 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1093,6 +1093,75 @@ static void cgroup_put(struct cgroup *cgrp)
 	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
 }
 
+/**
+ * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
+ * @kn: the kernfs_node being serviced
+ *
+ * This helper undoes cgroup_kn_lock_live() and should be invoked before
+ * the method finishes if locking succeeded.  Note that once this function
+ * returns the cgroup returned by cgroup_kn_lock_live() may become
+ * inaccessible any time.  If the caller intends to continue to access the
+ * cgroup, it should pin it before invoking this function.
+ */
+static void cgroup_kn_unlock(struct kernfs_node *kn)
+{
+	struct cgroup *cgrp;
+
+	if (kernfs_type(kn) == KERNFS_DIR)
+		cgrp = kn->priv;
+	else
+		cgrp = kn->parent->priv;
+
+	mutex_unlock(&cgroup_mutex);
+	mutex_unlock(&cgroup_tree_mutex);
+
+	kernfs_unbreak_active_protection(kn);
+	cgroup_put(cgrp);
+}
+
+/**
+ * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
+ * @kn: the kernfs_node being serviced
+ *
+ * This helper is to be used by a cgroup kernfs method currently servicing
+ * @kn.  It breaks the active protection, performs cgroup locking and
+ * verifies that the associated cgroup is alive.  Returns the cgroup if
+ * alive; otherwise, %NULL.  A successful return should be undone by a
+ * matching cgroup_kn_unlock() invocation.
+ *
+ * Any cgroup kernfs method implementation which requires locking the
+ * associated cgroup should use this helper.  It avoids nesting cgroup
+ * locking under kernfs active protection and allows all kernfs operations
+ * including self-removal.
+ */
+static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
+{
+	struct cgroup *cgrp;
+
+	if (kernfs_type(kn) == KERNFS_DIR)
+		cgrp = kn->priv;
+	else
+		cgrp = kn->parent->priv;
+
+	/*
+	 * We're gonna grab cgroup_tree_mutex which nests outside kernfs
+	 * active_ref.  cgroup liveliness check alone provides enough
+	 * protection against removal.  Ensure @cgrp stays accessible and
+	 * break the active_ref protection.
+	 */
+	cgroup_get(cgrp);
+	kernfs_break_active_protection(kn);
+
+	mutex_lock(&cgroup_tree_mutex);
+	mutex_lock(&cgroup_mutex);
+
+	if (!cgroup_is_dead(cgrp))
+		return cgrp;
+
+	cgroup_kn_unlock(kn);
+	return NULL;
+}
+
 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 {
 	char name[CGROUP_FILE_NAME_MAX];
@@ -2541,7 +2610,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 					 loff_t off)
 {
 	unsigned int enable = 0, disable = 0;
-	struct cgroup *cgrp = of_css(of)->cgroup, *child;
+	struct cgroup *cgrp, *child;
 	struct cgroup_subsys *ss;
 	char *tok;
 	int ssid, ret;
@@ -2573,20 +2642,9 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 		return -EINVAL;
 	}
 
-	/*
-	 * We're gonna grab cgroup_tree_mutex which nests outside kernfs
-	 * active_ref.  cgroup_lock_live_group() already provides enough
-	 * protection.  Ensure @cgrp stays accessible and break the
-	 * active_ref protection.
-	 */
-	cgroup_get(cgrp);
-	kernfs_break_active_protection(of->kn);
-
-	mutex_lock(&cgroup_tree_mutex);
-	if (!cgroup_lock_live_group(cgrp)) {
-		ret = -ENODEV;
-		goto out_unlock_tree;
-	}
+	cgrp = cgroup_kn_lock_live(of->kn);
+	if (!cgrp)
+		return -ENODEV;
 
 	for_each_subsys(ss, ssid) {
 		if (enable & (1 << ssid)) {
@@ -2610,14 +2668,12 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 			cgroup_get(child);
 			prepare_to_wait(&child->offline_waitq, &wait,
 					TASK_UNINTERRUPTIBLE);
-			mutex_unlock(&cgroup_mutex);
-			mutex_unlock(&cgroup_tree_mutex);
+			cgroup_kn_unlock(of->kn);
 			schedule();
 			finish_wait(&child->offline_waitq, &wait);
 			cgroup_put(child);
 
-			ret = restart_syscall();
-			goto out_unbreak;
+			return restart_syscall();
 		}
 
 		/* unavailable or not enabled on the parent? */
@@ -2693,12 +2749,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 	kernfs_activate(cgrp->kn);
 	ret = 0;
 out_unlock:
-	mutex_unlock(&cgroup_mutex);
-out_unlock_tree:
-	mutex_unlock(&cgroup_tree_mutex);
-out_unbreak:
-	kernfs_unbreak_active_protection(of->kn);
-	cgroup_put(cgrp);
+	cgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 
 err_undo_css:
@@ -4238,25 +4289,16 @@ err_free_css:
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 			umode_t mode)
 {
-	struct cgroup *parent = parent_kn->priv, *cgrp;
-	struct cgroup_root *root = parent->root;
+	struct cgroup *parent, *cgrp;
+	struct cgroup_root *root;
 	struct cgroup_subsys *ss;
 	struct kernfs_node *kn;
 	int ssid, ret;
 
-	/*
-	 * cgroup_mkdir() grabs cgroup_tree_mutex which nests outside
-	 * kernfs active_ref and cgroup_create() already synchronizes
-	 * properly against removal through cgroup_lock_live_group().
-	 * Break it before calling cgroup_create().
-	 */
-	cgroup_get(parent);
-	kernfs_break_active_protection(parent_kn);
-	mutex_lock(&cgroup_tree_mutex);
-	if (!cgroup_lock_live_group(parent)) {
-		ret = -ENODEV;
-		goto out_unlock_tree;
-	}
+	parent = cgroup_kn_lock_live(parent_kn);
+	if (!parent)
+		return -ENODEV;
+	root = parent->root;
 
 	/* allocate the cgroup and its ID, 0 is reserved for the root */
 	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
@@ -4348,11 +4390,7 @@ out_free_id:
 out_free_cgrp:
 	kfree(cgrp);
 out_unlock:
-	mutex_unlock(&cgroup_mutex);
-out_unlock_tree:
-	mutex_unlock(&cgroup_tree_mutex);
-	kernfs_unbreak_active_protection(parent_kn);
-	cgroup_put(parent);
+	cgroup_kn_unlock(parent_kn);
 	return ret;
 
 out_destroy:
@@ -4579,32 +4617,17 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
 
 static int cgroup_rmdir(struct kernfs_node *kn)
 {
-	struct cgroup *cgrp = kn->priv;
+	struct cgroup *cgrp;
 	int ret = 0;
 
-	/*
-	 * This is self-destruction but @kn can't be removed while this
-	 * callback is in progress.  Let's break active protection.  Once
-	 * the protection is broken, @cgrp can be destroyed at any point.
-	 * Pin it so that it stays accessible.
-	 */
-	cgroup_get(cgrp);
-	kernfs_break_active_protection(kn);
-
-	mutex_lock(&cgroup_tree_mutex);
-	mutex_lock(&cgroup_mutex);
-
-	/*
-	 * @cgrp might already have been destroyed while we're trying to
-	 * grab the mutexes.
-	 */
-	if (!cgroup_is_dead(cgrp))
-		ret = cgroup_destroy_locked(cgrp);
+	cgrp = cgroup_kn_lock_live(kn);
+	if (!cgrp)
+		return 0;
+	cgroup_get(cgrp);	/* for @kn->priv clearing */
 
-	mutex_unlock(&cgroup_mutex);
-	mutex_unlock(&cgroup_tree_mutex);
+	ret = cgroup_destroy_locked(cgrp);
 
-	kernfs_unbreak_active_protection(kn);
+	cgroup_kn_unlock(kn);
 
 	/*
 	 * There are two control paths which try to determine cgroup from