about summary refs log tree commit diff stats
path: root/kernel/cgroup.c
diff options
context:
space:
mode:
author Tejun Heo <tj@kernel.org> 2013-06-13 00:04:54 -0400
committer Tejun Heo <tj@kernel.org> 2013-06-13 13:55:18 -0400
commit ddd69148bdc45e5e3e55bfde3571daecd5a96d75 (patch)
tree 0d248de63b78acf91668a96a1f6cc77f49f2946c /kernel/cgroup.c
parent 54766d4a1d3d6f84ff8fa475cd8f165c0a0000eb (diff)
cgroup: drop unnecessary RCU dancing from __put_css_set()
__put_css_set() does RCU read access on @cgrp across dropping @cgrp->count so that it can continue accessing @cgrp even if the count reached zero and destruction of the cgroup commenced. Given that both sides - __css_put() and cgroup_destroy_locked() - are cold paths, this is unnecessary. Just making cgroup_destroy_locked() grab css_set_lock while checking @cgrp->count is enough. Remove the RCU read locking from __put_css_set() and make cgroup_destroy_locked() read-lock css_set_lock when checking @cgrp->count. This will also allow removing @cgrp->count. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- kernel/cgroup.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 84efb344fdf6..1a68241ca835 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -407,19 +407,13 @@ static void __put_css_set(struct css_set *cset, int taskexit)
407 list_del(&link->cset_link); 407 list_del(&link->cset_link);
408 list_del(&link->cgrp_link); 408 list_del(&link->cgrp_link);
409 409
410 /* 410 /* @cgrp can't go away while we're holding css_set_lock */
411 * We may not be holding cgroup_mutex, and if cgrp->count is
412 * dropped to 0 the cgroup can be destroyed at any time, hence
413 * rcu_read_lock is used to keep it alive.
414 */
415 rcu_read_lock();
416 if (atomic_dec_and_test(&cgrp->count) && 411 if (atomic_dec_and_test(&cgrp->count) &&
417 notify_on_release(cgrp)) { 412 notify_on_release(cgrp)) {
418 if (taskexit) 413 if (taskexit)
419 set_bit(CGRP_RELEASABLE, &cgrp->flags); 414 set_bit(CGRP_RELEASABLE, &cgrp->flags);
420 check_for_release(cgrp); 415 check_for_release(cgrp);
421 } 416 }
422 rcu_read_unlock();
423 417
424 kfree(link); 418 kfree(link);
425 } 419 }
@@ -4370,11 +4364,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4370 struct cgroup *parent = cgrp->parent; 4364 struct cgroup *parent = cgrp->parent;
4371 struct cgroup_event *event, *tmp; 4365 struct cgroup_event *event, *tmp;
4372 struct cgroup_subsys *ss; 4366 struct cgroup_subsys *ss;
4367 bool empty;
4373 4368
4374 lockdep_assert_held(&d->d_inode->i_mutex); 4369 lockdep_assert_held(&d->d_inode->i_mutex);
4375 lockdep_assert_held(&cgroup_mutex); 4370 lockdep_assert_held(&cgroup_mutex);
4376 4371
4377 if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) 4372 /*
4373 * css_set_lock prevents @cgrp from being removed while
4374 * __put_css_set() is in progress.
4375 */
4376 read_lock(&css_set_lock);
4377 empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children);
4378 read_unlock(&css_set_lock);
4379 if (!empty)
4378 return -EBUSY; 4380 return -EBUSY;
4379 4381
4380 /* 4382 /*
@@ -5051,8 +5053,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
5051 5053
5052static void check_for_release(struct cgroup *cgrp) 5054static void check_for_release(struct cgroup *cgrp)
5053{ 5055{
5054 /* All of these checks rely on RCU to keep the cgroup
5055 * structure alive */
5056 if (cgroup_is_releasable(cgrp) && 5056 if (cgroup_is_releasable(cgrp) &&
5057 !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) { 5057 !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
5058 /* 5058 /*