path: root/kernel
author		Tejun Heo <tj@kernel.org>	2013-06-13 00:04:53 -0400
committer	Tejun Heo <tj@kernel.org>	2013-06-13 13:55:18 -0400
commit		54766d4a1d3d6f84ff8fa475cd8f165c0a0000eb (patch)
tree		8a897b12cfe0f2e373eceffd3eddb095efd23844 /kernel
parent		5de0107e634ce862f16360139709d9d3a656463e (diff)
cgroup: rename CGRP_REMOVED to CGRP_DEAD
We will add another flag indicating that the cgroup is in the process
of being killed.  REMOVING / REMOVED is more difficult to distinguish
and cgroup_is_removing()/cgroup_is_removed() are a bit awkward.  Also,
later percpu_ref usage will involve "kill"ing the refcnt.

 s/CGRP_REMOVED/CGRP_DEAD/
 s/cgroup_is_removed()/cgroup_is_dead()

This patch is purely cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
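The "kill" wording points at the percpu_ref conversion mentioned in the message. As a rough sketch only (the refcnt member and the destroy-path placement are assumptions about that later conversion, not part of this patch), the DEAD naming lines up with percpu_ref_kill() roughly like this:

/* Sketch only -- assumes a later conversion that gives struct cgroup a
 * percpu_ref member (called "refcnt" here); not part of this patch.
 */
#include <linux/cgroup.h>
#include <linux/percpu-refcount.h>

static void cgroup_destroy_sketch(struct cgroup *cgrp)
{
	/* mark the group dead so cgroup_is_dead() callers back off */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/* "kill" the refcnt: the release callback runs once all
	 * remaining references are dropped.
	 */
	percpu_ref_kill(&cgrp->refcnt);
}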
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	30
1 file changed, 14 insertions, 16 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d86a8477d56a..84efb344fdf6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -226,9 +226,9 @@ static int css_refcnt(struct cgroup_subsys_state *css)
 }
 
 /* convenient tests for these bits */
-static inline bool cgroup_is_removed(const struct cgroup *cgrp)
+static inline bool cgroup_is_dead(const struct cgroup *cgrp)
 {
-	return test_bit(CGRP_REMOVED, &cgrp->flags);
+	return test_bit(CGRP_DEAD, &cgrp->flags);
 }
 
 /**
@@ -300,7 +300,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
 static bool cgroup_lock_live_group(struct cgroup *cgrp)
 {
 	mutex_lock(&cgroup_mutex);
-	if (cgroup_is_removed(cgrp)) {
+	if (cgroup_is_dead(cgrp)) {
 		mutex_unlock(&cgroup_mutex);
 		return false;
 	}
@@ -892,7 +892,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 	if (S_ISDIR(inode->i_mode)) {
 		struct cgroup *cgrp = dentry->d_fsdata;
 
-		BUG_ON(!(cgroup_is_removed(cgrp)));
+		BUG_ON(!(cgroup_is_dead(cgrp)));
 		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
 	} else {
 		struct cfent *cfe = __d_cfe(dentry);
@@ -2363,7 +2363,7 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
 	struct cftype *cft = __d_cft(file->f_dentry);
 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
 
-	if (cgroup_is_removed(cgrp))
+	if (cgroup_is_dead(cgrp))
 		return -ENODEV;
 	if (cft->write)
 		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
@@ -2408,7 +2408,7 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
 	struct cftype *cft = __d_cft(file->f_dentry);
 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
 
-	if (cgroup_is_removed(cgrp))
+	if (cgroup_is_dead(cgrp))
 		return -ENODEV;
 
 	if (cft->read)
@@ -2831,7 +2831,7 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
 
 		mutex_lock(&inode->i_mutex);
 		mutex_lock(&cgroup_mutex);
-		if (!cgroup_is_removed(cgrp))
+		if (!cgroup_is_dead(cgrp))
 			cgroup_addrm_files(cgrp, ss, cfts, is_add);
 		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&inode->i_mutex);
@@ -2999,14 +2999,14 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos)
 	/*
 	 * @pos could already have been removed.  Once a cgroup is removed,
 	 * its ->sibling.next is no longer updated when its next sibling
-	 * changes.  As CGRP_REMOVED is set on removal which is fully
+	 * changes.  As CGRP_DEAD is set on removal which is fully
 	 * serialized, if we see it unasserted, it's guaranteed that the
 	 * next sibling hasn't finished its grace period even if it's
 	 * already removed, and thus safe to dereference from this RCU
 	 * critical section.  If ->sibling.next is inaccessible,
-	 * cgroup_is_removed() is guaranteed to be visible as %true here.
+	 * cgroup_is_dead() is guaranteed to be visible as %true here.
 	 */
-	if (likely(!cgroup_is_removed(pos))) {
+	if (likely(!cgroup_is_dead(pos))) {
 		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
 		if (&next->sibling != &pos->parent->children)
 			return next;
@@ -4383,7 +4383,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	 * attempts fail thus maintaining the removal conditions verified
 	 * above.
 	 *
-	 * Note that CGRP_REMVOED clearing is depended upon by
+	 * Note that CGRP_DEAD assertion is depended upon by
 	 * cgroup_next_sibling() to resume iteration after dropping RCU
 	 * read lock.  See cgroup_next_sibling() for details.
 	 */
@@ -4393,7 +4393,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 		WARN_ON(atomic_read(&css->refcnt) < 0);
 		atomic_add(CSS_DEACT_BIAS, &css->refcnt);
 	}
-	set_bit(CGRP_REMOVED, &cgrp->flags);
+	set_bit(CGRP_DEAD, &cgrp->flags);
 
 	/* tell subsystems to initate destruction */
 	for_each_subsys(cgrp->root, ss)
@@ -5063,7 +5063,7 @@ static void check_for_release(struct cgroup *cgrp)
 	int need_schedule_work = 0;
 
 	raw_spin_lock(&release_list_lock);
-	if (!cgroup_is_removed(cgrp) &&
+	if (!cgroup_is_dead(cgrp) &&
 	    list_empty(&cgrp->release_list)) {
 		list_add(&cgrp->release_list, &release_list);
 		need_schedule_work = 1;
@@ -5209,9 +5209,7 @@ __setup("cgroup_disable=", cgroup_disable);
  * Functons for CSS ID.
  */
 
-/*
- *To get ID other than 0, this should be called when !cgroup_is_removed().
- */
+/* to get ID other than 0, this should be called when !cgroup_is_dead() */
 unsigned short css_id(struct cgroup_subsys_state *css)
 {
 	struct css_id *cssid;
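As a closing illustration of how the renamed helper is consumed, here is the usual caller pattern around cgroup_lock_live_group() from the hunk above. The handler name is made up and the snippet only makes sense inside kernel/cgroup.c, where cgroup_mutex and cgroup_lock_live_group() live; it is a sketch of the pattern, not code added by this patch:

/* Illustrative caller pattern only; "some_cgroup_write_u64" is a
 * hypothetical control-file handler.  cgroup_lock_live_group() takes
 * cgroup_mutex and returns false if the group is already dead, so the
 * handler never operates on a dying cgroup.
 */
static int some_cgroup_write_u64(struct cgroup *cgrp, struct cftype *cft,
				 u64 val)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;		/* raced with rmdir */

	/* ... safe to modify @cgrp here; cgroup_mutex is held ... */

	mutex_unlock(&cgroup_mutex);
	return 0;
}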