aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/cgroup.c
diff options
context:
space:
mode:
author: Tejun Heo <tj@kernel.org> 2011-12-12 21:12:21 -0500
committer: Tejun Heo <tj@kernel.org> 2011-12-12 21:12:21 -0500
commit 257058ae2b971646b96ab3a15605ac69186e562a (patch)
tree b8a462e64c4bb4b43dcc6c9d05b194f9c747c91d /kernel/cgroup.c
parent e25e2cbb4c6679bed5f52fb0f2cc381688297901 (diff)
threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem
Make the following renames to prepare for extension of threadgroup locking.

* s/signal->threadgroup_fork_lock/signal->group_rwsem/
* s/threadgroup_fork_read_lock()/threadgroup_change_begin()/
* s/threadgroup_fork_read_unlock()/threadgroup_change_end()/
* s/threadgroup_fork_write_lock()/threadgroup_lock()/
* s/threadgroup_fork_write_unlock()/threadgroup_unlock()/

This patch doesn't cause any behavior change.

-v2: Rename threadgroup_change_done() to threadgroup_change_end() per KAMEZAWA's suggestion.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- kernel/cgroup.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6545fd61b10d..b409df3b2e9d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2003,8 +2003,8 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
2003 * @cgrp: the cgroup to attach to 2003 * @cgrp: the cgroup to attach to
2004 * @leader: the threadgroup leader task_struct of the group to be attached 2004 * @leader: the threadgroup leader task_struct of the group to be attached
2005 * 2005 *
2006 * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will 2006 * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
2007 * take task_lock of each thread in leader's threadgroup individually in turn. 2007 * task_lock of each thread in leader's threadgroup individually in turn.
2008 */ 2008 */
2009int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) 2009int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
2010{ 2010{
@@ -2030,8 +2030,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
2030 * step 0: in order to do expensive, possibly blocking operations for 2030 * step 0: in order to do expensive, possibly blocking operations for
2031 * every thread, we cannot iterate the thread group list, since it needs 2031 * every thread, we cannot iterate the thread group list, since it needs
2032 * rcu or tasklist locked. instead, build an array of all threads in the 2032 * rcu or tasklist locked. instead, build an array of all threads in the
2033 * group - threadgroup_fork_lock prevents new threads from appearing, 2033 * group - group_rwsem prevents new threads from appearing, and if
2034 * and if threads exit, this will just be an over-estimate. 2034 * threads exit, this will just be an over-estimate.
2035 */ 2035 */
2036 group_size = get_nr_threads(leader); 2036 group_size = get_nr_threads(leader);
2037 /* flex_array supports very large thread-groups better than kmalloc. */ 2037 /* flex_array supports very large thread-groups better than kmalloc. */
@@ -2249,7 +2249,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
2249 cgroup_unlock(); 2249 cgroup_unlock();
2250 return -ESRCH; 2250 return -ESRCH;
2251 } 2251 }
2252
2253 /* 2252 /*
2254 * even if we're attaching all tasks in the thread group, we 2253 * even if we're attaching all tasks in the thread group, we
2255 * only need to check permissions on one of them. 2254 * only need to check permissions on one of them.
@@ -2273,9 +2272,9 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
2273 } 2272 }
2274 2273
2275 if (threadgroup) { 2274 if (threadgroup) {
2276 threadgroup_fork_write_lock(tsk); 2275 threadgroup_lock(tsk);
2277 ret = cgroup_attach_proc(cgrp, tsk); 2276 ret = cgroup_attach_proc(cgrp, tsk);
2278 threadgroup_fork_write_unlock(tsk); 2277 threadgroup_unlock(tsk);
2279 } else { 2278 } else {
2280 ret = cgroup_attach_task(cgrp, tsk); 2279 ret = cgroup_attach_task(cgrp, tsk);
2281 } 2280 }