diff options
| author | Tejun Heo <tj@kernel.org> | 2011-12-12 21:12:21 -0500 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2011-12-12 21:12:21 -0500 |
| commit | 257058ae2b971646b96ab3a15605ac69186e562a (patch) | |
| tree | b8a462e64c4bb4b43dcc6c9d05b194f9c747c91d /kernel | |
| parent | e25e2cbb4c6679bed5f52fb0f2cc381688297901 (diff) | |
threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem
Make the following renames to prepare for extension of threadgroup
locking.
* s/signal->threadgroup_fork_lock/signal->group_rwsem/
* s/threadgroup_fork_read_lock()/threadgroup_change_begin()/
* s/threadgroup_fork_read_unlock()/threadgroup_change_end()/
* s/threadgroup_fork_write_lock()/threadgroup_lock()/
* s/threadgroup_fork_write_unlock()/threadgroup_unlock()/
This patch doesn't cause any behavior change.
-v2: Rename threadgroup_change_done() to threadgroup_change_end() per
KAMEZAWA's suggestion.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/cgroup.c | 13 | ||||
| -rw-r--r-- | kernel/fork.c | 8 |
2 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 6545fd61b10d..b409df3b2e9d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -2003,8 +2003,8 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg, | |||
| 2003 | * @cgrp: the cgroup to attach to | 2003 | * @cgrp: the cgroup to attach to |
| 2004 | * @leader: the threadgroup leader task_struct of the group to be attached | 2004 | * @leader: the threadgroup leader task_struct of the group to be attached |
| 2005 | * | 2005 | * |
| 2006 | * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will | 2006 | * Call holding cgroup_mutex and the group_rwsem of the leader. Will take |
| 2007 | * take task_lock of each thread in leader's threadgroup individually in turn. | 2007 | * task_lock of each thread in leader's threadgroup individually in turn. |
| 2008 | */ | 2008 | */ |
| 2009 | int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) | 2009 | int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) |
| 2010 | { | 2010 | { |
| @@ -2030,8 +2030,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) | |||
| 2030 | * step 0: in order to do expensive, possibly blocking operations for | 2030 | * step 0: in order to do expensive, possibly blocking operations for |
| 2031 | * every thread, we cannot iterate the thread group list, since it needs | 2031 | * every thread, we cannot iterate the thread group list, since it needs |
| 2032 | * rcu or tasklist locked. instead, build an array of all threads in the | 2032 | * rcu or tasklist locked. instead, build an array of all threads in the |
| 2033 | * group - threadgroup_fork_lock prevents new threads from appearing, | 2033 | * group - group_rwsem prevents new threads from appearing, and if |
| 2034 | * and if threads exit, this will just be an over-estimate. | 2034 | * threads exit, this will just be an over-estimate. |
| 2035 | */ | 2035 | */ |
| 2036 | group_size = get_nr_threads(leader); | 2036 | group_size = get_nr_threads(leader); |
| 2037 | /* flex_array supports very large thread-groups better than kmalloc. */ | 2037 | /* flex_array supports very large thread-groups better than kmalloc. */ |
| @@ -2249,7 +2249,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) | |||
| 2249 | cgroup_unlock(); | 2249 | cgroup_unlock(); |
| 2250 | return -ESRCH; | 2250 | return -ESRCH; |
| 2251 | } | 2251 | } |
| 2252 | |||
| 2253 | /* | 2252 | /* |
| 2254 | * even if we're attaching all tasks in the thread group, we | 2253 | * even if we're attaching all tasks in the thread group, we |
| 2255 | * only need to check permissions on one of them. | 2254 | * only need to check permissions on one of them. |
| @@ -2273,9 +2272,9 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) | |||
| 2273 | } | 2272 | } |
| 2274 | 2273 | ||
| 2275 | if (threadgroup) { | 2274 | if (threadgroup) { |
| 2276 | threadgroup_fork_write_lock(tsk); | 2275 | threadgroup_lock(tsk); |
| 2277 | ret = cgroup_attach_proc(cgrp, tsk); | 2276 | ret = cgroup_attach_proc(cgrp, tsk); |
| 2278 | threadgroup_fork_write_unlock(tsk); | 2277 | threadgroup_unlock(tsk); |
| 2279 | } else { | 2278 | } else { |
| 2280 | ret = cgroup_attach_task(cgrp, tsk); | 2279 | ret = cgroup_attach_task(cgrp, tsk); |
| 2281 | } | 2280 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index 827808613847..d4ac9e3e0075 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -972,7 +972,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
| 972 | sched_autogroup_fork(sig); | 972 | sched_autogroup_fork(sig); |
| 973 | 973 | ||
| 974 | #ifdef CONFIG_CGROUPS | 974 | #ifdef CONFIG_CGROUPS |
| 975 | init_rwsem(&sig->threadgroup_fork_lock); | 975 | init_rwsem(&sig->group_rwsem); |
| 976 | #endif | 976 | #endif |
| 977 | 977 | ||
| 978 | sig->oom_adj = current->signal->oom_adj; | 978 | sig->oom_adj = current->signal->oom_adj; |
| @@ -1157,7 +1157,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1157 | p->io_context = NULL; | 1157 | p->io_context = NULL; |
| 1158 | p->audit_context = NULL; | 1158 | p->audit_context = NULL; |
| 1159 | if (clone_flags & CLONE_THREAD) | 1159 | if (clone_flags & CLONE_THREAD) |
| 1160 | threadgroup_fork_read_lock(current); | 1160 | threadgroup_change_begin(current); |
| 1161 | cgroup_fork(p); | 1161 | cgroup_fork(p); |
| 1162 | #ifdef CONFIG_NUMA | 1162 | #ifdef CONFIG_NUMA |
| 1163 | p->mempolicy = mpol_dup(p->mempolicy); | 1163 | p->mempolicy = mpol_dup(p->mempolicy); |
| @@ -1372,7 +1372,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1372 | proc_fork_connector(p); | 1372 | proc_fork_connector(p); |
| 1373 | cgroup_post_fork(p); | 1373 | cgroup_post_fork(p); |
| 1374 | if (clone_flags & CLONE_THREAD) | 1374 | if (clone_flags & CLONE_THREAD) |
| 1375 | threadgroup_fork_read_unlock(current); | 1375 | threadgroup_change_end(current); |
| 1376 | perf_event_fork(p); | 1376 | perf_event_fork(p); |
| 1377 | return p; | 1377 | return p; |
| 1378 | 1378 | ||
| @@ -1407,7 +1407,7 @@ bad_fork_cleanup_policy: | |||
| 1407 | bad_fork_cleanup_cgroup: | 1407 | bad_fork_cleanup_cgroup: |
| 1408 | #endif | 1408 | #endif |
| 1409 | if (clone_flags & CLONE_THREAD) | 1409 | if (clone_flags & CLONE_THREAD) |
| 1410 | threadgroup_fork_read_unlock(current); | 1410 | threadgroup_change_end(current); |
| 1411 | cgroup_exit(p, cgroup_callbacks_done); | 1411 | cgroup_exit(p, cgroup_callbacks_done); |
| 1412 | delayacct_tsk_free(p); | 1412 | delayacct_tsk_free(p); |
| 1413 | module_put(task_thread_info(p)->exec_domain->module); | 1413 | module_put(task_thread_info(p)->exec_domain->module); |
