author		Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
commit		257058ae2b971646b96ab3a15605ac69186e562a
tree		b8a462e64c4bb4b43dcc6c9d05b194f9c747c91d
parent		e25e2cbb4c6679bed5f52fb0f2cc381688297901
threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem
Make the following renames to prepare for extension of threadgroup
locking.
* s/signal->threadgroup_fork_lock/signal->group_rwsem/
* s/threadgroup_fork_read_lock()/threadgroup_change_begin()/
* s/threadgroup_fork_read_unlock()/threadgroup_change_end()/
* s/threadgroup_fork_write_lock()/threadgroup_lock()/
* s/threadgroup_fork_write_unlock()/threadgroup_unlock()/
This patch doesn't cause any behavior change.
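
As an illustration, a write-side call site such as attach_task_by_pid()
changes from

	threadgroup_fork_write_lock(tsk);
	ret = cgroup_attach_proc(cgrp, tsk);
	threadgroup_fork_write_unlock(tsk);

to

	threadgroup_lock(tsk);
	ret = cgroup_attach_proc(cgrp, tsk);
	threadgroup_unlock(tsk);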
-v2: Rename threadgroup_change_done() to threadgroup_change_end() per
KAMEZAWA's suggestion.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
 include/linux/init_task.h |  9
 include/linux/sched.h     | 30
 kernel/cgroup.c           | 13
 kernel/fork.c             |  8
 4 files changed, 29 insertions, 31 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 94b1e356c02a..f4544b99efe4 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -23,11 +23,10 @@ extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
 #ifdef CONFIG_CGROUPS
-#define INIT_THREADGROUP_FORK_LOCK(sig)					\
-	.threadgroup_fork_lock =					\
-		__RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
+#define INIT_GROUP_RWSEM(sig)						\
+	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
 #else
-#define INIT_THREADGROUP_FORK_LOCK(sig)
+#define INIT_GROUP_RWSEM(sig)
 #endif
 
 #define INIT_SIGNALS(sig) {						\
@@ -46,7 +45,7 @@ extern struct fs_struct init_fs;
 	},								\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
-	INIT_THREADGROUP_FORK_LOCK(sig)					\
+	INIT_GROUP_RWSEM(sig)						\
 }
 
 extern struct nsproxy init_nsproxy;
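
(Editor's sketch, not part of the patch: with CONFIG_CGROUPS=y the renamed
initializer is intended to expand inside INIT_SIGNALS() roughly as below.
The static init_signals instance and the elided fields are assumptions for
illustration only.)

	/* illustrative expansion only -- member list heavily elided */
	static struct signal_struct init_signals = {
		/* ... other INIT_SIGNALS() members ... */
		.cred_guard_mutex =
			__MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
		.group_rwsem = __RWSEM_INITIALIZER(init_signals.group_rwsem),
	};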
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d81cce933869..8cd523202a3b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -635,13 +635,13 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_CGROUPS
 	/*
-	 * The threadgroup_fork_lock prevents threads from forking with
+	 * The group_rwsem prevents threads from forking with
 	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
 	 * threadgroup-wide operations. It's taken for reading in fork.c in
 	 * copy_process().
 	 * Currently only needed write-side by cgroups.
 	 */
-	struct rw_semaphore threadgroup_fork_lock;
+	struct rw_semaphore group_rwsem;
 #endif
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
@@ -2371,29 +2371,29 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-/* See the declaration of threadgroup_fork_lock in signal_struct. */
+/* See the declaration of group_rwsem in signal_struct. */
 #ifdef CONFIG_CGROUPS
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-	down_read(&tsk->signal->threadgroup_fork_lock);
+	down_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-	up_read(&tsk->signal->threadgroup_fork_lock);
+	up_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+static inline void threadgroup_lock(struct task_struct *tsk)
 {
-	down_write(&tsk->signal->threadgroup_fork_lock);
+	down_write(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+static inline void threadgroup_unlock(struct task_struct *tsk)
 {
-	up_write(&tsk->signal->threadgroup_fork_lock);
+	up_write(&tsk->signal->group_rwsem);
 }
 #else
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
 #endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
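
(Editor's sketch, not part of the patch: the signal_struct comment above
pins down the semantics -- any number of CLONE_THREAD forks may hold the
read side concurrently, while a threadgroup-wide operation takes the write
side and excludes them all. The caller code below is hypothetical; only
the helper names come from this patch.)

	/* per-fork read side; concurrent forks can all hold this */
	threadgroup_change_begin(current);  /* down_read(&sig->group_rwsem) */
	/* ... copy_process() sets up the new CLONE_THREAD child ... */
	threadgroup_change_end(current);    /* up_read(&sig->group_rwsem) */

	/* write side; serializes against every fork in the group */
	threadgroup_lock(leader);           /* down_write(&sig->group_rwsem) */
	/* ... fork-sensitive threadgroup-wide operation ... */
	threadgroup_unlock(leader);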
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6545fd61b10d..b409df3b2e9d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2003,8 +2003,8 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  * @cgrp: the cgroup to attach to
  * @leader: the threadgroup leader task_struct of the group to be attached
  *
- * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
- * take task_lock of each thread in leader's threadgroup individually in turn.
+ * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
+ * task_lock of each thread in leader's threadgroup individually in turn.
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
@@ -2030,8 +2030,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
	 * step 0: in order to do expensive, possibly blocking operations for
	 * every thread, we cannot iterate the thread group list, since it needs
	 * rcu or tasklist locked. instead, build an array of all threads in the
-	 * group - threadgroup_fork_lock prevents new threads from appearing,
-	 * and if threads exit, this will just be an over-estimate.
+	 * group - group_rwsem prevents new threads from appearing, and if
+	 * threads exit, this will just be an over-estimate.
	 */
	group_size = get_nr_threads(leader);
	/* flex_array supports very large thread-groups better than kmalloc. */
@@ -2249,7 +2249,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
		cgroup_unlock();
		return -ESRCH;
	}
-
	/*
	 * even if we're attaching all tasks in the thread group, we
	 * only need to check permissions on one of them.
@@ -2273,9 +2272,9 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
	}
 
	if (threadgroup) {
-		threadgroup_fork_write_lock(tsk);
+		threadgroup_lock(tsk);
		ret = cgroup_attach_proc(cgrp, tsk);
-		threadgroup_fork_write_unlock(tsk);
+		threadgroup_unlock(tsk);
	} else {
		ret = cgroup_attach_task(cgrp, tsk);
	}
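
(Editor's sketch, not part of the patch: the "step 0" comment above
describes a snapshot pattern; condensed into a hypothetical fragment, with
group_rwsem already held for writing so the group cannot grow:)

	/* exiting threads only make this an over-estimate */
	group_size = get_nr_threads(leader);
	/* flex_array copes with very large thread-groups better than kmalloc */
	group = flex_array_alloc(sizeof(struct task_struct *), group_size,
				 GFP_KERNEL);
	/* ... then walk the group under rcu_read_lock() and stash each
	 * thread pointer into the array before operating on it ... */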
diff --git a/kernel/fork.c b/kernel/fork.c
index 827808613847..d4ac9e3e0075 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -972,7 +972,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
	sched_autogroup_fork(sig);
 
 #ifdef CONFIG_CGROUPS
-	init_rwsem(&sig->threadgroup_fork_lock);
+	init_rwsem(&sig->group_rwsem);
 #endif
 
	sig->oom_adj = current->signal->oom_adj;
@@ -1157,7 +1157,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_lock(current);
+		threadgroup_change_begin(current);
	cgroup_fork(p);
 #ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
@@ -1372,7 +1372,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_unlock(current);
+		threadgroup_change_end(current);
	perf_event_fork(p);
	return p;
 
@@ -1407,7 +1407,7 @@ bad_fork_cleanup_policy:
 bad_fork_cleanup_cgroup:
 #endif
	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_unlock(current);
+		threadgroup_change_end(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);