Author:    Mandeep Singh Baines <msb@chromium.org>  2012-01-04 00:18:31 -0500
Committer: Tejun Heo <tj@kernel.org>                2012-01-20 18:58:13 -0500
commit     fb5d2b4cfc24963d0e8a7df57de1ecffa10a04cf
tree       4603496bbe19740067195bf0669f7be484dbc950
parent     b78949ebfb563c29808a9d0a772e3adb5561bc80
cgroup: replace tasklist_lock with rcu_read_lock
We can replace the tasklist_lock in cgroup_attach_proc() with an
rcu_read_lock().

Changes in V4:
* https://lkml.org/lkml/2011/12/23/284 (Frederic Weisbecker)
  * Minimize size of rcu_read_lock critical section
  * Add comment
* https://lkml.org/lkml/2011/12/26/136 (Li Zefan)
  * Split into two patches

Changes in V3:
* https://lkml.org/lkml/2011/12/22/419 (Frederic Weisbecker)
  * Add an rcu_read_lock to protect against exit

Changes in V2:
* https://lkml.org/lkml/2011/12/22/86 (Tejun Heo)
  * Use a goto instead of returning -EAGAIN

Suggested-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
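A minimal sketch of the locking pattern this patch adopts, for context
(illustrative only: snapshot_thread_group() is a hypothetical helper, not
part of this patch, and the thread-walk macros are used as they existed in
kernels of this era):

#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Walk a thread group without taking tasklist_lock. An RCU read-side
 * critical section is sufficient here because task_structs are freed
 * via RCU: a task that is already PF_EXITING may be unhashed, but its
 * memory cannot be freed while we hold rcu_read_lock().
 */
static void snapshot_thread_group(struct task_struct *leader)
{
	struct task_struct *tsk = leader;

	rcu_read_lock();
	do {
		/* record tsk in a snapshot array here */
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
}

Note that rcu_read_lock() only keeps the task_structs allocated; it does
not freeze the thread list itself, so the caller must tolerate a snapshot
that races with fork and exit.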
 kernel/cgroup.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 12c07e8fd69c..1626152dcc1e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2102,10 +2102,14 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	if (retval)
 		goto out_free_group_list;
 
-	/* prevent changes to the threadgroup list while we take a snapshot. */
-	read_lock(&tasklist_lock);
 	tsk = leader;
 	i = 0;
+	/*
+	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
+	 * already PF_EXITING could be freed from underneath us unless we
+	 * take an rcu_read_lock.
+	 */
+	rcu_read_lock();
 	do {
 		struct task_and_cgroup ent;
 
@@ -2128,11 +2132,11 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		BUG_ON(retval != 0);
 		i++;
 	} while_each_thread(leader, tsk);
+	rcu_read_unlock();
 	/* remember the number of threads in the array for later. */
 	group_size = i;
 	tset.tc_array = group;
 	tset.tc_array_len = group_size;
-	read_unlock(&tasklist_lock);
 
 	/* methods shouldn't be called if no task is actually migrating */
 	retval = 0;
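For reference, after this patch the snapshot section of cgroup_attach_proc()
reads roughly as follows (reconstructed from the two hunks above; the
unchanged lines between the hunks are elided):

	tsk = leader;
	i = 0;
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	rcu_read_lock();
	do {
		struct task_and_cgroup ent;
		...
		BUG_ON(retval != 0);
		i++;
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
	/* remember the number of threads in the array for later. */
	group_size = i;
	tset.tc_array = group;
	tset.tc_array_len = group_size;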