author:    Mandeep Singh Baines <msb@chromium.org>  2012-01-04 00:18:30 -0500
committer: Tejun Heo <tj@kernel.org>  2012-01-20 18:58:13 -0500
commit:    b78949ebfb563c29808a9d0a772e3adb5561bc80
tree:      d5b473db418a7480d314ffe36d428290d3603bfc
parent:    245282557c49754af3dbcc732316e814340d6bce
cgroup: simplify double-check locking in cgroup_attach_proc
To keep the complexity of the double-check locking in one place, move the
thread_group_leader() check up into attach_task_by_pid(). This allows us to
use a goto instead of returning -EAGAIN. While at it, convert a couple of
returns to gotos and use rcu for the !pid case as well, to simplify the
logic.

Changes in V2:
* https://lkml.org/lkml/2011/12/22/86 (Tejun Heo)
* Use a goto instead of returning -EAGAIN

Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
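The shape of the fix, in miniature: look the task up under a cheap read-side
lock, take the heavier lock the operation actually needs, re-check that the
lookup result still holds, and retry the lookup if it does not. Below is a
minimal userspace sketch of that check/lock/re-check/retry pattern; the
struct group, find_leader(), and the pthread mutex standing in for
threadgroup_lock() are illustrative inventions, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a thread group; not a kernel structure. */
struct group {
        pthread_mutex_t lock;   /* plays the role of threadgroup_lock() */
        int leader_tid;         /* may change under us, like de_thread() */
};

static struct group grp = { PTHREAD_MUTEX_INITIALIZER, 1 };

/* Cheap, unlocked snapshot: may be stale by the time we act on it. */
static int find_leader(void)
{
        return grp.leader_tid;
}

static int attach(int tid)
{
        int leader;

retry_find_task:
        leader = find_leader();            /* optimistic first check */

        pthread_mutex_lock(&grp.lock);     /* now take the real lock */
        if (grp.leader_tid != leader) {
                /*
                 * Raced with a leadership change. Drop the lock and
                 * retry from the lookup right here, instead of bubbling
                 * an -EAGAIN up to the caller and looping there.
                 */
                pthread_mutex_unlock(&grp.lock);
                goto retry_find_task;
        }
        printf("attached tid %d under leader %d\n", tid, leader);
        pthread_mutex_unlock(&grp.lock);
        return 0;
}

int main(void)
{
        return attach(42);
}

The point of the goto is that the retry now lives next to the re-check it
services, which is what lets cgroup_procs_write() at the end of this patch
shrink to a single call instead of spinning on -EAGAIN at a distance.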
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--  kernel/cgroup.c | 79
1 file changed, 29 insertions(+), 50 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6ca7acad7c55..12c07e8fd69c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2104,19 +2104,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
         /* prevent changes to the threadgroup list while we take a snapshot. */
         read_lock(&tasklist_lock);
-        if (!thread_group_leader(leader)) {
-                /*
-                 * a race with de_thread from another thread's exec() may strip
-                 * us of our leadership, making while_each_thread unsafe to use
-                 * on this task. if this happens, there is no choice but to
-                 * throw this task away and try again (from cgroup_procs_write);
-                 * this is "double-double-toil-and-trouble-check locking".
-                 */
-                read_unlock(&tasklist_lock);
-                retval = -EAGAIN;
-                goto out_free_group_list;
-        }
-
         tsk = leader;
         i = 0;
         do {
@@ -2245,22 +2232,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
         if (!cgroup_lock_live_group(cgrp))
                 return -ENODEV;
 
+retry_find_task:
+        rcu_read_lock();
         if (pid) {
-                rcu_read_lock();
                 tsk = find_task_by_vpid(pid);
                 if (!tsk) {
                         rcu_read_unlock();
-                        cgroup_unlock();
-                        return -ESRCH;
-                }
-                if (threadgroup) {
-                        /*
-                         * RCU protects this access, since tsk was found in the
-                         * tid map. a race with de_thread may cause group_leader
-                         * to stop being the leader, but cgroup_attach_proc will
-                         * detect it later.
-                         */
-                        tsk = tsk->group_leader;
+                        ret = -ESRCH;
+                        goto out_unlock_cgroup;
                 }
                 /*
                  * even if we're attaching all tasks in the thread group, we
@@ -2271,29 +2250,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
                     cred->euid != tcred->uid &&
                     cred->euid != tcred->suid) {
                         rcu_read_unlock();
-                        cgroup_unlock();
-                        return -EACCES;
+                        ret = -EACCES;
+                        goto out_unlock_cgroup;
                 }
-                get_task_struct(tsk);
-                rcu_read_unlock();
-        } else {
-                if (threadgroup)
-                        tsk = current->group_leader;
-                else
-                        tsk = current;
-                get_task_struct(tsk);
-        }
-
-        threadgroup_lock(tsk);
+        } else
+                tsk = current;
 
         if (threadgroup)
+                tsk = tsk->group_leader;
+        get_task_struct(tsk);
+        rcu_read_unlock();
+
+        threadgroup_lock(tsk);
+        if (threadgroup) {
+                if (!thread_group_leader(tsk)) {
+                        /*
+                         * a race with de_thread from another thread's exec()
+                         * may strip us of our leadership, if this happens,
+                         * there is no choice but to throw this task away and
+                         * try again; this is
+                         * "double-double-toil-and-trouble-check locking".
+                         */
+                        threadgroup_unlock(tsk);
+                        put_task_struct(tsk);
+                        goto retry_find_task;
+                }
                 ret = cgroup_attach_proc(cgrp, tsk);
-        else
+        } else
                 ret = cgroup_attach_task(cgrp, tsk);
-
         threadgroup_unlock(tsk);
 
         put_task_struct(tsk);
+out_unlock_cgroup:
         cgroup_unlock();
         return ret;
 }
@@ -2305,16 +2293,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
 
 static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
 {
-        int ret;
-        do {
-                /*
-                 * attach_proc fails with -EAGAIN if threadgroup leadership
-                 * changes in the middle of the operation, in which case we need
-                 * to find the task_struct for the new leader and start over.
-                 */
-                ret = attach_task_by_pid(cgrp, tgid, true);
-        } while (ret == -EAGAIN);
-        return ret;
+        return attach_task_by_pid(cgrp, tgid, true);
 }
 
 /**