author	Anjana V Kumar <anjanavk12@gmail.com>	2013-10-11 22:59:17 -0400
committer	Tejun Heo <tj@kernel.org>	2013-10-13 16:07:10 -0400
commit	ea84753c98a7ac6b74e530b64c444a912b3835ca (patch)
tree	5e5f08d2c9a4f7fdc434d1fc7db3f7095809dd23 /kernel
parent	58b79a91f57efec9457de8ff93a4cc4fb8daf753 (diff)
cgroup: fix to break the while loop in cgroup_attach_task() correctly
Both Anjana and Eunki reported a stall in the while_each_thread loop in cgroup_attach_task().

It's because, when we attach a single thread to a cgroup, if the task is exiting or is already in that cgroup, we won't break the loop.

If the task is already in the cgroup, the bug can lead to another thread being attached to the cgroup unexpectedly:

  # echo 5207 > tasks
  # cat tasks
  5207
  # echo 5207 > tasks
  # cat tasks
  5207
  5215

What's worse, if the task to be attached isn't the leader of the thread group, we might never exit the loop, hence cpu stall. Thanks to Oleg for the analysis.

This bug was introduced by commit 081aa458c38ba576bdd4265fc807fa95b48b9e79 ("cgroup: consolidate cgroup_attach_task() and cgroup_attach_proc()").

[ lizf: - fixed the first continue, pointed out by Oleg,
        - rewrote changelog. ]

Cc: <stable@vger.kernel.org> # 3.9+
Reported-by: Eunki Kim <eunki_kim@samsung.com>
Reported-by: Anjana V Kumar <anjanavk12@gmail.com>
Signed-off-by: Anjana V Kumar <anjanavk12@gmail.com>
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
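The control-flow trap is that, inside a do { ... } while (...) loop, 'continue' jumps straight to the loop condition and skips any trailing statements, including the "if (!threadgroup) break;" single-task exit, whereas "goto next;" lands just before that exit check. A minimal userspace sketch of the difference (the thread IDs, array, and loop condition below are illustrative stand-ins, not the kernel's while_each_thread() walk):

#include <stdbool.h>
#include <stdio.h>

#define NTHREADS 3

int main(void)
{
	/* illustrative stand-ins for a thread group; not kernel data */
	const char *tids[NTHREADS] = { "5207", "5215", "5221" };
	bool threadgroup = false;	/* attaching a single task, as in "echo 5207 > tasks" */
	int i;

	/* buggy shape: 'continue' jumps to the loop condition, skipping the break */
	i = 0;
	do {
		if (i == 0)		/* pretend this task is already in the cgroup */
			continue;	/* oops: the next thread gets attached instead */
		printf("attach %s\n", tids[i]);
		if (!threadgroup)
			break;
	} while (++i < NTHREADS);	/* stands in for while_each_thread() */

	/* fixed shape: 'goto next' still reaches the single-task early exit */
	i = 0;
	do {
		if (i == 0)
			goto next;
		printf("attach %s\n", tids[i]);
next:
		if (!threadgroup)
			break;
	} while (++i < NTHREADS);

	return 0;
}

The first loop prints "attach 5215" even though only 5207 was requested, mirroring the tasks-file example above; the second loop attaches nothing and stops after the first task, which is the behavior the goto-based patch restores.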
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	| 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8075b72d22be..1bf4f7a12703 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2038,7 +2038,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
 		/* @tsk either already exited or can't exit until the end */
 		if (tsk->flags & PF_EXITING)
-			continue;
+			goto next;
 
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
@@ -2046,7 +2046,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		ent.cgrp = task_cgroup_from_root(tsk, root);
 		/* nothing to do if this task is already in the cgroup */
 		if (ent.cgrp == cgrp)
-			continue;
+			goto next;
 		/*
 		 * saying GFP_ATOMIC has no effect here because we did prealloc
 		 * earlier, but it's good form to communicate our expectations.
@@ -2054,7 +2054,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
-
+	next:
 		if (!threadgroup)
 			break;
 	} while_each_thread(leader, tsk);