author    Mandeep Singh Baines <msb@chromium.org>  2011-12-21 23:18:37 -0500
committer Tejun Heo <tj@kernel.org>                2011-12-22 10:32:57 -0500
commit    892a2b90ba15cb7dbee40979f23fdb492913abf8 (patch)
tree      db1cb827649a846b84e9c5da03d2b2cf4aedc656 /kernel/cgroup.c
parent    b07ef7741122a83575499c11417e514877941e76 (diff)
cgroup: only need to check oldcgrp==newcgrp once
In cgroup_attach_proc it is now sufficient to only check that
oldcgrp==newcgrp once. Now that we are using threadgroup_lock()
during the migrations, oldcgrp will not change.

Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
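To make the rationale concrete, here is a minimal user-space sketch of the
pattern the patch adopts. It is illustrative only: struct cgroup,
struct task_and_cgroup, the threads[] fixture and main() below are
simplified stand-ins for the kernel's task_struct, flex_array and locking
machinery, not the real API. Because threadgroup_lock() holds each task's
cgroup membership stable for the whole attach, a task already in the
destination cgroup can be skipped once, while the migration array is
built; every later pass may then assume all remaining entries migrate.

/* Illustrative sketch only; see the assumptions stated above. */
#include <stdio.h>

struct cgroup { const char *name; };

struct task_and_cgroup {
	int pid;
	struct cgroup *cgrp;	/* cgroup the task was in when sampled */
};

int main(void)
{
	struct cgroup src = { "src" }, dst = { "dst" };
	/* thread group as sampled while its membership is pinned */
	struct task_and_cgroup threads[] = {
		{ 100, &src }, { 101, &dst }, { 102, &src },
	};
	struct task_and_cgroup group[3];
	int i, group_size = 0;

	/* build pass: filter out tasks already in the destination, once */
	for (i = 0; i < 3; i++) {
		if (threads[i].cgrp == &dst)
			continue;
		group[group_size++] = threads[i];
	}

	/* mirrors the patched "if (!group_size) goto out_free_group_list;" */
	if (!group_size)
		return 0;

	/* migration pass: no per-entry recheck is needed any more */
	for (i = 0; i < group_size; i++)
		printf("migrate pid %d: %s -> %s\n",
		       group[i].pid, group[i].cgrp->name, dst.name);
	return 0;
}

Note the bookkeeping win: once non-migrating tasks never enter the array,
group_size doubles as the count of migrating tasks, which is what lets the
patch delete nr_migrating_tasks and test !group_size instead.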
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--  kernel/cgroup.c | 22 ++++++----------------
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a85a7002ca33..1042b3c41314 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2067,7 +2067,7 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
-	int retval, i, group_size, nr_migrating_tasks;
+	int retval, i, group_size;
 	struct cgroup_subsys *ss, *failed_ss = NULL;
 	/* guaranteed to be initialized later, but the compiler needs this */
 	struct css_set *oldcg;
@@ -2118,7 +2118,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	}

 	tsk = leader;
-	i = nr_migrating_tasks = 0;
+	i = 0;
 	do {
 		struct task_and_cgroup ent;

@@ -2134,11 +2134,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		 */
 		ent.task = tsk;
 		ent.cgrp = task_cgroup_from_root(tsk, root);
+		/* nothing to do if this task is already in the cgroup */
+		if (ent.cgrp == cgrp)
+			continue;
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
-		if (ent.cgrp != cgrp)
-			nr_migrating_tasks++;
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
 	group_size = i;
@@ -2148,7 +2149,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)

 	/* methods shouldn't be called if no task is actually migrating */
 	retval = 0;
-	if (!nr_migrating_tasks)
+	if (!group_size)
 		goto out_free_group_list;

 	/*
@@ -2171,14 +2172,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	INIT_LIST_HEAD(&newcg_list);
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
-		/* nothing to do if this task is already in the cgroup */
-		if (tc->cgrp == cgrp)
-			continue;
-		/*
-		 * get old css_set pointer. threadgroup is locked so this is
-		 * safe against concurrent cgroup_exit() changing this to
-		 * init_css_set.
-		 */
 		oldcg = tc->task->cgroups;

 		/* if we don't already have it in the list get a new one */
@@ -2194,9 +2187,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
-		/* leave current thread as it is if it's already there */
-		if (tc->cgrp == cgrp)
-			continue;
 		retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
 		BUG_ON(retval);
 	}