author     Tejun Heo <tj@kernel.org>    2011-12-12 21:12:21 -0500
committer  Tejun Heo <tj@kernel.org>    2011-12-12 21:12:21 -0500
commit     134d33737f9015761c3832f6b268fae6274aac7f (patch)
tree       536b973af6429c25e1fc8ccbaf76395c09a8623d /kernel/cgroup.c
parent     cd3d095275374220921fcf0d4e0c16584b26ddbc (diff)
cgroup: improve old cgroup handling in cgroup_attach_proc()
cgroup_attach_proc() behaves differently from cgroup_attach_task() in
the following aspects.

* All hooks are invoked even if no task is actually being moved.

* ->can_attach_task() is called for all tasks in the group whether the
  new cgrp is different from the current cgrp or not; however,
  ->attach_task() is skipped if the new cgrp equals the current one.
  This makes the calls asymmetric.

This patch improves old cgroup handling in cgroup_attach_proc() by
looking up the current cgroup at the head, recording it in the flex
array along with the task itself, and using it to remove the above two
differences.  This will also ease further changes.

-v2: nr_todo renamed to nr_migrating_tasks as per Paul Menage's
     suggestion.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Menage <paul@paulmenage.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
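To make the resulting pattern concrete, here is a minimal user-space
sketch (not kernel code; the stand-in types and the main() driver are
illustrative): each thread is snapshotted together with its current
cgroup in a single pass, and that snapshot then drives both the
nr_migrating_tasks early-exit and the same skip condition in every
later per-task pass.

#include <stdio.h>

struct cgroup { int id; };
struct task_struct { int pid; struct cgroup *cgrp; };

/* mirrors the struct task_and_cgroup this patch adds */
struct task_and_cgroup {
        struct task_struct *task;
        struct cgroup *cgrp;            /* cgroup at collection time */
};

int main(void)
{
        struct cgroup src = { 1 }, dst = { 2 };
        struct task_struct threads[3] = {
                { 100, &src }, { 101, &dst }, { 102, &src },
        };
        struct task_and_cgroup group[3];
        int i, nr_migrating_tasks = 0;

        /* one collection pass: record task and old cgroup together */
        for (i = 0; i < 3; i++) {
                group[i].task = &threads[i];
                group[i].cgrp = threads[i].cgrp;
                if (group[i].cgrp != &dst)
                        nr_migrating_tasks++;
        }

        /* with no migrating task, no hooks would run at all */
        if (!nr_migrating_tasks)
                return 0;

        /* every later pass skips on the same recorded condition */
        for (i = 0; i < 3; i++) {
                if (group[i].cgrp == &dst)
                        continue;       /* symmetric skip for all hooks */
                printf("would migrate pid %d\n", group[i].task->pid);
        }
        return 0;
}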
Diffstat (limited to 'kernel/cgroup.c')
 kernel/cgroup.c | 66 ++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 42 insertions(+), 24 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d71e012e81be..0f2d00519d37 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1757,6 +1757,11 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 }
 EXPORT_SYMBOL_GPL(cgroup_path);
 
+struct task_and_cgroup {
+        struct task_struct *task;
+        struct cgroup *cgrp;
+};
+
 /*
  * cgroup_task_migrate - move a task from one cgroup to another.
  *
@@ -2008,15 +2013,15 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
-        int retval, i, group_size;
+        int retval, i, group_size, nr_migrating_tasks;
         struct cgroup_subsys *ss, *failed_ss = NULL;
         bool cancel_failed_ss = false;
         /* guaranteed to be initialized later, but the compiler needs this */
-        struct cgroup *oldcgrp = NULL;
         struct css_set *oldcg;
         struct cgroupfs_root *root = cgrp->root;
         /* threadgroup list cursor and array */
         struct task_struct *tsk;
+        struct task_and_cgroup *tc;
         struct flex_array *group;
         /*
          * we need to make sure we have css_sets for all the tasks we're
@@ -2035,8 +2040,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          */
         group_size = get_nr_threads(leader);
         /* flex_array supports very large thread-groups better than kmalloc. */
-        group = flex_array_alloc(sizeof(struct task_struct *), group_size,
-                                 GFP_KERNEL);
+        group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
         if (!group)
                 return -ENOMEM;
         /* pre-allocate to guarantee space while iterating in rcu read-side. */
@@ -2060,8 +2064,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         }
         /* take a reference on each task in the group to go in the array. */
         tsk = leader;
-        i = 0;
+        i = nr_migrating_tasks = 0;
         do {
+                struct task_and_cgroup ent;
+
                 /* @tsk either already exited or can't exit until the end */
                 if (tsk->flags & PF_EXITING)
                         continue;
@@ -2073,14 +2079,23 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                  * saying GFP_ATOMIC has no effect here because we did prealloc
                  * earlier, but it's good form to communicate our expectations.
                  */
-                retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+                ent.task = tsk;
+                ent.cgrp = task_cgroup_from_root(tsk, root);
+                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                 BUG_ON(retval != 0);
                 i++;
+                if (ent.cgrp != cgrp)
+                        nr_migrating_tasks++;
         } while_each_thread(leader, tsk);
         /* remember the number of threads in the array for later. */
         group_size = i;
         read_unlock(&tasklist_lock);
 
+        /* methods shouldn't be called if no task is actually migrating */
+        retval = 0;
+        if (!nr_migrating_tasks)
+                goto out_put_tasks;
+
         /*
          * step 1: check that we can legitimately attach to the cgroup.
          */
@@ -2096,8 +2111,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                 if (ss->can_attach_task) {
                         /* run on each task in the threadgroup. */
                         for (i = 0; i < group_size; i++) {
-                                tsk = flex_array_get_ptr(group, i);
-                                retval = ss->can_attach_task(cgrp, tsk);
+                                tc = flex_array_get(group, i);
+                                if (tc->cgrp == cgrp)
+                                        continue;
+                                retval = ss->can_attach_task(cgrp, tc->task);
                                 if (retval) {
                                         failed_ss = ss;
                                         cancel_failed_ss = true;
@@ -2113,18 +2130,17 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          */
         INIT_LIST_HEAD(&newcg_list);
         for (i = 0; i < group_size; i++) {
-                tsk = flex_array_get_ptr(group, i);
+                tc = flex_array_get(group, i);
                 /* nothing to do if this task is already in the cgroup */
-                oldcgrp = task_cgroup_from_root(tsk, root);
-                if (cgrp == oldcgrp)
+                if (tc->cgrp == cgrp)
                         continue;
                 /* get old css_set pointer */
-                task_lock(tsk);
-                oldcg = tsk->cgroups;
+                task_lock(tc->task);
+                oldcg = tc->task->cgroups;
                 get_css_set(oldcg);
-                task_unlock(tsk);
+                task_unlock(tc->task);
                 /* see if the new one for us is already in the list? */
-                if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
+                if (css_set_check_fetched(cgrp, tc->task, oldcg, &newcg_list)) {
                         /* was already there, nothing to do. */
                         put_css_set(oldcg);
                 } else {
@@ -2147,17 +2163,16 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                         ss->pre_attach(cgrp);
         }
         for (i = 0; i < group_size; i++) {
-                tsk = flex_array_get_ptr(group, i);
+                tc = flex_array_get(group, i);
                 /* leave current thread as it is if it's already there */
-                oldcgrp = task_cgroup_from_root(tsk, root);
-                if (cgrp == oldcgrp)
+                if (tc->cgrp == cgrp)
                         continue;
-                retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
+                retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
                 BUG_ON(retval);
                 /* attach each task to each subsystem */
                 for_each_subsys(root, ss) {
                         if (ss->attach_task)
-                                ss->attach_task(cgrp, tsk);
+                                ss->attach_task(cgrp, tc->task);
                 }
         }
         /* nothing is sensitive to fork() after this point. */
@@ -2168,8 +2183,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          * being moved, this call will need to be reworked to communicate that.
          */
         for_each_subsys(root, ss) {
-                if (ss->attach)
-                        ss->attach(ss, cgrp, oldcgrp, leader);
+                if (ss->attach) {
+                        tc = flex_array_get(group, 0);
+                        ss->attach(ss, cgrp, tc->cgrp, tc->task);
+                }
         }
 
         /*
@@ -2198,10 +2215,11 @@ out_cancel_attach:
                                 ss->cancel_attach(ss, cgrp, leader);
                 }
         }
+out_put_tasks:
         /* clean up the array of referenced threads in the group. */
         for (i = 0; i < group_size; i++) {
-                tsk = flex_array_get_ptr(group, i);
-                put_task_struct(tsk);
+                tc = flex_array_get(group, i);
+                put_task_struct(tc->task);
         }
 out_free_group_list:
         flex_array_free(group);
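The switch above from flex_array_put_ptr()/flex_array_get_ptr() to
flex_array_put()/flex_array_get() follows from the array now holding
struct task_and_cgroup elements by value: put copies the whole element
into the array's storage, and get returns a pointer into that storage.
A rough user-space approximation of those value semantics (fa_alloc,
fa_put, and fa_get are illustrative stand-ins; the real flex_array
pages its backing store to cope with very large thread groups):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* illustrative stand-in for the kernel's struct flex_array */
struct fa_sketch {
        size_t element_size;
        char *data;
};

static struct fa_sketch *fa_alloc(size_t element_size, unsigned int total)
{
        struct fa_sketch *fa = malloc(sizeof(*fa));

        if (!fa)
                return NULL;
        fa->element_size = element_size;
        fa->data = calloc(total, element_size);
        if (!fa->data) {
                free(fa);
                return NULL;
        }
        return fa;
}

/* like flex_array_put(group, i, &ent, ...): copies the element in */
static int fa_put(struct fa_sketch *fa, unsigned int nr, const void *src)
{
        memcpy(fa->data + nr * fa->element_size, src, fa->element_size);
        return 0;
}

/* like flex_array_get(group, i): returns a pointer into the storage */
static void *fa_get(struct fa_sketch *fa, unsigned int nr)
{
        return fa->data + nr * fa->element_size;
}

struct tc_sketch { int pid; int cgrp_id; };

int main(void)
{
        struct fa_sketch *group = fa_alloc(sizeof(struct tc_sketch), 4);
        struct tc_sketch ent = { 100, 1 }, *tc;

        if (!group)
                return 1;
        fa_put(group, 0, &ent);  /* ent may go out of scope; the copy persists */
        tc = fa_get(group, 0);
        printf("pid %d in cgrp %d\n", tc->pid, tc->cgrp_id);
        free(group->data);
        free(group);
        return 0;
}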