author	Tejun Heo <tj@kernel.org>	2011-12-12 21:12:22 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-12 21:12:22 -0500
commit	494c167cf76d02000adf740c215adc69a824ecc9 (patch)
tree	bf7cdf462b62ebf099b965f7bea503df30918c17
parent	94196f51c1ee5bbad674de28c682b17d78adb8e6 (diff)
cgroup: kill subsys->can_attach_task(), pre_attach() and attach_task()
These three methods are no longer used.  Kill them.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Menage <paul@paulmenage.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
-rw-r--r--	Documentation/cgroups/cgroups.txt	| 20
-rw-r--r--	include/linux/cgroup.h	|  3
-rw-r--r--	kernel/cgroup.c	| 52
3 files changed, 5 insertions(+), 70 deletions(-)
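
For illustration only (not part of this commit): with these callbacks gone, a controller that needs per-task checks is expected to do them inside ->can_attach(), walking the cgroup_taskset that the earlier patches in this series pass in. A minimal sketch, assuming the cgroup_taskset_for_each() iterator from that series; the "foo" controller and its PF_EXITING check are hypothetical:

#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* hypothetical controller: reject the migration if any task is exiting */
static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* walk every task being migrated, skipping ones already in cgrp */
	cgroup_taskset_for_each(task, cgrp, tset) {
		if (task->flags & PF_EXITING)
			return -ESRCH;	/* whole migration fails */
	}
	return 0;
}
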
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 8a2f302327fa..a7c96ae5557c 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -615,13 +615,6 @@ fork. If this method returns 0 (success) then this should remain valid
 while the caller holds cgroup_mutex and it is ensured that either
 attach() or cancel_attach() will be called in future.
 
-int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
-(cgroup_mutex held by caller)
-
-As can_attach, but for operations that must be run once per task to be
-attached (possibly many when using cgroup_attach_proc). Called after
-can_attach.
-
 void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		   struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
@@ -632,12 +625,6 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
 This will be called only about subsystems whose can_attach() operation have
 succeeded. The parameters are identical to can_attach().
 
-void pre_attach(struct cgroup *cgrp);
-(cgroup_mutex held by caller)
-
-For any non-per-thread attachment work that needs to happen before
-attach_task. Needed by cpuset.
-
 void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	    struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
@@ -646,13 +633,6 @@ Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
 The parameters are identical to can_attach().
 
-void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
-(cgroup_mutex held by caller)
-
-As attach, but for operations that must be run once per task to be attached,
-like can_attach_task. Called before attach. Currently does not support any
-subsystem that might need the old_cgrp for every thread in the group.
-
 void fork(struct cgroup_subsy *ss, struct task_struct *task)
 
 Called when a task is forked into a cgroup.
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 34256ad9e553..7ad5e406c421 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -490,11 +490,8 @@ struct cgroup_subsys {
 	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			  struct cgroup_taskset *tset);
-	int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
 	void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			      struct cgroup_taskset *tset);
-	void (*pre_attach)(struct cgroup *cgrp);
-	void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
 	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		       struct cgroup_taskset *tset);
 	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 41ee01e392e6..1b3b84174ead 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1944,13 +1944,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 				goto out;
 			}
 		}
-		if (ss->can_attach_task) {
-			retval = ss->can_attach_task(cgrp, tsk);
-			if (retval) {
-				failed_ss = ss;
-				goto out;
-			}
-		}
 	}
 
 	retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
@@ -1958,10 +1951,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 		goto out;
 
 	for_each_subsys(root, ss) {
-		if (ss->pre_attach)
-			ss->pre_attach(cgrp);
-		if (ss->attach_task)
-			ss->attach_task(cgrp, tsk);
 		if (ss->attach)
 			ss->attach(ss, cgrp, &tset);
 	}
@@ -2093,7 +2082,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
 	int retval, i, group_size, nr_migrating_tasks;
 	struct cgroup_subsys *ss, *failed_ss = NULL;
-	bool cancel_failed_ss = false;
 	/* guaranteed to be initialized later, but the compiler needs this */
 	struct css_set *oldcg;
 	struct cgroupfs_root *root = cgrp->root;
@@ -2188,21 +2176,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 				goto out_cancel_attach;
 			}
 		}
-		/* a callback to be run on every thread in the threadgroup. */
-		if (ss->can_attach_task) {
-			/* run on each task in the threadgroup. */
-			for (i = 0; i < group_size; i++) {
-				tc = flex_array_get(group, i);
-				if (tc->cgrp == cgrp)
-					continue;
-				retval = ss->can_attach_task(cgrp, tc->task);
-				if (retval) {
-					failed_ss = ss;
-					cancel_failed_ss = true;
-					goto out_cancel_attach;
-				}
-			}
-		}
 	}
 
 	/*
@@ -2234,15 +2207,10 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	}
 
 	/*
-	 * step 3: now that we're guaranteed success wrt the css_sets, proceed
-	 * to move all tasks to the new cgroup, calling ss->attach_task for each
-	 * one along the way. there are no failure cases after here, so this is
-	 * the commit point.
+	 * step 3: now that we're guaranteed success wrt the css_sets,
+	 * proceed to move all tasks to the new cgroup. There are no
+	 * failure cases after here, so this is the commit point.
 	 */
-	for_each_subsys(root, ss) {
-		if (ss->pre_attach)
-			ss->pre_attach(cgrp);
-	}
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
 		/* leave current thread as it is if it's already there */
@@ -2250,18 +2218,11 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 			continue;
 		retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
 		BUG_ON(retval);
-		/* attach each task to each subsystem */
-		for_each_subsys(root, ss) {
-			if (ss->attach_task)
-				ss->attach_task(cgrp, tc->task);
-		}
 	}
 	/* nothing is sensitive to fork() after this point. */
 
 	/*
-	 * step 4: do expensive, non-thread-specific subsystem callbacks.
-	 * TODO: if ever a subsystem needs to know the oldcgrp for each task
-	 * being moved, this call will need to be reworked to communicate that.
+	 * step 4: do subsystem attach callbacks.
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->attach)
@@ -2285,11 +2246,8 @@ out_cancel_attach:
 		/* same deal as in cgroup_attach_task */
 		if (retval) {
 			for_each_subsys(root, ss) {
-				if (ss == failed_ss) {
-					if (cancel_failed_ss && ss->cancel_attach)
-						ss->cancel_attach(ss, cgrp, &tset);
+				if (ss == failed_ss)
 					break;
-				}
 				if (ss->cancel_attach)
 					ss->cancel_attach(ss, cgrp, &tset);
 			}
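
Likewise for the attach side, an illustrative sketch only (not from this patch): group-wide preparation that used to sit in ->pre_attach() and the per-task work from ->attach_task() both move into a single ->attach() call over the same taskset. foo_prepare_group() and foo_move_task() are hypothetical stand-ins for real controller work:

#include <linux/cgroup.h>
#include <linux/sched.h>

/* hypothetical helpers standing in for real controller work */
static void foo_prepare_group(struct cgroup *cgrp) { }
static void foo_move_task(struct cgroup *cgrp, struct task_struct *task) { }

static void foo_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		       struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* one-time, group-wide setup (formerly ->pre_attach()) */
	foo_prepare_group(cgrp);

	/* per-task work (formerly ->attach_task()) */
	cgroup_taskset_for_each(task, cgrp, tset)
		foo_move_task(cgrp, task);
}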