author    Li Zefan <lizefan@huawei.com>    2013-03-12 21:17:09 -0400
committer Tejun Heo <tj@kernel.org>        2013-03-20 10:50:25 -0400
commit    081aa458c38ba576bdd4265fc807fa95b48b9e79 (patch)
tree      b080dbb2d1fed0b53193b8552f561d8bab39d6b5 /kernel/cgroup.c
parent    bd2953ebbb533aeda9b86c82a53d5197a9a38f1b (diff)
cgroup: consolidate cgroup_attach_task() and cgroup_attach_proc()
These two functions share most of the code.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cgroup.c')
 kernel/cgroup.c | 109 ++++++------------
 1 file changed, 20 insertions(+), 89 deletions(-)
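The consolidation is easiest to see as a pattern: two near-duplicate entry points become one function with a bool threadgroup parameter, and the single-task case is just a threadgroup walk that stops after the first iteration (the "if (!threadgroup) break;" hunk below). A minimal userspace sketch of the pattern, assuming a NULL-terminated thread list; struct task, attach_one() and next_in_group are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for task_struct and the per-task attach work. */
struct task {
        int tid;
        struct task *next_in_group;     /* NULL-terminated here; the kernel
                                           uses a circular list */
};

static int attach_one(struct task *t)
{
        printf("attaching tid %d\n", t->tid);
        return 0;
}

/* One entry point replaces the former single-task/whole-group pair. */
static int attach(struct task *leader, bool threadgroup)
{
        struct task *t = leader;

        do {
                int ret = attach_one(t);
                if (ret)
                        return ret;
                if (!threadgroup)
                        break;          /* single task: stop after one pass */
                t = t->next_in_group;
        } while (t);

        return 0;
}

int main(void)
{
        struct task t2 = { .tid = 2, .next_in_group = NULL };
        struct task t1 = { .tid = 1, .next_in_group = &t2 };

        attach(&t1, false);     /* like the old cgroup_attach_task() */
        attach(&t1, true);      /* like the old cgroup_attach_proc() */
        return 0;
}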
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 54689fc008f6..04fa2abf94b2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -59,7 +59,7 @@
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 #include <linux/eventfd.h>
 #include <linux/poll.h>
-#include <linux/flex_array.h> /* used in cgroup_attach_proc */
+#include <linux/flex_array.h> /* used in cgroup_attach_task */
 #include <linux/kthread.h>
 
 #include <linux/atomic.h>
@@ -1944,82 +1944,6 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 }
 
 /**
- * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
- * @cgrp: the cgroup the task is attaching to
- * @tsk: the task to be attached
- *
- * Call with cgroup_mutex and threadgroup locked. May take task_lock of
- * @tsk during call.
- */
-int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
-        int retval = 0;
-        struct cgroup_subsys *ss, *failed_ss = NULL;
-        struct cgroup *oldcgrp;
-        struct cgroupfs_root *root = cgrp->root;
-        struct cgroup_taskset tset = { };
-        struct css_set *newcg;
-
-        /* @tsk either already exited or can't exit until the end */
-        if (tsk->flags & PF_EXITING)
-                return -ESRCH;
-
-        /* Nothing to do if the task is already in that cgroup */
-        oldcgrp = task_cgroup_from_root(tsk, root);
-        if (cgrp == oldcgrp)
-                return 0;
-
-        tset.single.task = tsk;
-        tset.single.cgrp = oldcgrp;
-
-        for_each_subsys(root, ss) {
-                if (ss->can_attach) {
-                        retval = ss->can_attach(cgrp, &tset);
-                        if (retval) {
-                                /*
-                                 * Remember on which subsystem the can_attach()
-                                 * failed, so that we only call cancel_attach()
-                                 * against the subsystems whose can_attach()
-                                 * succeeded. (See below)
-                                 */
-                                failed_ss = ss;
-                                goto out;
-                        }
-                }
-        }
-
-        newcg = find_css_set(tsk->cgroups, cgrp);
-        if (!newcg) {
-                retval = -ENOMEM;
-                goto out;
-        }
-
-        cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
-
-        for_each_subsys(root, ss) {
-                if (ss->attach)
-                        ss->attach(cgrp, &tset);
-        }
-
-out:
-        if (retval) {
-                for_each_subsys(root, ss) {
-                        if (ss == failed_ss)
-                                /*
-                                 * This subsystem was the one that failed the
-                                 * can_attach() check earlier, so we don't need
-                                 * to call cancel_attach() against it or any
-                                 * remaining subsystems.
-                                 */
-                                break;
-                        if (ss->cancel_attach)
-                                ss->cancel_attach(cgrp, &tset);
-                }
-        }
-        return retval;
-}
-
-/**
  * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
  * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
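The deleted function above is also a compact record of the attach protocol that the surviving function keeps: every subsystem's can_attach() is polled first, and on failure cancel_attach() runs only for the subsystems that had already agreed, with failed_ss marking where to stop. A hedged userspace sketch of that prepare/commit/rollback pattern; struct op and run_all() are illustrative, not kernel API:

#include <stdio.h>

struct op {
        int (*prepare)(void);   /* like ss->can_attach() */
        void (*commit)(void);   /* like ss->attach() */
        void (*cancel)(void);   /* like ss->cancel_attach() */
};

/*
 * Prepare every op first; if one refuses, unwind only the ops that had
 * already agreed -- the role failed_ss plays in the deleted code.
 */
static int run_all(struct op *ops, int nr)
{
        int i, ret;

        for (i = 0; i < nr; i++) {
                if (!ops[i].prepare)
                        continue;
                ret = ops[i].prepare();
                if (ret) {
                        while (i-- > 0)         /* the failed op is skipped */
                                if (ops[i].cancel)
                                        ops[i].cancel();
                        return ret;
                }
        }
        for (i = 0; i < nr; i++)
                if (ops[i].commit)
                        ops[i].commit();
        return 0;
}

static int ok(void)     { return 0; }
static int refuse(void) { return -1; }
static void did(void)   { puts("committed"); }
static void undo(void)  { puts("cancelled"); }

int main(void)
{
        struct op ops[] = {
                { ok, did, undo },
                { refuse, did, undo },  /* forces rollback of the first op */
        };
        return run_all(ops, 2) ? 1 : 0;
}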
@@ -2033,7 +1957,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
         for_each_active_root(root) {
                 struct cgroup *from_cg = task_cgroup_from_root(from, root);
 
-                retval = cgroup_attach_task(from_cg, tsk);
+                retval = cgroup_attach_task(from_cg, tsk, false);
                 if (retval)
                         break;
         }
@@ -2044,21 +1968,22 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /**
- * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
+ * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
  * @cgrp: the cgroup to attach to
- * @leader: the threadgroup leader task_struct of the group to be attached
+ * @tsk: the task or the leader of the threadgroup to be attached
+ * @threadgroup: attach the whole threadgroup?
  *
  * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
- * task_lock of each thread in leader's threadgroup individually in turn.
+ * task_lock of @tsk or each thread in the threadgroup individually in turn.
  */
-static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
+                       bool threadgroup)
 {
         int retval, i, group_size;
         struct cgroup_subsys *ss, *failed_ss = NULL;
-        /* guaranteed to be initialized later, but the compiler needs this */
         struct cgroupfs_root *root = cgrp->root;
         /* threadgroup list cursor and array */
-        struct task_struct *tsk;
+        struct task_struct *leader = tsk;
         struct task_and_cgroup *tc;
         struct flex_array *group;
         struct cgroup_taskset tset = { };
@@ -2070,7 +1995,10 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
          * group - group_rwsem prevents new threads from appearing, and if
          * threads exit, this will just be an over-estimate.
          */
-        group_size = get_nr_threads(leader);
+        if (threadgroup)
+                group_size = get_nr_threads(tsk);
+        else
+                group_size = 1;
         /* flex_array supports very large thread-groups better than kmalloc. */
         group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
         if (!group)
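One detail worth calling out in this hunk: group_size is only an upper bound taken before the snapshot walk (the surrounding comment notes exiting threads just make it an over-estimate), and the single-task path reuses the same machinery with a bound of 1. A hedged sketch of the estimate-then-count allocation pattern, with plain malloc standing in for flex_array and the thread count invented for illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int threadgroup = 1;
        int nr_threads = 8;     /* hypothetical get_nr_threads() snapshot */
        int capacity = threadgroup ? nr_threads : 1;
        int *group, i, nr = 0;

        group = malloc(sizeof(*group) * capacity);
        if (!group)
                return 1;

        /* Some threads may exit between the estimate and the walk, so
         * fewer slots than capacity may be filled; that is harmless. */
        for (i = 0; i < capacity - 2; i++)
                group[nr++] = i;

        printf("capacity %d, snapshotted %d\n", capacity, nr);
        free(group);
        return 0;
}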
@@ -2080,7 +2008,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         if (retval)
                 goto out_free_group_list;
 
-        tsk = leader;
         i = 0;
         /*
          * Prevent freeing of tasks while we take a snapshot. Tasks that are
@@ -2109,6 +2036,9 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                 retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                 BUG_ON(retval != 0);
                 i++;
+
+                if (!threadgroup)
+                        break;
         } while_each_thread(leader, tsk);
         rcu_read_unlock();
         /* remember the number of threads in the array for later. */
@@ -2262,9 +2192,10 @@ retry_find_task:
                         put_task_struct(tsk);
                         goto retry_find_task;
                 }
-                ret = cgroup_attach_proc(cgrp, tsk);
-        } else
-                ret = cgroup_attach_task(cgrp, tsk);
+        }
+
+        ret = cgroup_attach_task(cgrp, tsk, threadgroup);
+
         threadgroup_unlock(tsk);
 
         put_task_struct(tsk);
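At the call site in the last hunk the if/else between the two helpers collapses to one call; threadgroup, which in this file is true for writes to cgroup.procs and false for writes to tasks, is simply forwarded. A hedged sketch of the resulting call-site shape, with stub types standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs; only the call-site shape is the point here. */
struct cgroup { const char *name; };
struct task_struct { int pid; };

static int cgroup_attach_task_sketch(struct cgroup *cgrp,
                                     struct task_struct *tsk,
                                     bool threadgroup)
{
        printf("attach pid %d to %s (%s)\n", tsk->pid, cgrp->name,
               threadgroup ? "whole group" : "single task");
        return 0;
}

int main(void)
{
        struct cgroup cg = { .name = "test" };
        struct task_struct t = { .pid = 42 };

        /* One call for both cases, instead of choosing between the old
         * cgroup_attach_task() and cgroup_attach_proc(). */
        cgroup_attach_task_sketch(&cg, &t, false);  /* write to "tasks" */
        cgroup_attach_task_sketch(&cg, &t, true);   /* write to "cgroup.procs" */
        return 0;
}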