author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-20 21:11:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-20 21:11:21 -0400
commit     0d9cabdccedb79ee5f27b77ff51f29a9e7d23275
tree       8bfb64c3672d058eb90aec3c3a9c4f61cef9097c  /kernel
parent     701085b219016d38f105b031381b9cee6200253a
parent     3ce3230a0cff484e5130153f244d4fb8a56b3a8b
Merge branch 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup changes from Tejun Heo:
 "Out of the 8 commits, one fixes a long-standing locking issue around
  tasklist walking and others are cleanups."

* 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: Walk task list under tasklist_lock in cgroup_enable_task_cg_list
  cgroup: Remove wrong comment on cgroup_enable_task_cg_list()
  cgroup: remove cgroup_subsys argument from callbacks
  cgroup: remove extra calls to find_existing_css_set
  cgroup: replace tasklist_lock with rcu_read_lock
  cgroup: simplify double-check locking in cgroup_attach_proc
  cgroup: move struct cgroup_pidlist out from the header file
  cgroup: remove cgroup_attach_task_current_cg()
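[Editorial orientation, not part of the original commit message] The largest mechanical change in this series is "cgroup: remove cgroup_subsys argument from callbacks": every subsystem callback loses the redundant struct cgroup_subsys pointer. The sketch below is a minimal, self-contained userspace C model of that signature change only; the struct and function names merely mimic the kernel's, and none of this is kernel code.

/*
 * Illustrative sketch only (plain userspace C): models the callback
 * signature change in this pull, i.e. callbacks no longer receive the
 * cgroup_subsys pointer they already belong to.
 */
#include <stdio.h>

struct cgroup { const char *name; };

/* old style: every callback carried a redundant subsystem argument */
struct old_cgroup_subsys {
    int (*can_attach)(struct old_cgroup_subsys *ss, struct cgroup *cgrp);
};

/* new style after this pull: the subsystem argument is gone */
struct new_cgroup_subsys {
    int (*can_attach)(struct cgroup *cgrp);
};

static int freezer_can_attach(struct cgroup *cgrp)
{
    printf("can_attach(%s)\n", cgrp->name);
    return 0;
}

int main(void)
{
    struct new_cgroup_subsys freezer = { .can_attach = freezer_can_attach };
    struct cgroup cg = { .name = "frozen" };

    /* callbacks now take only the cgroup they operate on */
    return freezer.can_attach(&cg);
}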
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup.c          | 327
-rw-r--r--   kernel/cgroup_freezer.c  |  11
-rw-r--r--   kernel/cpuset.c          |  16
-rw-r--r--   kernel/events/core.c     |  13
-rw-r--r--   kernel/sched/core.c      |  20
5 files changed, 158 insertions(+), 229 deletions(-)
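[Editorial orientation] Two of the cleanups reshape the attach path in kernel/cgroup.c: cgroup_task_migrate() now takes a css_set the caller has already looked up (so the commit step can no longer fail), and cgroup_attach_proc() keeps that css_set per task in tc->cg instead of a separate prefetch list. The following self-contained sketch (plain userspace C with stand-in types and mock helpers, not the kernel code itself) models that "grab everything fallible first, then commit" pattern; the real implementation is in the diff below.

/*
 * Schematic sketch of the allocate-then-commit pattern used by
 * cgroup_attach_proc() after this series.  find_css_set()/put_css_set()
 * here are mock userspace helpers, not the kernel functions.
 */
#include <errno.h>
#include <stdlib.h>

struct css_set { int refcnt; };
struct task_and_cgroup { struct css_set *cg; };

static struct css_set *find_css_set(void)       /* may allocate, may fail */
{
    return calloc(1, sizeof(struct css_set));
}

static void put_css_set(struct css_set *cg)
{
    free(cg);
}

static int attach_group(struct task_and_cgroup *tc, int group_size)
{
    int i;

    /* step 1: everything that can fail happens before any task is moved */
    for (i = 0; i < group_size; i++) {
        tc[i].cg = find_css_set();
        if (!tc[i].cg)
            goto out_put_css_set_refs;
    }

    /* step 2: the commit loop cannot fail, so no partial-migration rollback */
    for (i = 0; i < group_size; i++)
        tc[i].cg->refcnt++;     /* stands in for cgroup_task_migrate() */

    return 0;

out_put_css_set_refs:
    while (i--)
        put_css_set(tc[i].cg);  /* drop the references taken so far */
    return -ENOMEM;
}

int main(void)
{
    struct task_and_cgroup tc[4] = { { 0 } };
    int i, ret;

    ret = attach_group(tc, 4);
    if (ret == 0)
        for (i = 0; i < 4; i++) /* tidy up the demo allocations */
            put_css_set(tc[i].cg);
    return ret;
}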
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a5d3b5325f77..c6877fe9a831 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -818,7 +818,7 @@ static int cgroup_call_pre_destroy(struct cgroup *cgrp)
 
     for_each_subsys(cgrp->root, ss)
         if (ss->pre_destroy) {
-            ret = ss->pre_destroy(ss, cgrp);
+            ret = ss->pre_destroy(cgrp);
             if (ret)
                 break;
         }
@@ -846,7 +846,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
          * Release the subsystem state objects.
          */
         for_each_subsys(cgrp->root, ss)
-            ss->destroy(ss, cgrp);
+            ss->destroy(cgrp);
 
         cgrp->root->number_of_cgroups--;
         mutex_unlock(&cgroup_mutex);
@@ -1015,7 +1015,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
             list_move(&ss->sibling, &root->subsys_list);
             ss->root = root;
             if (ss->bind)
-                ss->bind(ss, cgrp);
+                ss->bind(cgrp);
             mutex_unlock(&ss->hierarchy_mutex);
             /* refcount was already taken, and we're keeping it */
         } else if (bit & removed_bits) {
@@ -1025,7 +1025,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
             BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
             mutex_lock(&ss->hierarchy_mutex);
             if (ss->bind)
-                ss->bind(ss, dummytop);
+                ss->bind(dummytop);
             dummytop->subsys[i]->cgroup = dummytop;
             cgrp->subsys[i] = NULL;
             subsys[i]->root = &rootnode;
@@ -1763,6 +1763,7 @@ EXPORT_SYMBOL_GPL(cgroup_path);
 struct task_and_cgroup {
     struct task_struct  *task;
     struct cgroup       *cgrp;
+    struct css_set      *cg;
 };
 
 struct cgroup_taskset {
@@ -1843,11 +1844,10 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_size);
  * will already exist. If not set, this function might sleep, and can fail with
  * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
  */
-static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
-                               struct task_struct *tsk, bool guarantee)
+static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+                                struct task_struct *tsk, struct css_set *newcg)
 {
     struct css_set *oldcg;
-    struct css_set *newcg;
 
     /*
      * We are synchronized through threadgroup_lock() against PF_EXITING
@@ -1857,23 +1857,6 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
     WARN_ON_ONCE(tsk->flags & PF_EXITING);
     oldcg = tsk->cgroups;
 
-    /* locate or allocate a new css_set for this task. */
-    if (guarantee) {
-        /* we know the css_set we want already exists. */
-        struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-        read_lock(&css_set_lock);
-        newcg = find_existing_css_set(oldcg, cgrp, template);
-        BUG_ON(!newcg);
-        get_css_set(newcg);
-        read_unlock(&css_set_lock);
-    } else {
-        might_sleep();
-        /* find_css_set will give us newcg already referenced. */
-        newcg = find_css_set(oldcg, cgrp);
-        if (!newcg)
-            return -ENOMEM;
-    }
-
     task_lock(tsk);
     rcu_assign_pointer(tsk->cgroups, newcg);
     task_unlock(tsk);
@@ -1892,7 +1875,6 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
     put_css_set(oldcg);
 
     set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
-    return 0;
 }
 
 /**
@@ -1910,6 +1892,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
     struct cgroup *oldcgrp;
     struct cgroupfs_root *root = cgrp->root;
     struct cgroup_taskset tset = { };
+    struct css_set *newcg;
 
     /* @tsk either already exited or can't exit until the end */
     if (tsk->flags & PF_EXITING)
@@ -1925,7 +1908,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
     for_each_subsys(root, ss) {
         if (ss->can_attach) {
-            retval = ss->can_attach(ss, cgrp, &tset);
+            retval = ss->can_attach(cgrp, &tset);
             if (retval) {
                 /*
                  * Remember on which subsystem the can_attach()
@@ -1939,13 +1922,17 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
         }
     }
 
-    retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
-    if (retval)
+    newcg = find_css_set(tsk->cgroups, cgrp);
+    if (!newcg) {
+        retval = -ENOMEM;
         goto out;
+    }
+
+    cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
 
     for_each_subsys(root, ss) {
         if (ss->attach)
-            ss->attach(ss, cgrp, &tset);
+            ss->attach(cgrp, &tset);
     }
 
     synchronize_rcu();
@@ -1967,7 +1954,7 @@ out:
                  */
                 break;
             if (ss->cancel_attach)
-                ss->cancel_attach(ss, cgrp, &tset);
+                ss->cancel_attach(cgrp, &tset);
         }
     }
     return retval;
@@ -1997,66 +1984,6 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-/*
- * cgroup_attach_proc works in two stages, the first of which prefetches all
- * new css_sets needed (to make sure we have enough memory before committing
- * to the move) and stores them in a list of entries of the following type.
- * TODO: possible optimization: use css_set->rcu_head for chaining instead
- */
-struct cg_list_entry {
-    struct css_set *cg;
-    struct list_head links;
-};
-
-static bool css_set_check_fetched(struct cgroup *cgrp,
-                                  struct task_struct *tsk, struct css_set *cg,
-                                  struct list_head *newcg_list)
-{
-    struct css_set *newcg;
-    struct cg_list_entry *cg_entry;
-    struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-
-    read_lock(&css_set_lock);
-    newcg = find_existing_css_set(cg, cgrp, template);
-    read_unlock(&css_set_lock);
-
-    /* doesn't exist at all? */
-    if (!newcg)
-        return false;
-    /* see if it's already in the list */
-    list_for_each_entry(cg_entry, newcg_list, links)
-        if (cg_entry->cg == newcg)
-            return true;
-
-    /* not found */
-    return false;
-}
-
-/*
- * Find the new css_set and store it in the list in preparation for moving the
- * given task to the given cgroup. Returns 0 or -ENOMEM.
- */
-static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
-                            struct list_head *newcg_list)
-{
-    struct css_set *newcg;
-    struct cg_list_entry *cg_entry;
-
-    /* ensure a new css_set will exist for this thread */
-    newcg = find_css_set(cg, cgrp);
-    if (!newcg)
-        return -ENOMEM;
-    /* add it to the list */
-    cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
-    if (!cg_entry) {
-        put_css_set(newcg);
-        return -ENOMEM;
-    }
-    cg_entry->cg = newcg;
-    list_add(&cg_entry->links, newcg_list);
-    return 0;
-}
-
 /**
  * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
  * @cgrp: the cgroup to attach to
@@ -2070,20 +1997,12 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
     int retval, i, group_size;
     struct cgroup_subsys *ss, *failed_ss = NULL;
     /* guaranteed to be initialized later, but the compiler needs this */
-    struct css_set *oldcg;
     struct cgroupfs_root *root = cgrp->root;
     /* threadgroup list cursor and array */
     struct task_struct *tsk;
     struct task_and_cgroup *tc;
     struct flex_array *group;
     struct cgroup_taskset tset = { };
-    /*
-     * we need to make sure we have css_sets for all the tasks we're
-     * going to move -before- we actually start moving them, so that in
-     * case we get an ENOMEM we can bail out before making any changes.
-     */
-    struct list_head newcg_list;
-    struct cg_list_entry *cg_entry, *temp_nobe;
 
     /*
      * step 0: in order to do expensive, possibly blocking operations for
@@ -2102,23 +2021,14 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
     if (retval)
         goto out_free_group_list;
 
-    /* prevent changes to the threadgroup list while we take a snapshot. */
-    read_lock(&tasklist_lock);
-    if (!thread_group_leader(leader)) {
-        /*
-         * a race with de_thread from another thread's exec() may strip
-         * us of our leadership, making while_each_thread unsafe to use
-         * on this task. if this happens, there is no choice but to
-         * throw this task away and try again (from cgroup_procs_write);
-         * this is "double-double-toil-and-trouble-check locking".
-         */
-        read_unlock(&tasklist_lock);
-        retval = -EAGAIN;
-        goto out_free_group_list;
-    }
-
     tsk = leader;
     i = 0;
+    /*
+     * Prevent freeing of tasks while we take a snapshot. Tasks that are
+     * already PF_EXITING could be freed from underneath us unless we
+     * take an rcu_read_lock.
+     */
+    rcu_read_lock();
     do {
         struct task_and_cgroup ent;
 
@@ -2128,24 +2038,24 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
         /* as per above, nr_threads may decrease, but not increase. */
         BUG_ON(i >= group_size);
-        /*
-         * saying GFP_ATOMIC has no effect here because we did prealloc
-         * earlier, but it's good form to communicate our expectations.
-         */
         ent.task = tsk;
         ent.cgrp = task_cgroup_from_root(tsk, root);
         /* nothing to do if this task is already in the cgroup */
         if (ent.cgrp == cgrp)
             continue;
+        /*
+         * saying GFP_ATOMIC has no effect here because we did prealloc
+         * earlier, but it's good form to communicate our expectations.
+         */
         retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
         BUG_ON(retval != 0);
         i++;
     } while_each_thread(leader, tsk);
+    rcu_read_unlock();
     /* remember the number of threads in the array for later. */
     group_size = i;
     tset.tc_array = group;
     tset.tc_array_len = group_size;
-    read_unlock(&tasklist_lock);
 
     /* methods shouldn't be called if no task is actually migrating */
     retval = 0;
@@ -2157,7 +2067,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
      */
     for_each_subsys(root, ss) {
         if (ss->can_attach) {
-            retval = ss->can_attach(ss, cgrp, &tset);
+            retval = ss->can_attach(cgrp, &tset);
             if (retval) {
                 failed_ss = ss;
                 goto out_cancel_attach;
@@ -2169,17 +2079,12 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
      * step 2: make sure css_sets exist for all threads to be migrated.
      * we use find_css_set, which allocates a new one if necessary.
      */
-    INIT_LIST_HEAD(&newcg_list);
     for (i = 0; i < group_size; i++) {
         tc = flex_array_get(group, i);
-        oldcg = tc->task->cgroups;
-
-        /* if we don't already have it in the list get a new one */
-        if (!css_set_check_fetched(cgrp, tc->task, oldcg,
-                                   &newcg_list)) {
-            retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
-            if (retval)
-                goto out_list_teardown;
+        tc->cg = find_css_set(tc->task->cgroups, cgrp);
+        if (!tc->cg) {
+            retval = -ENOMEM;
+            goto out_put_css_set_refs;
         }
     }
 
@@ -2190,8 +2095,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
      */
     for (i = 0; i < group_size; i++) {
         tc = flex_array_get(group, i);
-        retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
-        BUG_ON(retval);
+        cgroup_task_migrate(cgrp, tc->cgrp, tc->task, tc->cg);
     }
     /* nothing is sensitive to fork() after this point. */
 
@@ -2200,7 +2104,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
      */
     for_each_subsys(root, ss) {
         if (ss->attach)
-            ss->attach(ss, cgrp, &tset);
+            ss->attach(cgrp, &tset);
     }
 
     /*
@@ -2209,21 +2113,22 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
     synchronize_rcu();
     cgroup_wakeup_rmdir_waiter(cgrp);
     retval = 0;
-out_list_teardown:
-    /* clean up the list of prefetched css_sets. */
-    list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
-        list_del(&cg_entry->links);
-        put_css_set(cg_entry->cg);
-        kfree(cg_entry);
+out_put_css_set_refs:
+    if (retval) {
+        for (i = 0; i < group_size; i++) {
+            tc = flex_array_get(group, i);
+            if (!tc->cg)
+                break;
+            put_css_set(tc->cg);
+        }
     }
 out_cancel_attach:
-    /* same deal as in cgroup_attach_task */
     if (retval) {
         for_each_subsys(root, ss) {
             if (ss == failed_ss)
                 break;
             if (ss->cancel_attach)
-                ss->cancel_attach(ss, cgrp, &tset);
+                ss->cancel_attach(cgrp, &tset);
         }
     }
 out_free_group_list:
@@ -2245,22 +2150,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
     if (!cgroup_lock_live_group(cgrp))
         return -ENODEV;
 
+retry_find_task:
+    rcu_read_lock();
     if (pid) {
-        rcu_read_lock();
         tsk = find_task_by_vpid(pid);
         if (!tsk) {
             rcu_read_unlock();
-            cgroup_unlock();
-            return -ESRCH;
-        }
-        if (threadgroup) {
-            /*
-             * RCU protects this access, since tsk was found in the
-             * tid map. a race with de_thread may cause group_leader
-             * to stop being the leader, but cgroup_attach_proc will
-             * detect it later.
-             */
-            tsk = tsk->group_leader;
+            ret= -ESRCH;
+            goto out_unlock_cgroup;
         }
         /*
          * even if we're attaching all tasks in the thread group, we
@@ -2271,29 +2168,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
             cred->euid != tcred->uid &&
             cred->euid != tcred->suid) {
             rcu_read_unlock();
-            cgroup_unlock();
-            return -EACCES;
+            ret = -EACCES;
+            goto out_unlock_cgroup;
         }
-        get_task_struct(tsk);
-        rcu_read_unlock();
-    } else {
-        if (threadgroup)
-            tsk = current->group_leader;
-        else
-            tsk = current;
-        get_task_struct(tsk);
-    }
-
-    threadgroup_lock(tsk);
+    } else
+        tsk = current;
 
     if (threadgroup)
+        tsk = tsk->group_leader;
+    get_task_struct(tsk);
+    rcu_read_unlock();
+
+    threadgroup_lock(tsk);
+    if (threadgroup) {
+        if (!thread_group_leader(tsk)) {
+            /*
+             * a race with de_thread from another thread's exec()
+             * may strip us of our leadership, if this happens,
+             * there is no choice but to throw this task away and
+             * try again; this is
+             * "double-double-toil-and-trouble-check locking".
+             */
+            threadgroup_unlock(tsk);
+            put_task_struct(tsk);
+            goto retry_find_task;
+        }
         ret = cgroup_attach_proc(cgrp, tsk);
-    else
+    } else
         ret = cgroup_attach_task(cgrp, tsk);
-
     threadgroup_unlock(tsk);
 
     put_task_struct(tsk);
+out_unlock_cgroup:
     cgroup_unlock();
     return ret;
 }
@@ -2305,16 +2211,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
 
 static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
 {
-    int ret;
-    do {
-        /*
-         * attach_proc fails with -EAGAIN if threadgroup leadership
-         * changes in the middle of the operation, in which case we need
-         * to find the task_struct for the new leader and start over.
-         */
-        ret = attach_task_by_pid(cgrp, tgid, true);
-    } while (ret == -EAGAIN);
-    return ret;
+    return attach_task_by_pid(cgrp, tgid, true);
 }
 
 /**
@@ -2804,15 +2701,20 @@ static void cgroup_advance_iter(struct cgroup *cgrp,
  * using their cgroups capability, we don't maintain the lists running
  * through each css_set to its tasks until we see the list actually
  * used - in other words after the first call to cgroup_iter_start().
- *
- * The tasklist_lock is not held here, as do_each_thread() and
- * while_each_thread() are protected by RCU.
  */
 static void cgroup_enable_task_cg_lists(void)
 {
     struct task_struct *p, *g;
     write_lock(&css_set_lock);
     use_task_css_set_links = 1;
+    /*
+     * We need tasklist_lock because RCU is not safe against
+     * while_each_thread(). Besides, a forking task that has passed
+     * cgroup_post_fork() without seeing use_task_css_set_links = 1
+     * is not guaranteed to have its child immediately visible in the
+     * tasklist if we walk through it with RCU.
+     */
+    read_lock(&tasklist_lock);
     do_each_thread(g, p) {
         task_lock(p);
         /*
@@ -2824,6 +2726,7 @@ static void cgroup_enable_task_cg_lists(void)
             list_add(&p->cg_list, &p->cgroups->tasks);
         task_unlock(p);
     } while_each_thread(g, p);
+    read_unlock(&tasklist_lock);
     write_unlock(&css_set_lock);
 }
 
@@ -3043,6 +2946,38 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
  *
  */
 
+/* which pidlist file are we talking about? */
+enum cgroup_filetype {
+    CGROUP_FILE_PROCS,
+    CGROUP_FILE_TASKS,
+};
+
+/*
+ * A pidlist is a list of pids that virtually represents the contents of one
+ * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
+ * a pair (one each for procs, tasks) for each pid namespace that's relevant
+ * to the cgroup.
+ */
+struct cgroup_pidlist {
+    /*
+     * used to find which pidlist is wanted. doesn't change as long as
+     * this particular list stays in the list.
+     */
+    struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
+    /* array of xids */
+    pid_t *list;
+    /* how many elements the above list has */
+    int length;
+    /* how many files are using the current array */
+    int use_count;
+    /* each of these stored in a list by its cgroup */
+    struct list_head links;
+    /* pointer to the cgroup we belong to, for list removal purposes */
+    struct cgroup *owner;
+    /* protects the other fields */
+    struct rw_semaphore mutex;
+};
+
 /*
  * The following two functions "fix" the issue where there are more pids
  * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
@@ -3827,7 +3762,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
     set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
 
     for_each_subsys(root, ss) {
-        struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+        struct cgroup_subsys_state *css = ss->create(cgrp);
 
         if (IS_ERR(css)) {
             err = PTR_ERR(css);
@@ -3841,7 +3776,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         }
         /* At error, ->destroy() callback has to free assigned ID. */
         if (clone_children(parent) && ss->post_clone)
-            ss->post_clone(ss, cgrp);
+            ss->post_clone(cgrp);
     }
 
     cgroup_lock_hierarchy(root);
@@ -3875,7 +3810,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
     for_each_subsys(root, ss) {
         if (cgrp->subsys[ss->subsys_id])
-            ss->destroy(ss, cgrp);
+            ss->destroy(cgrp);
     }
 
     mutex_unlock(&cgroup_mutex);
@@ -4099,7 +4034,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
     /* Create the top cgroup state for this subsystem */
     list_add(&ss->sibling, &rootnode.subsys_list);
     ss->root = &rootnode;
-    css = ss->create(ss, dummytop);
+    css = ss->create(dummytop);
     /* We don't handle early failures gracefully */
     BUG_ON(IS_ERR(css));
     init_cgroup_css(css, ss, dummytop);
@@ -4188,7 +4123,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
      * no ss->create seems to need anything important in the ss struct, so
      * this can happen first (i.e. before the rootnode attachment).
      */
-    css = ss->create(ss, dummytop);
+    css = ss->create(dummytop);
     if (IS_ERR(css)) {
         /* failure case - need to deassign the subsys[] slot. */
         subsys[i] = NULL;
@@ -4206,7 +4141,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
     int ret = cgroup_init_idr(ss, css);
     if (ret) {
         dummytop->subsys[ss->subsys_id] = NULL;
-        ss->destroy(ss, dummytop);
+        ss->destroy(dummytop);
         subsys[i] = NULL;
         mutex_unlock(&cgroup_mutex);
         return ret;
@@ -4304,7 +4239,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
      * pointer to find their state. note that this also takes care of
      * freeing the css_id.
      */
-    ss->destroy(ss, dummytop);
+    ss->destroy(dummytop);
     dummytop->subsys[ss->subsys_id] = NULL;
 
     mutex_unlock(&cgroup_mutex);
@@ -4580,7 +4515,7 @@ void cgroup_fork_callbacks(struct task_struct *child)
         for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
             struct cgroup_subsys *ss = subsys[i];
             if (ss->fork)
-                ss->fork(ss, child);
+                ss->fork(child);
         }
     }
 }
@@ -4596,6 +4531,17 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_post_fork(struct task_struct *child)
 {
+    /*
+     * use_task_css_set_links is set to 1 before we walk the tasklist
+     * under the tasklist_lock and we read it here after we added the child
+     * to the tasklist under the tasklist_lock as well. If the child wasn't
+     * yet in the tasklist when we walked through it from
+     * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
+     * should be visible now due to the paired locking and barriers implied
+     * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
+     * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
+     * lock on fork.
+     */
     if (use_task_css_set_links) {
         write_lock(&css_set_lock);
         if (list_empty(&child->cg_list)) {
@@ -4682,7 +4628,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
             struct cgroup *old_cgrp =
                 rcu_dereference_raw(cg->subsys[i])->cgroup;
             struct cgroup *cgrp = task_cgroup(tsk, i);
-            ss->exit(ss, cgrp, old_cgrp, tsk);
+            ss->exit(cgrp, old_cgrp, tsk);
         }
     }
 }
@@ -5137,8 +5083,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
 }
 
 #ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
-                                                struct cgroup *cont)
+static struct cgroup_subsys_state *debug_create(struct cgroup *cont)
 {
     struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
 
@@ -5148,7 +5093,7 @@ static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
     return css;
 }
 
-static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void debug_destroy(struct cgroup *cont)
 {
     kfree(cont->subsys[debug_subsys_id]);
 }
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index fc0646b78a64..f86e93920b62 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -128,8 +128,7 @@ struct cgroup_subsys freezer_subsys;
  * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  * sighand->siglock
  */
-static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
-                                                  struct cgroup *cgroup)
+static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
 {
     struct freezer *freezer;
 
@@ -142,8 +141,7 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
     return &freezer->css;
 }
 
-static void freezer_destroy(struct cgroup_subsys *ss,
-                            struct cgroup *cgroup)
+static void freezer_destroy(struct cgroup *cgroup)
 {
     struct freezer *freezer = cgroup_freezer(cgroup);
 
@@ -164,8 +162,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
  * a write to that file racing against an attach, and hence the
  * can_attach() result will remain valid until the attach completes.
  */
-static int freezer_can_attach(struct cgroup_subsys *ss,
-                              struct cgroup *new_cgroup,
+static int freezer_can_attach(struct cgroup *new_cgroup,
                               struct cgroup_taskset *tset)
 {
     struct freezer *freezer;
@@ -185,7 +182,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
     return 0;
 }
 
-static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+static void freezer_fork(struct task_struct *task)
 {
     struct freezer *freezer;
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a09ac2b9a661..5d575836dba6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1399,8 +1399,7 @@ static nodemask_t cpuset_attach_nodemask_from;
 static nodemask_t cpuset_attach_nodemask_to;
 
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                             struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
     struct cpuset *cs = cgroup_cs(cgrp);
     struct task_struct *task;
@@ -1436,8 +1435,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
     return 0;
 }
 
-static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                          struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
     struct mm_struct *mm;
     struct task_struct *task;
@@ -1833,8 +1831,7 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
  * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
  * held.
  */
-static void cpuset_post_clone(struct cgroup_subsys *ss,
-                              struct cgroup *cgroup)
+static void cpuset_post_clone(struct cgroup *cgroup)
 {
     struct cgroup *parent, *child;
     struct cpuset *cs, *parent_cs;
@@ -1857,13 +1854,10 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,
 
 /*
  * cpuset_create - create a cpuset
- * ss: cpuset cgroup subsystem
  * cont: control group that the new cpuset will be part of
  */
 
-static struct cgroup_subsys_state *cpuset_create(
-    struct cgroup_subsys *ss,
-    struct cgroup *cont)
+static struct cgroup_subsys_state *cpuset_create(struct cgroup *cont)
 {
     struct cpuset *cs;
     struct cpuset *parent;
@@ -1902,7 +1896,7 @@ static struct cgroup_subsys_state *cpuset_create(
  * will call async_rebuild_sched_domains().
  */
 
-static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void cpuset_destroy(struct cgroup *cont)
 {
     struct cpuset *cs = cgroup_cs(cont);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c61234b1a988..4b50357914fb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7147,8 +7147,7 @@ unlock:
 device_initcall(perf_event_sysfs_init);
 
 #ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_create(
-    struct cgroup_subsys *ss, struct cgroup *cont)
+static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
 {
     struct perf_cgroup *jc;
 
@@ -7165,8 +7164,7 @@ static struct cgroup_subsys_state *perf_cgroup_create(
     return &jc->css;
 }
 
-static void perf_cgroup_destroy(struct cgroup_subsys *ss,
-                                struct cgroup *cont)
+static void perf_cgroup_destroy(struct cgroup *cont)
 {
     struct perf_cgroup *jc;
     jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
@@ -7182,8 +7180,7 @@ static int __perf_cgroup_move(void *info)
     return 0;
 }
 
-static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                               struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
     struct task_struct *task;
 
@@ -7191,8 +7188,8 @@ static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
         task_function_call(task, __perf_cgroup_move, task);
 }
 
-static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                             struct cgroup *old_cgrp, struct task_struct *task)
+static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+                             struct task_struct *task)
 {
     /*
      * cgroup_exit() is called in the copy_process() failure path.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d2bd4647586c..a35cb8dbd8c4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7571,8 +7571,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
                 struct task_group, css);
 }
 
-static struct cgroup_subsys_state *
-cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
 {
     struct task_group *tg, *parent;
 
@@ -7589,15 +7588,14 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
     return &tg->css;
 }
 
-static void
-cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpu_cgroup_destroy(struct cgroup *cgrp)
 {
     struct task_group *tg = cgroup_tg(cgrp);
 
     sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                  struct cgroup_taskset *tset)
 {
     struct task_struct *task;
@@ -7615,7 +7613,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
     return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup *cgrp,
                               struct cgroup_taskset *tset)
 {
     struct task_struct *task;
@@ -7625,8 +7623,8 @@ static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                struct cgroup *old_cgrp, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+                struct task_struct *task)
 {
     /*
      * cgroup_exit() is called in the copy_process() failure path.
@@ -7976,8 +7974,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  */
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(
-    struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
 {
     struct cpuacct *ca;
 
@@ -8007,8 +8004,7 @@ out:
 }
 
 /* destroy an existing cpu accounting group */
-static void
-cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpuacct_destroy(struct cgroup *cgrp)
 {
     struct cpuacct *ca = cgroup_ca(cgrp);
 