author     Li Zefan <lizf@cn.fujitsu.com>            2012-01-31 00:47:36 -0500
committer  Tejun Heo <tj@kernel.org>                 2012-02-02 12:20:22 -0500
commit     761b3ef50e1c2649cffbfa67a4dcb2dcdb7982ed
tree       67ab6a9a2520811c9c0b4d70d1c19b4bfca16237 /kernel
parent     61d1d219c4c0761059236a46867bc49943c4d29d
cgroup: remove cgroup_subsys argument from callbacks
The argument is not used at all, and it's not necessary, because
a specific callback handler of course knows which subsys it
belongs to.
Now only ->populate() still takes this argument, because the handlers of
this callback always call cgroup_add_file()/cgroup_add_files(), which need
the subsys pointer.
So we remove a few lines of code, though the reduction in object size
is minimal.
16 files changed, 113 insertions(+), 162 deletions(-)
text data bss dec hex filename
5486240 656987 7039960 13183187 c928d3 vmlinux.o.orig
5486170 656987 7039960 13183117 c9288d vmlinux.o
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
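[Editorial illustration] To make the interface change concrete, here is a minimal
sketch of a controller's create callback before and after this patch. The
controller name (foo), its state structure, and the helper names are hypothetical
and only illustrate the calling convention; the real controllers (freezer, cpuset,
perf, cpu, cpuacct) are converted in the hunks below.

	/* Hypothetical per-cgroup state for an imaginary "foo" controller. */
	struct foo_state {
		struct cgroup_subsys_state css;
	};

	/* Before this patch: every callback received its subsystem pointer. */
	static struct cgroup_subsys_state *foo_create(struct cgroup_subsys *ss,
						      struct cgroup *cgrp)
	{
		struct foo_state *fs = kzalloc(sizeof(*fs), GFP_KERNEL);

		if (!fs)
			return ERR_PTR(-ENOMEM);
		return &fs->css;
	}

	/* After this patch: the 'ss' argument is dropped, since foo_create()
	 * can only ever be invoked for the foo subsystem anyway. */
	static struct cgroup_subsys_state *foo_create(struct cgroup *cgrp)
	{
		struct foo_state *fs = kzalloc(sizeof(*fs), GFP_KERNEL);

		if (!fs)
			return ERR_PTR(-ENOMEM);
		return &fs->css;
	}

The .create initializer in the controller's struct cgroup_subsys stays the same;
only the prototype loses the argument (the function-pointer types are adjusted in
include/linux/cgroup.h as part of the full 16-file patch, outside the kernel/
diffstat shown here).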
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup.c         | 43
-rw-r--r--   kernel/cgroup_freezer.c | 11
-rw-r--r--   kernel/cpuset.c         | 16
-rw-r--r--   kernel/events/core.c    | 13
-rw-r--r--   kernel/sched/core.c     | 20
5 files changed, 43 insertions(+), 60 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 43a224f167b5..865d89a580c7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -818,7 +818,7 @@ static int cgroup_call_pre_destroy(struct cgroup *cgrp)

 	for_each_subsys(cgrp->root, ss)
 		if (ss->pre_destroy) {
-			ret = ss->pre_destroy(ss, cgrp);
+			ret = ss->pre_destroy(cgrp);
 			if (ret)
 				break;
 		}
@@ -846,7 +846,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 		 * Release the subsystem state objects.
 		 */
 		for_each_subsys(cgrp->root, ss)
-			ss->destroy(ss, cgrp);
+			ss->destroy(cgrp);

 		cgrp->root->number_of_cgroups--;
 		mutex_unlock(&cgroup_mutex);
@@ -1015,7 +1015,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 			list_move(&ss->sibling, &root->subsys_list);
 			ss->root = root;
 			if (ss->bind)
-				ss->bind(ss, cgrp);
+				ss->bind(cgrp);
 			mutex_unlock(&ss->hierarchy_mutex);
 			/* refcount was already taken, and we're keeping it */
 		} else if (bit & removed_bits) {
@@ -1025,7 +1025,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
 			mutex_lock(&ss->hierarchy_mutex);
 			if (ss->bind)
-				ss->bind(ss, dummytop);
+				ss->bind(dummytop);
 			dummytop->subsys[i]->cgroup = dummytop;
 			cgrp->subsys[i] = NULL;
 			subsys[i]->root = &rootnode;
@@ -1908,7 +1908,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)

 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, &tset);
+			retval = ss->can_attach(cgrp, &tset);
 			if (retval) {
 				/*
 				 * Remember on which subsystem the can_attach()
@@ -1932,7 +1932,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)

 	for_each_subsys(root, ss) {
 		if (ss->attach)
-			ss->attach(ss, cgrp, &tset);
+			ss->attach(cgrp, &tset);
 	}

 	synchronize_rcu();
@@ -1954,7 +1954,7 @@ out:
 				 */
 				break;
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, &tset);
+				ss->cancel_attach(cgrp, &tset);
 		}
 	}
 	return retval;
@@ -2067,7 +2067,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, &tset);
+			retval = ss->can_attach(cgrp, &tset);
 			if (retval) {
 				failed_ss = ss;
 				goto out_cancel_attach;
@@ -2104,7 +2104,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->attach)
-			ss->attach(ss, cgrp, &tset);
+			ss->attach(cgrp, &tset);
 	}

 	/*
@@ -2128,7 +2128,7 @@ out_cancel_attach:
 			if (ss == failed_ss)
 				break;
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, &tset);
+				ss->cancel_attach(cgrp, &tset);
 		}
 	}
 out_free_group_list:
@@ -3756,7 +3756,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);

 	for_each_subsys(root, ss) {
-		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+		struct cgroup_subsys_state *css = ss->create(cgrp);

 		if (IS_ERR(css)) {
 			err = PTR_ERR(css);
@@ -3770,7 +3770,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 		}
 		/* At error, ->destroy() callback has to free assigned ID. */
 		if (clone_children(parent) && ss->post_clone)
-			ss->post_clone(ss, cgrp);
+			ss->post_clone(cgrp);
 	}

 	cgroup_lock_hierarchy(root);
@@ -3804,7 +3804,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,

 	for_each_subsys(root, ss) {
 		if (cgrp->subsys[ss->subsys_id])
-			ss->destroy(ss, cgrp);
+			ss->destroy(cgrp);
 	}

 	mutex_unlock(&cgroup_mutex);
@@ -4028,7 +4028,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 	/* Create the top cgroup state for this subsystem */
 	list_add(&ss->sibling, &rootnode.subsys_list);
 	ss->root = &rootnode;
-	css = ss->create(ss, dummytop);
+	css = ss->create(dummytop);
 	/* We don't handle early failures gracefully */
 	BUG_ON(IS_ERR(css));
 	init_cgroup_css(css, ss, dummytop);
@@ -4117,7 +4117,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	 * no ss->create seems to need anything important in the ss struct, so
 	 * this can happen first (i.e. before the rootnode attachment).
 	 */
-	css = ss->create(ss, dummytop);
+	css = ss->create(dummytop);
 	if (IS_ERR(css)) {
 		/* failure case - need to deassign the subsys[] slot. */
 		subsys[i] = NULL;
@@ -4135,7 +4135,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		int ret = cgroup_init_idr(ss, css);
 		if (ret) {
 			dummytop->subsys[ss->subsys_id] = NULL;
-			ss->destroy(ss, dummytop);
+			ss->destroy(dummytop);
 			subsys[i] = NULL;
 			mutex_unlock(&cgroup_mutex);
 			return ret;
@@ -4233,7 +4233,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	 * pointer to find their state. note that this also takes care of
 	 * freeing the css_id.
 	 */
-	ss->destroy(ss, dummytop);
+	ss->destroy(dummytop);
 	dummytop->subsys[ss->subsys_id] = NULL;

 	mutex_unlock(&cgroup_mutex);
@@ -4509,7 +4509,7 @@ void cgroup_fork_callbacks(struct task_struct *child)
 		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
 			struct cgroup_subsys *ss = subsys[i];
 			if (ss->fork)
-				ss->fork(ss, child);
+				ss->fork(child);
 		}
 	}
 }
@@ -4611,7 +4611,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 			struct cgroup *old_cgrp =
 				rcu_dereference_raw(cg->subsys[i])->cgroup;
 			struct cgroup *cgrp = task_cgroup(tsk, i);
-			ss->exit(ss, cgrp, old_cgrp, tsk);
+			ss->exit(cgrp, old_cgrp, tsk);
 		}
 	}
 }
@@ -5066,8 +5066,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
 }

 #ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
-						struct cgroup *cont)
+static struct cgroup_subsys_state *debug_create(struct cgroup *cont)
 {
 	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

@@ -5077,7 +5076,7 @@ static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
 	return css;
 }

-static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void debug_destroy(struct cgroup *cont)
 {
 	kfree(cont->subsys[debug_subsys_id]);
 }
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index fc0646b78a64..f86e93920b62 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -128,8 +128,7 @@ struct cgroup_subsys freezer_subsys;
  * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  * sighand->siglock
  */
-static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
-						  struct cgroup *cgroup)
+static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
 {
 	struct freezer *freezer;

@@ -142,8 +141,7 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 	return &freezer->css;
 }

-static void freezer_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cgroup)
+static void freezer_destroy(struct cgroup *cgroup)
 {
 	struct freezer *freezer = cgroup_freezer(cgroup);

@@ -164,8 +162,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
  * a write to that file racing against an attach, and hence the
  * can_attach() result will remain valid until the attach completes.
  */
-static int freezer_can_attach(struct cgroup_subsys *ss,
-			      struct cgroup *new_cgroup,
+static int freezer_can_attach(struct cgroup *new_cgroup,
 			      struct cgroup_taskset *tset)
 {
 	struct freezer *freezer;
@@ -185,7 +182,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	return 0;
 }

-static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+static void freezer_fork(struct task_struct *task)
 {
 	struct freezer *freezer;

diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a09ac2b9a661..5d575836dba6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1399,8 +1399,7 @@ static nodemask_t cpuset_attach_nodemask_from;
 static nodemask_t cpuset_attach_nodemask_to;

 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			     struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct task_struct *task;
@@ -1436,8 +1435,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	return 0;
 }

-static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			  struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct mm_struct *mm;
 	struct task_struct *task;
@@ -1833,8 +1831,7 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
  * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
  * held.
  */
-static void cpuset_post_clone(struct cgroup_subsys *ss,
-			      struct cgroup *cgroup)
+static void cpuset_post_clone(struct cgroup *cgroup)
 {
 	struct cgroup *parent, *child;
 	struct cpuset *cs, *parent_cs;
@@ -1857,13 +1854,10 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,

 /*
  * cpuset_create - create a cpuset
- * ss:	cpuset cgroup subsystem
  * cont: control group that the new cpuset will be part of
  */

-static struct cgroup_subsys_state *cpuset_create(
-	struct cgroup_subsys *ss,
-	struct cgroup *cont)
+static struct cgroup_subsys_state *cpuset_create(struct cgroup *cont)
 {
 	struct cpuset *cs;
 	struct cpuset *parent;
@@ -1902,7 +1896,7 @@ static struct cgroup_subsys_state *cpuset_create(
  * will call async_rebuild_sched_domains().
  */

-static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void cpuset_destroy(struct cgroup *cont)
 {
 	struct cpuset *cs = cgroup_cs(cont);

diff --git a/kernel/events/core.c b/kernel/events/core.c
index a8f4ac001a00..a5d1ee92b0d9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6906,8 +6906,7 @@ unlock:
 device_initcall(perf_event_sysfs_init);

 #ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_create(
-	struct cgroup_subsys *ss, struct cgroup *cont)
+static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
 {
 	struct perf_cgroup *jc;

@@ -6924,8 +6923,7 @@ static struct cgroup_subsys_state *perf_cgroup_create(
 	return &jc->css;
 }

-static void perf_cgroup_destroy(struct cgroup_subsys *ss,
-				struct cgroup *cont)
+static void perf_cgroup_destroy(struct cgroup *cont)
 {
 	struct perf_cgroup *jc;
 	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
@@ -6941,8 +6939,7 @@ static int __perf_cgroup_move(void *info)
 	return 0;
 }

-static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			       struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;

@@ -6950,8 +6947,8 @@ static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		task_function_call(task, __perf_cgroup_move, task);
 }

-static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		struct cgroup *old_cgrp, struct task_struct *task)
+static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+			     struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263e..ff12f7216062 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7530,8 +7530,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 			    struct task_group, css);
 }

-static struct cgroup_subsys_state *
-cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
 {
 	struct task_group *tg, *parent;

@@ -7548,15 +7547,14 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }

-static void
-cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpu_cgroup_destroy(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);

 	sched_destroy_group(tg);
 }

-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 				 struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -7574,7 +7572,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	return 0;
 }

-static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup *cgrp,
 			      struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -7584,8 +7582,8 @@ static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }

 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		struct cgroup *old_cgrp, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
@@ -7935,8 +7933,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  */

 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(
-	struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
 {
 	struct cpuacct *ca;

@@ -7966,8 +7963,7 @@ out:
 }

 /* destroy an existing cpu accounting group */
-static void
-cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpuacct_destroy(struct cgroup *cgrp)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);
