summaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2015-12-03 10:24:08 -0500
committerTejun Heo <tj@kernel.org>2015-12-03 10:24:08 -0500
commitb53202e6308939d33ba0c78712e850f891b4e76f (patch)
tree2c8998ba6b2580481a93352381a3f2d972901789 /kernel
parent8075b542cf9f5d8a6afd92b4a940e29a677a7510 (diff)
cgroup: kill cgrp_ss_priv[CGROUP_CANFORK_COUNT] and friends
Now that nobody uses the "priv" arg passed to can_fork/cancel_fork/fork we can kill CGROUP_CANFORK_COUNT/SUBSYS_TAG/etc and cgrp_ss_priv[] in copy_process(). Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cgroup.c30
-rw-r--r--kernel/cgroup_freezer.c2
-rw-r--r--kernel/cgroup_pids.c4
-rw-r--r--kernel/fork.c7
-rw-r--r--kernel/sched/core.c2
5 files changed, 14 insertions, 31 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ad35ac033d9b..7f2f007397fe 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5488,19 +5488,6 @@ static const struct file_operations proc_cgroupstats_operations = {
5488 .release = single_release, 5488 .release = single_release,
5489}; 5489};
5490 5490
5491static void **subsys_canfork_priv_p(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
5492{
5493 if (CGROUP_CANFORK_START <= i && i < CGROUP_CANFORK_END)
5494 return &ss_priv[i - CGROUP_CANFORK_START];
5495 return NULL;
5496}
5497
5498static void *subsys_canfork_priv(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
5499{
5500 void **private = subsys_canfork_priv_p(ss_priv, i);
5501 return private ? *private : NULL;
5502}
5503
5504/** 5491/**
5505 * cgroup_fork - initialize cgroup related fields during copy_process() 5492 * cgroup_fork - initialize cgroup related fields during copy_process()
5506 * @child: pointer to task_struct of forking parent process. 5493 * @child: pointer to task_struct of forking parent process.
@@ -5523,14 +5510,13 @@ void cgroup_fork(struct task_struct *child)
5523 * returns an error, the fork aborts with that error code. This allows for 5510 * returns an error, the fork aborts with that error code. This allows for
5524 * a cgroup subsystem to conditionally allow or deny new forks. 5511 * a cgroup subsystem to conditionally allow or deny new forks.
5525 */ 5512 */
5526int cgroup_can_fork(struct task_struct *child, 5513int cgroup_can_fork(struct task_struct *child)
5527 void *ss_priv[CGROUP_CANFORK_COUNT])
5528{ 5514{
5529 struct cgroup_subsys *ss; 5515 struct cgroup_subsys *ss;
5530 int i, j, ret; 5516 int i, j, ret;
5531 5517
5532 for_each_subsys_which(ss, i, &have_canfork_callback) { 5518 for_each_subsys_which(ss, i, &have_canfork_callback) {
5533 ret = ss->can_fork(child, subsys_canfork_priv_p(ss_priv, i)); 5519 ret = ss->can_fork(child);
5534 if (ret) 5520 if (ret)
5535 goto out_revert; 5521 goto out_revert;
5536 } 5522 }
@@ -5542,7 +5528,7 @@ out_revert:
5542 if (j >= i) 5528 if (j >= i)
5543 break; 5529 break;
5544 if (ss->cancel_fork) 5530 if (ss->cancel_fork)
5545 ss->cancel_fork(child, subsys_canfork_priv(ss_priv, j)); 5531 ss->cancel_fork(child);
5546 } 5532 }
5547 5533
5548 return ret; 5534 return ret;
@@ -5555,15 +5541,14 @@ out_revert:
5555 * This calls the cancel_fork() callbacks if a fork failed *after* 5541 * This calls the cancel_fork() callbacks if a fork failed *after*
5556 * cgroup_can_fork() succeded. 5542 * cgroup_can_fork() succeded.
5557 */ 5543 */
5558void cgroup_cancel_fork(struct task_struct *child, 5544void cgroup_cancel_fork(struct task_struct *child)
5559 void *ss_priv[CGROUP_CANFORK_COUNT])
5560{ 5545{
5561 struct cgroup_subsys *ss; 5546 struct cgroup_subsys *ss;
5562 int i; 5547 int i;
5563 5548
5564 for_each_subsys(ss, i) 5549 for_each_subsys(ss, i)
5565 if (ss->cancel_fork) 5550 if (ss->cancel_fork)
5566 ss->cancel_fork(child, subsys_canfork_priv(ss_priv, i)); 5551 ss->cancel_fork(child);
5567} 5552}
5568 5553
5569/** 5554/**
@@ -5576,8 +5561,7 @@ void cgroup_cancel_fork(struct task_struct *child,
5576 * cgroup_task_iter_start() - to guarantee that the new task ends up on its 5561 * cgroup_task_iter_start() - to guarantee that the new task ends up on its
5577 * list. 5562 * list.
5578 */ 5563 */
5579void cgroup_post_fork(struct task_struct *child, 5564void cgroup_post_fork(struct task_struct *child)
5580 void *old_ss_priv[CGROUP_CANFORK_COUNT])
5581{ 5565{
5582 struct cgroup_subsys *ss; 5566 struct cgroup_subsys *ss;
5583 int i; 5567 int i;
@@ -5621,7 +5605,7 @@ void cgroup_post_fork(struct task_struct *child,
5621 * and addition to css_set. 5605 * and addition to css_set.
5622 */ 5606 */
5623 for_each_subsys_which(ss, i, &have_fork_callback) 5607 for_each_subsys_which(ss, i, &have_fork_callback)
5624 ss->fork(child, subsys_canfork_priv(old_ss_priv, i)); 5608 ss->fork(child);
5625} 5609}
5626 5610
5627/** 5611/**
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 2d3df82c54f2..1b72d56edce5 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -200,7 +200,7 @@ static void freezer_attach(struct cgroup_taskset *tset)
200 * to do anything as freezer_attach() will put @task into the appropriate 200 * to do anything as freezer_attach() will put @task into the appropriate
201 * state. 201 * state.
202 */ 202 */
203static void freezer_fork(struct task_struct *task, void *private) 203static void freezer_fork(struct task_struct *task)
204{ 204{
205 struct freezer *freezer; 205 struct freezer *freezer;
206 206
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index b50d5a167fda..18107aea2895 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -209,7 +209,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
209 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies 209 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
210 * on threadgroup_change_begin() held by the copy_process(). 210 * on threadgroup_change_begin() held by the copy_process().
211 */ 211 */
212static int pids_can_fork(struct task_struct *task, void **priv_p) 212static int pids_can_fork(struct task_struct *task)
213{ 213{
214 struct cgroup_subsys_state *css; 214 struct cgroup_subsys_state *css;
215 struct pids_cgroup *pids; 215 struct pids_cgroup *pids;
@@ -219,7 +219,7 @@ static int pids_can_fork(struct task_struct *task, void **priv_p)
219 return pids_try_charge(pids, 1); 219 return pids_try_charge(pids, 1);
220} 220}
221 221
222static void pids_cancel_fork(struct task_struct *task, void *priv) 222static void pids_cancel_fork(struct task_struct *task)
223{ 223{
224 struct cgroup_subsys_state *css; 224 struct cgroup_subsys_state *css;
225 struct pids_cgroup *pids; 225 struct pids_cgroup *pids;
diff --git a/kernel/fork.c b/kernel/fork.c
index fce002ee3ddf..ba7d1c037490 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1249,7 +1249,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1249{ 1249{
1250 int retval; 1250 int retval;
1251 struct task_struct *p; 1251 struct task_struct *p;
1252 void *cgrp_ss_priv[CGROUP_CANFORK_COUNT] = {};
1253 1252
1254 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 1253 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1255 return ERR_PTR(-EINVAL); 1254 return ERR_PTR(-EINVAL);
@@ -1526,7 +1525,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1526 * between here and cgroup_post_fork() if an organisation operation is in 1525 * between here and cgroup_post_fork() if an organisation operation is in
1527 * progress. 1526 * progress.
1528 */ 1527 */
1529 retval = cgroup_can_fork(p, cgrp_ss_priv); 1528 retval = cgroup_can_fork(p);
1530 if (retval) 1529 if (retval)
1531 goto bad_fork_free_pid; 1530 goto bad_fork_free_pid;
1532 1531
@@ -1608,7 +1607,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1608 write_unlock_irq(&tasklist_lock); 1607 write_unlock_irq(&tasklist_lock);
1609 1608
1610 proc_fork_connector(p); 1609 proc_fork_connector(p);
1611 cgroup_post_fork(p, cgrp_ss_priv); 1610 cgroup_post_fork(p);
1612 threadgroup_change_end(current); 1611 threadgroup_change_end(current);
1613 perf_event_fork(p); 1612 perf_event_fork(p);
1614 1613
@@ -1618,7 +1617,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1618 return p; 1617 return p;
1619 1618
1620bad_fork_cancel_cgroup: 1619bad_fork_cancel_cgroup:
1621 cgroup_cancel_fork(p, cgrp_ss_priv); 1620 cgroup_cancel_fork(p);
1622bad_fork_free_pid: 1621bad_fork_free_pid:
1623 if (pid != &init_struct_pid) 1622 if (pid != &init_struct_pid)
1624 free_pid(pid); 1623 free_pid(pid);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9db4819e586..b7d2271cd948 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8212,7 +8212,7 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
8212 sched_offline_group(tg); 8212 sched_offline_group(tg);
8213} 8213}
8214 8214
8215static void cpu_cgroup_fork(struct task_struct *task, void *private) 8215static void cpu_cgroup_fork(struct task_struct *task)
8216{ 8216{
8217 sched_move_task(task); 8217 sched_move_task(task);
8218} 8218}