about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2014-02-13 06:58:41 -0500
committerTejun Heo <tj@kernel.org>2014-02-13 06:58:41 -0500
commit924f0d9a2078f49ff331bb43196ec5afadc16b8f (patch)
tree6df0c9852c7b16c2f2eb69b04f90aacae9f95132 /kernel
parentcb0f1fe9ba47c202a98a9d41ad5c12c0ac7732e9 (diff)
cgroup: drop @skip_css from cgroup_taskset_for_each()
If !NULL, @skip_css makes cgroup_taskset_for_each() skip the matching css. The intention of the interface is to make it easy to skip css's (cgroup_subsys_states) which already match the migration target; however, this is entirely unnecessary as migration taskset doesn't include tasks which are already in the target cgroup. Drop @skip_css from cgroup_taskset_for_each(). Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Paul Mackerras <paulus@samba.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> Cc: Daniel Borkmann <dborkman@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cgroup_freezer.c2
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/sched/core.c4
4 files changed, 6 insertions, 6 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 98ea26a99076..7201a637c405 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -187,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	 * current state before executing the following - !frozen tasks may
 	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
 	 */
-	cgroup_taskset_for_each(task, new_css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 65ae0bdf4af8..bf20e4ac2f75 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1398,7 +1398,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * Kthreads which disallow setaffinity shouldn't be moved
 		 * to a new cpuset; we don't want to change their cpu
@@ -1467,7 +1467,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 
 	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail. TODO: have a better way to handle failure here
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a3c3ab50271a..6dd714955b04 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8021,7 +8021,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d4cfc5561830..ba386a06ab11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7600,7 +7600,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
@@ -7618,7 +7618,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		sched_move_task(task);
 }
 