author     Tejun Heo <tj@kernel.org>  2014-02-13 06:58:41 -0500
committer  Tejun Heo <tj@kernel.org>  2014-02-13 06:58:41 -0500
commit     924f0d9a2078f49ff331bb43196ec5afadc16b8f (patch)
tree       6df0c9852c7b16c2f2eb69b04f90aacae9f95132
parent     cb0f1fe9ba47c202a98a9d41ad5c12c0ac7732e9 (diff)
cgroup: drop @skip_css from cgroup_taskset_for_each()
If !NULL, @skip_css makes cgroup_taskset_for_each() skip the matching
css.  The intention of the interface is to make it easy to skip css's
(cgroup_subsys_states) which already match the migration target;
however, this is entirely unnecessary as migration taskset doesn't
include tasks which are already in the target cgroup.  Drop @skip_css
from cgroup_taskset_for_each().

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Daniel Borkmann <dborkman@redhat.com>
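For illustration only, a minimal sketch of how a controller callback iterates the migration taskset after this change; the example_attach() callback and the pr_debug() body are hypothetical and not part of this patch, only the two-argument cgroup_taskset_for_each() form comes from it:

#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/printk.h>

/*
 * Hypothetical ->attach() callback.  Every task in @tset is migrating
 * into the cgroup owning @css, so there is no css left to skip and the
 * old @skip_css argument is simply gone.
 */
static void example_attach(struct cgroup_subsys_state *css,
                           struct cgroup_taskset *tset)
{
        struct task_struct *task;

        cgroup_taskset_for_each(task, tset)
                pr_debug("example: attaching pid %d\n", task->pid);
}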
-rw-r--r--  block/blk-cgroup.c            | 2
-rw-r--r--  include/linux/cgroup.h        | 8
-rw-r--r--  kernel/cgroup_freezer.c       | 2
-rw-r--r--  kernel/cpuset.c               | 4
-rw-r--r--  kernel/events/core.c          | 2
-rw-r--r--  kernel/sched/core.c           | 4
-rw-r--r--  net/core/netclassid_cgroup.c  | 2
-rw-r--r--  net/core/netprio_cgroup.c     | 2
8 files changed, 11 insertions, 15 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1cef07cf9c21..4aefd46d7d95 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -894,7 +894,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3bd0a7138371..581a124c7bc8 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -535,15 +535,11 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
- * @skip_css: skip if task's css matches this, %NULL to iterate through all
  * @tset: taskset to iterate
  */
-#define cgroup_taskset_for_each(task, skip_css, tset)			\
+#define cgroup_taskset_for_each(task, tset)				\
 	for ((task) = cgroup_taskset_first((tset)); (task);		\
-	     (task) = cgroup_taskset_next((tset)))			\
-		if (!(skip_css) ||					\
-		    cgroup_taskset_cur_css((tset),			\
-			(skip_css)->ss->id) != (skip_css))
+	     (task) = cgroup_taskset_next((tset)))
 
 /*
  * Control Group subsystem type.
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 98ea26a99076..7201a637c405 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -187,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	 * current state before executing the following - !frozen tasks may
 	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
 	 */
-	cgroup_taskset_for_each(task, new_css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 65ae0bdf4af8..bf20e4ac2f75 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1398,7 +1398,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * Kthreads which disallow setaffinity shouldn't be moved
 		 * to a new cpuset; we don't want to change their cpu
@@ -1467,7 +1467,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 
 	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail. TODO: have a better way to handle failure here
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a3c3ab50271a..6dd714955b04 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8021,7 +8021,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d4cfc5561830..ba386a06ab11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7600,7 +7600,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
@@ -7618,7 +7618,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		sched_move_task(task);
 }
 
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index b865662fba71..22931e1b99b4 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -73,7 +73,7 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
 	void *v = (void *)(unsigned long)cs->classid;
 	struct task_struct *p;
 
-	cgroup_taskset_for_each(p, css, tset) {
+	cgroup_taskset_for_each(p, tset) {
 		task_lock(p);
 		iterate_fd(p->files, 0, update_classid, v);
 		task_unlock(p);
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index d7d23e28fafd..f9f3a40d3350 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -224,7 +224,7 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
 	struct task_struct *p;
 	void *v = (void *)(unsigned long)css->cgroup->id;
 
-	cgroup_taskset_for_each(p, css, tset) {
+	cgroup_taskset_for_each(p, tset) {
 		task_lock(p);
 		iterate_fd(p->files, 0, update_netprio, v);
 		task_unlock(p);