author	Tejun Heo <tj@kernel.org>	2013-08-08 20:11:26 -0400
committer	Tejun Heo <tj@kernel.org>	2013-08-08 20:11:26 -0400
commit	72ec7029937f0518eff21b8762743c31591684f5 (patch)
tree	43743a5d9e6a36548a23d5ff34ffc4c4fede8aa1
parent	e535837b1dae17b5a2d76ea1bc22ac1a79354624 (diff)
cgroup: make task iterators deal with cgroup_subsys_state instead of cgroup
cgroup is in the process of converting to css (cgroup_subsys_state) from
cgroup as the principal subsystem interface handle.  This is mostly to
prepare for the unified hierarchy support where css's will be created and
destroyed dynamically but also helps cleaning up subsystem implementations
as css is usually what they are interested in anyway.

This patch converts task iterators to deal with css instead of cgroup.
Note that under unified hierarchy, different sets of tasks will be
considered belonging to a given cgroup depending on the subsystem in
question and making the iterators deal with css instead of cgroup provides
them with enough information about the iteration.

While at it, fix several function comment formats in cpuset.c.

This patch doesn't introduce any behavior differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
-rw-r--r--	include/linux/cgroup.h	|  21
-rw-r--r--	kernel/cgroup.c	| 112
-rw-r--r--	kernel/cgroup_freezer.c	|  26
-rw-r--r--	kernel/cpuset.c	|  41
-rw-r--r--	mm/memcontrol.c	|  11
5 files changed, 104 insertions(+), 107 deletions(-)
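
The conversion is mechanical at each call site: iteration is keyed on a
cgroup_subsys_state rather than the cgroup it belongs to. A minimal
before/after sketch of a task walk (do_something() stands in for whatever
per-task work a controller performs; it is illustrative, not part of this
patch):

	/* before: iterate the tasks of a cgroup */
	struct cgroup_task_iter it;
	struct task_struct *task;

	cgroup_task_iter_start(css->cgroup, &it);
	while ((task = cgroup_task_iter_next(&it)))
		do_something(task);
	cgroup_task_iter_end(&it);

	/* after: iterate the tasks of the css itself */
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		do_something(task);
	css_task_iter_end(&it);

As the kernel-doc below notes, css_task_iter_start() holds css_set_lock
until css_task_iter_end(), so the body of such a loop must not sleep.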
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8472ed576b64..cd105fce089c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -880,21 +880,22 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
 	     (pos) = css_next_descendant_post((pos), (css)))
 
-/* A cgroup_task_iter should be treated as an opaque object */
-struct cgroup_task_iter {
-	struct cgroup			*origin_cgrp;
+/* A css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+	struct cgroup_subsys_state	*origin_css;
 	struct list_head		*cset_link;
 	struct list_head		*task;
 };
 
-void cgroup_task_iter_start(struct cgroup *cgrp, struct cgroup_task_iter *it);
-struct task_struct *cgroup_task_iter_next(struct cgroup_task_iter *it);
-void cgroup_task_iter_end(struct cgroup_task_iter *it);
+void css_task_iter_start(struct cgroup_subsys_state *css,
+			 struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
 
-int cgroup_scan_tasks(struct cgroup *cgrp,
-		      bool (*test)(struct task_struct *, void *),
-		      void (*process)(struct task_struct *, void *),
-		      void *data, struct ptr_heap *heap);
+int css_scan_tasks(struct cgroup_subsys_state *css,
+		   bool (*test)(struct task_struct *, void *),
+		   void (*process)(struct task_struct *, void *),
+		   void *data, struct ptr_heap *heap);
 
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7b16ddb2569b..8c57301d0561 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -370,7 +370,7 @@ static int cgroup_init_idr(struct cgroup_subsys *ss,
 /*
  * css_set_lock protects the list of css_set objects, and the chain of
  * tasks off each css_set.  Nests outside task->alloc_lock due to
- * cgroup_task_iter_start().
+ * css_task_iter_start().
  */
 static DEFINE_RWLOCK(css_set_lock);
 static int css_set_count;
@@ -398,9 +398,9 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
 
 /*
  * We don't maintain the lists running through each css_set to its task
- * until after the first call to cgroup_task_iter_start(). This reduces
- * the fork()/exit() overhead for people who have cgroups compiled into
- * their kernel but not actually in use.
+ * until after the first call to css_task_iter_start(). This reduces the
+ * fork()/exit() overhead for people who have cgroups compiled into their
+ * kernel but not actually in use.
  */
 static int use_task_css_set_links __read_mostly;
 
@@ -2989,7 +2989,7 @@ int cgroup_task_count(const struct cgroup *cgrp)
  * To reduce the fork() overhead for systems that are not actually using
  * their cgroups capability, we don't maintain the lists running through
  * each css_set to its tasks until we see the list actually used - in other
- * words after the first call to cgroup_task_iter_start().
+ * words after the first call to css_task_iter_start().
  */
 static void cgroup_enable_task_cg_lists(void)
 {
@@ -3204,12 +3204,12 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 EXPORT_SYMBOL_GPL(css_next_descendant_post);
 
 /**
- * cgroup_advance_task_iter - advance a task itererator to the next css_set
+ * css_advance_task_iter - advance a task itererator to the next css_set
  * @it: the iterator to advance
  *
  * Advance @it to the next css_set to walk.
  */
-static void cgroup_advance_task_iter(struct cgroup_task_iter *it)
+static void css_advance_task_iter(struct css_task_iter *it)
 {
 	struct list_head *l = it->cset_link;
 	struct cgrp_cset_link *link;
@@ -3218,7 +3218,7 @@ static void cgroup_advance_task_iter(struct cgroup_task_iter *it)
 	/* Advance to the next non-empty css_set */
 	do {
 		l = l->next;
-		if (l == &it->origin_cgrp->cset_links) {
+		if (l == &it->origin_css->cgroup->cset_links) {
 			it->cset_link = NULL;
 			return;
 		}
@@ -3230,47 +3230,48 @@ static void cgroup_advance_task_iter(struct cgroup_task_iter *it)
 }
 
 /**
- * cgroup_task_iter_start - initiate task iteration
- * @cgrp: the cgroup to walk tasks of
+ * css_task_iter_start - initiate task iteration
+ * @css: the css to walk tasks of
  * @it: the task iterator to use
  *
- * Initiate iteration through the tasks of @cgrp.  The caller can call
- * cgroup_task_iter_next() to walk through the tasks until the function
- * returns NULL.  On completion of iteration, cgroup_task_iter_end() must
- * be called.
+ * Initiate iteration through the tasks of @css.  The caller can call
+ * css_task_iter_next() to walk through the tasks until the function
+ * returns NULL.  On completion of iteration, css_task_iter_end() must be
+ * called.
  *
  * Note that this function acquires a lock which is released when the
  * iteration finishes.  The caller can't sleep while iteration is in
  * progress.
  */
-void cgroup_task_iter_start(struct cgroup *cgrp, struct cgroup_task_iter *it)
+void css_task_iter_start(struct cgroup_subsys_state *css,
+			 struct css_task_iter *it)
 	__acquires(css_set_lock)
 {
 	/*
-	 * The first time anyone tries to iterate across a cgroup,
-	 * we need to enable the list linking each css_set to its
-	 * tasks, and fix up all existing tasks.
+	 * The first time anyone tries to iterate across a css, we need to
+	 * enable the list linking each css_set to its tasks, and fix up
+	 * all existing tasks.
 	 */
 	if (!use_task_css_set_links)
 		cgroup_enable_task_cg_lists();
 
 	read_lock(&css_set_lock);
 
-	it->origin_cgrp = cgrp;
-	it->cset_link = &cgrp->cset_links;
+	it->origin_css = css;
+	it->cset_link = &css->cgroup->cset_links;
 
-	cgroup_advance_task_iter(it);
+	css_advance_task_iter(it);
 }
 
 /**
- * cgroup_task_iter_next - return the next task for the iterator
+ * css_task_iter_next - return the next task for the iterator
  * @it: the task iterator being iterated
  *
  * The "next" function for task iteration.  @it should have been
- * initialized via cgroup_task_iter_start().  Returns NULL when the
- * iteration reaches the end.
+ * initialized via css_task_iter_start().  Returns NULL when the iteration
+ * reaches the end.
  */
-struct task_struct *cgroup_task_iter_next(struct cgroup_task_iter *it)
+struct task_struct *css_task_iter_next(struct css_task_iter *it)
 {
 	struct task_struct *res;
 	struct list_head *l = it->task;
@@ -3288,7 +3289,7 @@ struct task_struct *cgroup_task_iter_next(struct cgroup_task_iter *it)
 		 * We reached the end of this task list - move on to the
 		 * next cgrp_cset_link.
 		 */
-		cgroup_advance_task_iter(it);
+		css_advance_task_iter(it);
 	} else {
 		it->task = l;
 	}
@@ -3296,12 +3297,12 @@ struct task_struct *cgroup_task_iter_next(struct cgroup_task_iter *it)
 }
 
 /**
- * cgroup_task_iter_end - finish task iteration
+ * css_task_iter_end - finish task iteration
  * @it: the task iterator to finish
  *
- * Finish task iteration started by cgroup_task_iter_start().
+ * Finish task iteration started by css_task_iter_start().
  */
-void cgroup_task_iter_end(struct cgroup_task_iter *it)
+void css_task_iter_end(struct css_task_iter *it)
 	__releases(css_set_lock)
 {
 	read_unlock(&css_set_lock);
@@ -3342,24 +3343,24 @@ static inline int started_after(void *p1, void *p2)
 }
 
 /**
- * cgroup_scan_tasks - iterate though all the tasks in a cgroup
- * @cgrp: the cgroup to iterate tasks of
+ * css_scan_tasks - iterate though all the tasks in a css
+ * @css: the css to iterate tasks of
  * @test: optional test callback
  * @process: process callback
  * @data: data passed to @test and @process
  * @heap: optional pre-allocated heap used for task iteration
  *
- * Iterate through all the tasks in a cgroup, calling @test for each, and
- * if it returns %true, call @process for it also.
+ * Iterate through all the tasks in @css, calling @test for each, and if it
+ * returns %true, call @process for it also.
  *
  * @test may be NULL, meaning always true (select all tasks), which
- * effectively duplicates cgroup_task_iter_{start,next,end}() but does not
+ * effectively duplicates css_task_iter_{start,next,end}() but does not
  * lock css_set_lock for the call to @process.
  *
  * It is guaranteed that @process will act on every task that is a member
- * of @cgrp for the duration of this call.  This function may or may not
- * call @process for tasks that exit or move to a different cgroup during
- * the call, or are forked or move into the cgroup during the call.
+ * of @css for the duration of this call.  This function may or may not
+ * call @process for tasks that exit or move to a different css during the
+ * call, or are forked or move into the css during the call.
  *
  * Note that @test may be called with locks held, and may in some
  * situations be called multiple times for the same task, so it should be
@@ -3370,13 +3371,13 @@ static inline int started_after(void *p1, void *p2)
  * temporary heap will be used (allocation of which may cause this function
  * to fail).
  */
-int cgroup_scan_tasks(struct cgroup *cgrp,
-		      bool (*test)(struct task_struct *, void *),
-		      void (*process)(struct task_struct *, void *),
-		      void *data, struct ptr_heap *heap)
+int css_scan_tasks(struct cgroup_subsys_state *css,
+		   bool (*test)(struct task_struct *, void *),
+		   void (*process)(struct task_struct *, void *),
+		   void *data, struct ptr_heap *heap)
 {
 	int retval, i;
-	struct cgroup_task_iter it;
+	struct css_task_iter it;
 	struct task_struct *p, *dropped;
 	/* Never dereference latest_task, since it's not refcounted */
 	struct task_struct *latest_task = NULL;
@@ -3397,7 +3398,7 @@ int cgroup_scan_tasks(struct cgroup *cgrp,
 
  again:
 	/*
-	 * Scan tasks in the cgroup, using the @test callback to determine
+	 * Scan tasks in the css, using the @test callback to determine
 	 * which are of interest, and invoking @process callback on the
 	 * ones which need an update.  Since we don't want to hold any
 	 * locks during the task updates, gather tasks to be processed in a
@@ -3408,8 +3409,8 @@ int cgroup_scan_tasks(struct cgroup *cgrp,
 	 * guarantees forward progress and that we don't miss any tasks.
 	 */
 	heap->size = 0;
-	cgroup_task_iter_start(cgrp, &it);
-	while ((p = cgroup_task_iter_next(&it))) {
+	css_task_iter_start(css, &it);
+	while ((p = css_task_iter_next(&it))) {
 		/*
 		 * Only affect tasks that qualify per the caller's callback,
 		 * if he provided one
@@ -3442,7 +3443,7 @@ int cgroup_scan_tasks(struct cgroup *cgrp,
 		 * the heap and wasn't inserted
 		 */
 	}
-	cgroup_task_iter_end(&it);
+	css_task_iter_end(&it);
 
 	if (heap->size) {
 		for (i = 0; i < heap->size; i++) {
@@ -3485,7 +3486,8 @@ static void cgroup_transfer_one_task(struct task_struct *task, void *data)
  */
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 {
-	return cgroup_scan_tasks(from, NULL, cgroup_transfer_one_task, to, NULL);
+	return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task,
+			      to, NULL);
 }
 
 /*
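
For callers that need filtering, css_scan_tasks() takes an optional @test
callback alongside @process; cgroup_transfer_tasks() above passes NULL for
@test to select every task. A sketch of the filtered form, modeled on the
cpuset callers converted later in this patch (my_test/my_process and the
nice-level check are illustrative only, not part of the patch):

	static bool my_test(struct task_struct *task, void *data)
	{
		/* may be called with locks held and more than once for
		 * the same task, so keep it cheap and non-blocking */
		return task_nice(task) > 0;
	}

	static void my_process(struct task_struct *task, void *data)
	{
		/* runs without css_set_lock held, so heavier work is ok */
		set_user_nice(task, 0);
	}

	/* passing a NULL heap makes css_scan_tasks() allocate a temporary
	 * one, which is why the call can fail in that case */
	ret = css_scan_tasks(css, my_test, my_process, NULL, NULL);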
@@ -3639,7 +3641,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 	pid_t *array;
 	int length;
 	int pid, n = 0; /* used for populating the array */
-	struct cgroup_task_iter it;
+	struct css_task_iter it;
 	struct task_struct *tsk;
 	struct cgroup_pidlist *l;
 
@@ -3654,8 +3656,8 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 	if (!array)
 		return -ENOMEM;
 	/* now, populate the array */
-	cgroup_task_iter_start(cgrp, &it);
-	while ((tsk = cgroup_task_iter_next(&it))) {
+	css_task_iter_start(&cgrp->dummy_css, &it);
+	while ((tsk = css_task_iter_next(&it))) {
 		if (unlikely(n == length))
 			break;
 		/* get tgid or pid for procs or tasks file respectively */
@@ -3666,7 +3668,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 		if (pid > 0) /* make sure to only use valid results */
 			array[n++] = pid;
 	}
-	cgroup_task_iter_end(&it);
+	css_task_iter_end(&it);
 	length = n;
 	/* now sort & (if procs) strip out duplicates */
 	sort(array, length, sizeof(pid_t), cmppid, NULL);
@@ -3700,7 +3702,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 {
 	int ret = -EINVAL;
 	struct cgroup *cgrp;
-	struct cgroup_task_iter it;
+	struct css_task_iter it;
 	struct task_struct *tsk;
 
 	/*
@@ -3714,8 +3716,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 	ret = 0;
 	cgrp = dentry->d_fsdata;
 
-	cgroup_task_iter_start(cgrp, &it);
-	while ((tsk = cgroup_task_iter_next(&it))) {
+	css_task_iter_start(&cgrp->dummy_css, &it);
+	while ((tsk = css_task_iter_next(&it))) {
 		switch (tsk->state) {
 		case TASK_RUNNING:
 			stats->nr_running++;
@@ -3735,7 +3737,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 			break;
 		}
 	}
-	cgroup_task_iter_end(&it);
+	css_task_iter_end(&it);
 
 err:
 	return ret;
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e0ab9bfd679a..5cd2b6d55243 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -258,7 +258,7 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 {
 	struct freezer *freezer = css_freezer(css);
 	struct cgroup_subsys_state *pos;
-	struct cgroup_task_iter it;
+	struct css_task_iter it;
 	struct task_struct *task;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
@@ -279,9 +279,9 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 	}
 
 	/* are all tasks frozen? */
-	cgroup_task_iter_start(css->cgroup, &it);
+	css_task_iter_start(css, &it);
 
-	while ((task = cgroup_task_iter_next(&it))) {
+	while ((task = css_task_iter_next(&it))) {
 		if (freezing(task)) {
 			/*
 			 * freezer_should_skip() indicates that the task
@@ -296,7 +296,7 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 
 	freezer->state |= CGROUP_FROZEN;
 out_iter_end:
-	cgroup_task_iter_end(&it);
+	css_task_iter_end(&it);
 out_unlock:
 	spin_unlock_irq(&freezer->lock);
 }
@@ -322,26 +322,24 @@ static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
 
 static void freeze_cgroup(struct freezer *freezer)
 {
-	struct cgroup *cgroup = freezer->css.cgroup;
-	struct cgroup_task_iter it;
+	struct css_task_iter it;
 	struct task_struct *task;
 
-	cgroup_task_iter_start(cgroup, &it);
-	while ((task = cgroup_task_iter_next(&it)))
+	css_task_iter_start(&freezer->css, &it);
+	while ((task = css_task_iter_next(&it)))
 		freeze_task(task);
-	cgroup_task_iter_end(&it);
+	css_task_iter_end(&it);
 }
 
 static void unfreeze_cgroup(struct freezer *freezer)
 {
-	struct cgroup *cgroup = freezer->css.cgroup;
-	struct cgroup_task_iter it;
+	struct css_task_iter it;
 	struct task_struct *task;
 
-	cgroup_task_iter_start(cgroup, &it);
-	while ((task = cgroup_task_iter_next(&it)))
+	css_task_iter_start(&freezer->css, &it);
+	while ((task = css_task_iter_next(&it)))
 		__thaw_task(task);
-	cgroup_task_iter_end(&it);
+	css_task_iter_end(&it);
 }
 
 /**
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6fe23f2ac742..39e52175f4af 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -832,8 +832,8 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
  * @tsk: task to test
  * @data: cpuset to @tsk belongs to
  *
- * Called by cgroup_scan_tasks() for each task in a cgroup whose
- * cpus_allowed mask needs to be changed.
+ * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
+ * mask needs to be changed.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cpuset_mutex at this point.
@@ -849,27 +849,26 @@ static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
  * Called with cpuset_mutex held
  *
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
  * if @heap != NULL.
  */
 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
-	cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_cpumask, cs,
-			  heap);
+	css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
 }
 
 /*
  * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
  * @root_cs: the root cpuset of the hierarchy
  * @update_root: update root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
  *
  * This will update cpumasks of tasks in @root_cs and all other empty cpusets
  * which take on cpumask of @root_cs.
@@ -1082,11 +1081,10 @@ static void *cpuset_being_rebound;
 /**
  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
- * Called with cpuset_mutex held
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Called with cpuset_mutex held.  No return value.  It's guaranteed that
+ * css_scan_tasks() always returns 0 if @heap != NULL.
  */
 static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 {
@@ -1109,8 +1107,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 	 * is idempotent.  Also migrate pages in each mm to new nodes.
 	 */
-	cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_nodemask, &arg,
-			  heap);
+	css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
 
 	/*
 	 * All the tasks' nodemasks have been updated, update
@@ -1126,7 +1123,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
  * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
  * @cs: the root cpuset of the hierarchy
  * @update_root: update the root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
  *
  * This will update nodemasks of tasks in @root_cs and all other empty cpusets
  * which take on nodemask of @root_cs.
@@ -1254,12 +1251,12 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 	return 0;
 }
 
-/*
+/**
  * cpuset_change_flag - make a task's spread flags the same as its cpuset's
  * @tsk: task to be updated
  * @data: cpuset to @tsk belongs to
  *
- * Called by cgroup_scan_tasks() for each task in a cgroup.
+ * Called by css_scan_tasks() for each task in a cgroup.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cpuset_mutex at this point.
@@ -1271,22 +1268,22 @@ static void cpuset_change_flag(struct task_struct *tsk, void *data)
 	cpuset_update_task_spread_flag(cs, tsk);
 }
 
-/*
+/**
  * update_tasks_flags - update the spread flags of tasks in the cpuset.
  * @cs: the cpuset in which each task's spread flags needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
  * Called with cpuset_mutex held
  *
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
  * if @heap != NULL.
  */
 static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
 {
-	cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_flag, cs, heap);
+	css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
 }
 
 /*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5a5f4dc649f0..95106a993777 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1799,12 +1799,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
 	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
 	for_each_mem_cgroup_tree(iter, memcg) {
-		struct cgroup *cgroup = iter->css.cgroup;
-		struct cgroup_task_iter it;
+		struct css_task_iter it;
 		struct task_struct *task;
 
-		cgroup_task_iter_start(cgroup, &it);
-		while ((task = cgroup_task_iter_next(&it))) {
+		css_task_iter_start(&iter->css, &it);
+		while ((task = css_task_iter_next(&it))) {
 			switch (oom_scan_process_thread(task, totalpages, NULL,
 							false)) {
 			case OOM_SCAN_SELECT:
@@ -1817,7 +1816,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 			case OOM_SCAN_CONTINUE:
 				continue;
 			case OOM_SCAN_ABORT:
-				cgroup_task_iter_end(&it);
+				css_task_iter_end(&it);
 				mem_cgroup_iter_break(memcg, iter);
 				if (chosen)
 					put_task_struct(chosen);
@@ -1834,7 +1833,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 				get_task_struct(chosen);
 			}
 		}
-		cgroup_task_iter_end(&it);
+		css_task_iter_end(&it);
 	}
 
 	if (!chosen)
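
The memcg hunks above also show the one subtlety of the converted
interface: css_set_lock is held for the whole walk, so a loop that bails
out early (the OOM_SCAN_ABORT case) must still call css_task_iter_end().
A sketch of that shape (the fatal_signal_pending() condition is just an
illustrative stand-in for whatever makes a walker abort):

	static void walk_until_blocked(struct cgroup_subsys_state *css)
	{
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(css, &it);
		while ((task = css_task_iter_next(&it))) {
			if (fatal_signal_pending(task)) {
				/* abort the walk, but css_set_lock must
				 * still be dropped */
				css_task_iter_end(&it);
				return;
			}
		}
		css_task_iter_end(&it);
	}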