Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	104
1 file changed, 56 insertions(+), 48 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 52444673e9a2..0b6e50c67b65 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -664,6 +664,52 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
 		cgroup_update_populated(link->cgrp, populated);
 }
 
+/**
+ * css_set_move_task - move a task from one css_set to another
+ * @task: task being moved
+ * @from_cset: css_set @task currently belongs to (may be NULL)
+ * @to_cset: new css_set @task is being moved to (may be NULL)
+ * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
+ *
+ * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
+ * css_set, @from_cset can be NULL.  If @task is being disassociated
+ * instead of moved, @to_cset can be NULL.
+ *
+ * This function automatically handles populated_cnt updates but the caller
+ * is responsible for managing @from_cset and @to_cset's reference counts.
+ */
+static void css_set_move_task(struct task_struct *task,
+			      struct css_set *from_cset, struct css_set *to_cset,
+			      bool use_mg_tasks)
+{
+	lockdep_assert_held(&css_set_rwsem);
+
+	if (from_cset) {
+		WARN_ON_ONCE(list_empty(&task->cg_list));
+		list_del_init(&task->cg_list);
+		if (!css_set_populated(from_cset))
+			css_set_update_populated(from_cset, false);
+	} else {
+		WARN_ON_ONCE(!list_empty(&task->cg_list));
+	}
+
+	if (to_cset) {
+		/*
+		 * We are synchronized through cgroup_threadgroup_rwsem
+		 * against PF_EXITING setting such that we can't race
+		 * against cgroup_exit() changing the css_set to
+		 * init_css_set and dropping the old one.
+		 */
+		WARN_ON_ONCE(task->flags & PF_EXITING);
+
+		if (!css_set_populated(to_cset))
+			css_set_update_populated(to_cset, true);
+		rcu_assign_pointer(task->cgroups, to_cset);
+		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
+							     &to_cset->tasks);
+	}
+}
+
 /*
  * hash table for cgroup groups. This improves the performance to find
  * an existing css_set. This hash doesn't (currently) take into
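
The comment on css_set_move_task() makes the caller responsible for both css_set references and for holding css_set_rwsem. The conversions later in this patch all follow the same shape; the following is an illustrative sketch distilled from those hunks, not part of the diff, using their variable names:

	down_write(&css_set_rwsem);
	get_css_set(to_cset);				/* caller pins the destination */
	css_set_move_task(task, from_cset, to_cset, use_mg_tasks);
	put_css_set_locked(from_cset);			/* caller drops the source ref */
	up_write(&css_set_rwsem);

When there is no source (fork) or no destination (exit), the corresponding get or put simply disappears, which is why @from_cset and @to_cset may be NULL.
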
@@ -2262,47 +2308,6 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
 }
 
 /**
- * cgroup_task_migrate - move a task from one cgroup to another.
- * @tsk: the task being migrated
- * @new_cset: the new css_set @tsk is being attached to
- *
- * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
- */
-static void cgroup_task_migrate(struct task_struct *tsk,
-				struct css_set *new_cset)
-{
-	struct css_set *old_cset;
-
-	lockdep_assert_held(&cgroup_mutex);
-	lockdep_assert_held(&css_set_rwsem);
-
-	/*
-	 * We are synchronized through cgroup_threadgroup_rwsem against
-	 * PF_EXITING setting such that we can't race against cgroup_exit()
-	 * changing the css_set to init_css_set and dropping the old one.
-	 */
-	WARN_ON_ONCE(tsk->flags & PF_EXITING);
-	old_cset = task_css_set(tsk);
-
-	if (!css_set_populated(new_cset))
-		css_set_update_populated(new_cset, true);
-
-	get_css_set(new_cset);
-	rcu_assign_pointer(tsk->cgroups, new_cset);
-	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
-
-	if (!css_set_populated(old_cset))
-		css_set_update_populated(old_cset, false);
-
-	/*
-	 * We just gained a reference on old_cset by taking it from the
-	 * task.  As trading it for new_cset is protected by cgroup_mutex,
-	 * we're safe to drop it here; it will be freed under RCU.
-	 */
-	put_css_set_locked(old_cset);
-}
-
-/**
  * cgroup_taskset_migrate - migrate a taskset to a cgroup
  * @tset: target taskset
  * @dst_cgrp: destination cgroup
@@ -2342,8 +2347,14 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
 	 */
 	down_write(&css_set_rwsem);
 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
-		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
-			cgroup_task_migrate(task, cset->mg_dst_cset);
+		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
+			struct css_set *from_cset = task_css_set(task);
+			struct css_set *to_cset = cset->mg_dst_cset;
+
+			get_css_set(to_cset);
+			css_set_move_task(task, from_cset, to_cset, true);
+			put_css_set_locked(from_cset);
+		}
 	}
 	up_write(&css_set_rwsem);
 
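
The loop above open-codes what the removed cgroup_task_migrate() used to do; the behavior is unchanged, only where each step lives moves. An illustrative mapping, not part of the diff:

	/* in cgroup_task_migrate() before          ->  now                              */
	/* WARN_ON_ONCE(tsk->flags & PF_EXITING)    ->  inside css_set_move_task()       */
	/* populated updates for old and new csets  ->  inside css_set_move_task()       */
	/* rcu_assign_pointer() + list_move_tail()  ->  inside css_set_move_task()       */
	/* get_css_set(new_cset)                    ->  get_css_set() done by the caller */
	/* put_css_set_locked(old_cset)             ->  put_css_set_locked() by the caller */
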
@@ -5478,9 +5489,8 @@ void cgroup_post_fork(struct task_struct *child,
 		down_write(&css_set_rwsem);
 		cset = task_css_set(current);
 		if (list_empty(&child->cg_list)) {
-			rcu_assign_pointer(child->cgroups, cset);
-			list_add_tail(&child->cg_list, &cset->tasks);
 			get_css_set(cset);
+			css_set_move_task(child, NULL, cset, false);
 		}
 		up_write(&css_set_rwsem);
 	}
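
cgroup_post_fork() is the @from_cset == NULL case: the child is not on any cg_list yet, so only the attach half of the helper runs, and the task lands on cset->tasks rather than ->mg_tasks because use_mg_tasks is false. Expanded for illustration from the function added in the first hunk, not part of the diff:

	/* css_set_move_task(child, NULL, cset, false) reduces to: */
	WARN_ON_ONCE(!list_empty(&child->cg_list));	/* child must not be linked yet */
	WARN_ON_ONCE(child->flags & PF_EXITING);	/* a forked child cannot be exiting */
	if (!css_set_populated(cset))			/* cset may be gaining its first task */
		css_set_update_populated(cset, true);
	rcu_assign_pointer(child->cgroups, cset);
	list_add_tail(&child->cg_list, &cset->tasks);	/* ->tasks, since use_mg_tasks == false */
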
@@ -5528,9 +5538,7 @@ void cgroup_exit(struct task_struct *tsk)
 
 	if (!list_empty(&tsk->cg_list)) {
 		down_write(&css_set_rwsem);
-		list_del_init(&tsk->cg_list);
-		if (!css_set_populated(cset))
-			css_set_update_populated(cset, false);
+		css_set_move_task(tsk, cset, NULL, false);
 		up_write(&css_set_rwsem);
 		put_cset = true;
 	}
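
cgroup_exit() is the mirror image, the @to_cset == NULL case: the task is only unlinked and, if that leaves the css_set without tasks, its populated state is cleared; the css_set reference is dropped later via the put_cset flag, outside css_set_rwsem (that eventual put is outside this hunk and is assumed here). Expanded for illustration, not part of the diff:

	/* css_set_move_task(tsk, cset, NULL, false) reduces to: */
	WARN_ON_ONCE(list_empty(&tsk->cg_list));	/* task was linked on a css_set */
	list_del_init(&tsk->cg_list);
	if (!css_set_populated(cset))			/* last task gone from this cset? */
		css_set_update_populated(cset, false);
	/* the reference itself is dropped after up_write(), guarded by put_cset (assumed) */
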