 kernel/cgroup.c | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 878cd1810ad1..506f6da67ad1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -173,6 +173,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
 			      bool is_add);
 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
+static void cgroup_enable_task_cg_lists(void);
 
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -375,7 +376,7 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
  * fork()/exit() overhead for people who have cgroups compiled into their
  * kernel but not actually in use.
  */
-static int use_task_css_set_links __read_mostly;
+static bool use_task_css_set_links __read_mostly;
 
 static void __put_css_set(struct css_set *cset, int taskexit)
 {
@@ -1441,6 +1442,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 	struct cgroup_sb_opts opts;
 	struct dentry *dentry;
 	int ret;
+
+	/*
+	 * The first time anyone tries to mount a cgroup, enable the list
+	 * linking each css_set to its tasks and fix up all existing tasks.
+	 */
+	if (!use_task_css_set_links)
+		cgroup_enable_task_cg_lists();
 retry:
 	mutex_lock(&cgroup_tree_mutex);
 	mutex_lock(&cgroup_mutex);
@@ -1692,10 +1700,8 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 	rcu_assign_pointer(tsk->cgroups, new_cset);
 	task_unlock(tsk);
 
-	/* Update the css_set linked lists if we're using them */
 	write_lock(&css_set_lock);
-	if (!list_empty(&tsk->cg_list))
-		list_move(&tsk->cg_list, &new_cset->tasks);
+	list_move(&tsk->cg_list, &new_cset->tasks);
 	write_unlock(&css_set_lock);
 
 	/*
@@ -2362,13 +2368,19 @@ int cgroup_task_count(const struct cgroup *cgrp)
  * To reduce the fork() overhead for systems that are not actually using
  * their cgroups capability, we don't maintain the lists running through
  * each css_set to its tasks until we see the list actually used - in other
- * words after the first call to css_task_iter_start().
+ * words after the first mount.
  */
 static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
+
 	write_lock(&css_set_lock);
-	use_task_css_set_links = 1;
+
+	if (use_task_css_set_links)
+		goto out_unlock;
+
+	use_task_css_set_links = true;
+
 	/*
 	 * We need tasklist_lock because RCU is not safe against
 	 * while_each_thread(). Besides, a forking task that has passed
@@ -2379,16 +2391,22 @@ static void cgroup_enable_task_cg_lists(void)
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
 		task_lock(p);
+
+		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
+			     task_css_set(p) != &init_css_set);
+
 		/*
 		 * We should check if the process is exiting, otherwise
 		 * it will race with cgroup_exit() in that the list
 		 * entry won't be deleted though the process has exited.
 		 */
-		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+		if (!(p->flags & PF_EXITING))
 			list_add(&p->cg_list, &task_css_set(p)->tasks);
+
 		task_unlock(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
+out_unlock:
 	write_unlock(&css_set_lock);
 }
 
@@ -2621,13 +2639,8 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 			 struct css_task_iter *it)
 	__acquires(css_set_lock)
 {
-	/*
-	 * The first time anyone tries to iterate across a css, we need to
-	 * enable the list linking each css_set to its tasks, and fix up
-	 * all existing tasks.
-	 */
-	if (!use_task_css_set_links)
-		cgroup_enable_task_cg_lists();
+	/* no one should try to iterate before mounting cgroups */
+	WARN_ON_ONCE(!use_task_css_set_links);
 
 	read_lock(&css_set_lock);
 
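
The pattern the patch settles on is "check the flag unlocked at mount time, recheck under the write lock, enable once". Below is a minimal, self-contained userspace sketch of that pattern, not kernel code: every name (use_links, set_lock, enable_task_cg_lists, do_mount) is a hypothetical stand-in for use_task_css_set_links, css_set_lock, cgroup_enable_task_cg_lists() and cgroup_mount(), and the task walk is reduced to a printf.

/*
 * Minimal sketch of the lazy "enable on first mount" pattern.
 * Build with: cc -pthread sketch.c
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t set_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool use_links;			/* stands in for use_task_css_set_links */

static void enable_task_cg_lists(void)
{
	pthread_rwlock_wrlock(&set_lock);

	/* Idempotent: a concurrent or later mount finds the flag set and bails. */
	if (use_links)
		goto out_unlock;

	use_links = true;

	/*
	 * Here the kernel walks every existing task under tasklist_lock and
	 * links it onto its css_set's ->tasks list; the sketch only reports it.
	 */
	printf("linking all existing tasks\n");

out_unlock:
	pthread_rwlock_unlock(&set_lock);
}

static void do_mount(void)
{
	/*
	 * The unlocked test is only an optimization; correctness comes from
	 * the recheck under the write lock above, which is why the patch adds
	 * the goto out_unlock path to cgroup_enable_task_cg_lists().
	 */
	if (!use_links)
		enable_task_cg_lists();
	printf("mounted\n");
}

int main(void)
{
	do_mount();	/* first mount pays the one-time cost */
	do_mount();	/* later mounts skip it */
	return 0;
}

Because enabling now always happens before any hierarchy exists, every task is guaranteed to be on a css_set list by the time it can be migrated or iterated, which is what lets the patch drop the list_empty() check in cgroup_task_migrate() and replace the lazy enable in css_task_iter_start() with a WARN_ON_ONCE().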