path: root/kernel/cgroup.c
author    Tejun Heo <tj@kernel.org>    2014-02-13 06:58:39 -0500
committer Tejun Heo <tj@kernel.org>    2014-02-13 06:58:39 -0500
commit    afeb0f9fd425239aa477c842480f240bfb6325b3 (patch)
tree      9c13d5f1837c4d99d2ff932c4f13bfc5feadc09a /kernel/cgroup.c
parent    56fde9e01de45bcfabbb444d33e8bdd8388d2da0 (diff)
cgroup: relocate cgroup_enable_task_cg_lists()
Move it above its user so that the prototype isn't necessary. Let's also
move the definition of use_task_css_set_links next to it. This is purely
cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
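For readers less familiar with C's declaration rules: a function must be declared before the point where it is called, so a definition that sits below its caller needs a separate prototype near the top of the file. Moving the definition above the caller, as this commit does, makes that prototype redundant. A minimal standalone sketch of the pattern (hypothetical names, not kernel code):

#include <stdio.h>

/* The full definition appears before its caller, so no separate
 * "static void helper(void);" forward declaration is needed. */
static void helper(void)
{
        printf("helper called\n");
}

static void caller(void)
{
        helper();       /* visible here because it is defined above */
}

int main(void)
{
        caller();
        return 0;
}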
Diffstat (limited to 'kernel/cgroup.c')
 kernel/cgroup.c | 103
 1 file changed, 48 insertions(+), 55 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 506f6da67ad1..2469699408bd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -173,7 +173,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
                               bool is_add);
 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
-static void cgroup_enable_task_cg_lists(void);
 
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -370,14 +369,6 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
         return key;
 }
 
-/*
- * We don't maintain the lists running through each css_set to its task
- * until after the first call to css_task_iter_start(). This reduces the
- * fork()/exit() overhead for people who have cgroups compiled into their
- * kernel but not actually in use.
- */
-static bool use_task_css_set_links __read_mostly;
-
 static void __put_css_set(struct css_set *cset, int taskexit)
 {
         struct cgrp_cset_link *link, *tmp_link;
@@ -1307,6 +1298,54 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
         return ret;
 }
 
+/*
+ * To reduce the fork() overhead for systems that are not actually using
+ * their cgroups capability, we don't maintain the lists running through
+ * each css_set to its tasks until we see the list actually used - in other
+ * words after the first mount.
+ */
+static bool use_task_css_set_links __read_mostly;
+
+static void cgroup_enable_task_cg_lists(void)
+{
+        struct task_struct *p, *g;
+
+        write_lock(&css_set_lock);
+
+        if (use_task_css_set_links)
+                goto out_unlock;
+
+        use_task_css_set_links = true;
+
+        /*
+         * We need tasklist_lock because RCU is not safe against
+         * while_each_thread(). Besides, a forking task that has passed
+         * cgroup_post_fork() without seeing use_task_css_set_links = 1
+         * is not guaranteed to have its child immediately visible in the
+         * tasklist if we walk through it with RCU.
+         */
+        read_lock(&tasklist_lock);
+        do_each_thread(g, p) {
+                task_lock(p);
+
+                WARN_ON_ONCE(!list_empty(&p->cg_list) ||
+                             task_css_set(p) != &init_css_set);
+
+                /*
+                 * We should check if the process is exiting, otherwise
+                 * it will race with cgroup_exit() in that the list
+                 * entry won't be deleted though the process has exited.
+                 */
+                if (!(p->flags & PF_EXITING))
+                        list_add(&p->cg_list, &task_css_set(p)->tasks);
+
+                task_unlock(p);
+        } while_each_thread(g, p);
+        read_unlock(&tasklist_lock);
+out_unlock:
+        write_unlock(&css_set_lock);
+}
+
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
 {
         atomic_set(&cgrp->refcnt, 1);
@@ -2364,52 +2403,6 @@ int cgroup_task_count(const struct cgroup *cgrp)
         return count;
 }
 
-/*
- * To reduce the fork() overhead for systems that are not actually using
- * their cgroups capability, we don't maintain the lists running through
- * each css_set to its tasks until we see the list actually used - in other
- * words after the first mount.
- */
-static void cgroup_enable_task_cg_lists(void)
-{
-        struct task_struct *p, *g;
-
-        write_lock(&css_set_lock);
-
-        if (use_task_css_set_links)
-                goto out_unlock;
-
-        use_task_css_set_links = true;
-
-        /*
-         * We need tasklist_lock because RCU is not safe against
-         * while_each_thread(). Besides, a forking task that has passed
-         * cgroup_post_fork() without seeing use_task_css_set_links = 1
-         * is not guaranteed to have its child immediately visible in the
-         * tasklist if we walk through it with RCU.
-         */
-        read_lock(&tasklist_lock);
-        do_each_thread(g, p) {
-                task_lock(p);
-
-                WARN_ON_ONCE(!list_empty(&p->cg_list) ||
-                             task_css_set(p) != &init_css_set);
-
-                /*
-                 * We should check if the process is exiting, otherwise
-                 * it will race with cgroup_exit() in that the list
-                 * entry won't be deleted though the process has exited.
-                 */
-                if (!(p->flags & PF_EXITING))
-                        list_add(&p->cg_list, &task_css_set(p)->tasks);
-
-                task_unlock(p);
-        } while_each_thread(g, p);
-        read_unlock(&tasklist_lock);
-out_unlock:
-        write_unlock(&css_set_lock);
-}
-
 /**
  * css_next_child - find the next child of a given css
  * @pos_css: the current position (%NULL to initiate traversal)
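Taken as a whole, the relocated function is a lazy one-shot enable: the first caller flips use_task_css_set_links under css_set_lock and back-fills every pre-existing task onto its css_set's task list, while every later caller re-checks the flag and bails out immediately. A rough user-space analogue of that idempotent-enable pattern, sketched with pthreads (all names here are illustrative; this is not the kernel's locking API):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t set_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool links_enabled;      /* plays the role of use_task_css_set_links */

static void enable_links(void)
{
        /* Take the writer side unconditionally; checking the flag inside
         * the critical section keeps repeat calls cheap and race-free,
         * much like the "if (use_task_css_set_links) goto out_unlock"
         * early exit in the kernel function. */
        pthread_rwlock_wrlock(&set_lock);
        if (!links_enabled) {
                links_enabled = true;
                /* ... one-time back-fill of existing items goes here,
                 * analogous to the do_each_thread() walk ... */
        }
        pthread_rwlock_unlock(&set_lock);
}

int main(void)
{
        enable_links();         /* first call does the work */
        enable_links();         /* subsequent calls return immediately */
        return 0;
}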