diff options
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- | kernel/cgroup.c | 51 |
1 files changed, 41 insertions, 10 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3a53c771e503..422cb19f156e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -2994,7 +2994,6 @@ static void cgroup_event_remove(struct work_struct *work) | |||
2994 | remove); | 2994 | remove); |
2995 | struct cgroup *cgrp = event->cgrp; | 2995 | struct cgroup *cgrp = event->cgrp; |
2996 | 2996 | ||
2997 | /* TODO: check return code */ | ||
2998 | event->cft->unregister_event(cgrp, event->cft, event->eventfd); | 2997 | event->cft->unregister_event(cgrp, event->cft, event->eventfd); |
2999 | 2998 | ||
3000 | eventfd_ctx_put(event->eventfd); | 2999 | eventfd_ctx_put(event->eventfd); |
@@ -3016,7 +3015,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, | |||
3016 | unsigned long flags = (unsigned long)key; | 3015 | unsigned long flags = (unsigned long)key; |
3017 | 3016 | ||
3018 | if (flags & POLLHUP) { | 3017 | if (flags & POLLHUP) { |
3019 | remove_wait_queue_locked(event->wqh, &event->wait); | 3018 | __remove_wait_queue(event->wqh, &event->wait); |
3020 | spin_lock(&cgrp->event_list_lock); | 3019 | spin_lock(&cgrp->event_list_lock); |
3021 | list_del(&event->list); | 3020 | list_del(&event->list); |
3022 | spin_unlock(&cgrp->event_list_lock); | 3021 | spin_unlock(&cgrp->event_list_lock); |
@@ -3615,7 +3614,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) | |||
3615 | * @ss: the subsystem to load | 3614 | * @ss: the subsystem to load |
3616 | * | 3615 | * |
3617 | * This function should be called in a modular subsystem's initcall. If the | 3616 | * This function should be called in a modular subsystem's initcall. If the |
3618 | * subsytem is built as a module, it will be assigned a new subsys_id and set | 3617 | * subsystem is built as a module, it will be assigned a new subsys_id and set |
3619 | * up for use. If the subsystem is built-in anyway, work is delegated to the | 3618 | * up for use. If the subsystem is built-in anyway, work is delegated to the |
3620 | * simpler cgroup_init_subsys. | 3619 | * simpler cgroup_init_subsys. |
3621 | */ | 3620 | */ |
@@ -4435,7 +4434,15 @@ __setup("cgroup_disable=", cgroup_disable); | |||
4435 | */ | 4434 | */ |
4436 | unsigned short css_id(struct cgroup_subsys_state *css) | 4435 | unsigned short css_id(struct cgroup_subsys_state *css) |
4437 | { | 4436 | { |
4438 | struct css_id *cssid = rcu_dereference(css->id); | 4437 | struct css_id *cssid; |
4438 | |||
4439 | /* | ||
4440 | * This css_id() can return correct value when someone has refcnt | ||
4441 | * on this or this is under rcu_read_lock(). Once css->id is allocated, | ||
4442 | * it's unchanged until freed. | ||
4443 | */ | ||
4444 | cssid = rcu_dereference_check(css->id, | ||
4445 | rcu_read_lock_held() || atomic_read(&css->refcnt)); | ||
4439 | 4446 | ||
4440 | if (cssid) | 4447 | if (cssid) |
4441 | return cssid->id; | 4448 | return cssid->id; |
@@ -4445,7 +4452,10 @@ EXPORT_SYMBOL_GPL(css_id); | |||
4445 | 4452 | ||
4446 | unsigned short css_depth(struct cgroup_subsys_state *css) | 4453 | unsigned short css_depth(struct cgroup_subsys_state *css) |
4447 | { | 4454 | { |
4448 | struct css_id *cssid = rcu_dereference(css->id); | 4455 | struct css_id *cssid; |
4456 | |||
4457 | cssid = rcu_dereference_check(css->id, | ||
4458 | rcu_read_lock_held() || atomic_read(&css->refcnt)); | ||
4449 | 4459 | ||
4450 | if (cssid) | 4460 | if (cssid) |
4451 | return cssid->depth; | 4461 | return cssid->depth; |
@@ -4453,15 +4463,36 @@ unsigned short css_depth(struct cgroup_subsys_state *css) | |||
4453 | } | 4463 | } |
4454 | EXPORT_SYMBOL_GPL(css_depth); | 4464 | EXPORT_SYMBOL_GPL(css_depth); |
4455 | 4465 | ||
4466 | /** | ||
4467 | * css_is_ancestor - test "root" css is an ancestor of "child" | ||
4468 | * @child: the css to be tested. | ||
4469 | * @root: the css supposed to be an ancestor of the child. | ||
4470 | * | ||
4471 | * Returns true if "root" is an ancestor of "child" in its hierarchy. Because | ||
4472 | * this function reads css->id, this uses rcu_dereference() and rcu_read_lock(). | ||
4473 | * But, considering usual usage, the csses should be valid objects after test. | ||
4474 | * Assuming that the caller will do some action to the child if this | ||
4475 | * returns true, the caller must take "child"'s reference count. | ||
4476 | * If "child" is valid object and this returns true, "root" is valid, too. | ||
4477 | */ | ||
4478 | |||
4456 | bool css_is_ancestor(struct cgroup_subsys_state *child, | 4479 | bool css_is_ancestor(struct cgroup_subsys_state *child, |
4457 | const struct cgroup_subsys_state *root) | 4480 | const struct cgroup_subsys_state *root) |
4458 | { | 4481 | { |
4459 | struct css_id *child_id = rcu_dereference(child->id); | 4482 | struct css_id *child_id; |
4460 | struct css_id *root_id = rcu_dereference(root->id); | 4483 | struct css_id *root_id; |
4484 | bool ret = true; | ||
4461 | 4485 | ||
4462 | if (!child_id || !root_id || (child_id->depth < root_id->depth)) | 4486 | rcu_read_lock(); |
4463 | return false; | 4487 | child_id = rcu_dereference(child->id); |
4464 | return child_id->stack[root_id->depth] == root_id->id; | 4488 | root_id = rcu_dereference(root->id); |
4489 | if (!child_id | ||
4490 | || !root_id | ||
4491 | || (child_id->depth < root_id->depth) | ||
4492 | || (child_id->stack[root_id->depth] != root_id->id)) | ||
4493 | ret = false; | ||
4494 | rcu_read_unlock(); | ||
4495 | return ret; | ||
4465 | } | 4496 | } |
4466 | 4497 | ||
4467 | static void __free_css_id_cb(struct rcu_head *head) | 4498 | static void __free_css_id_cb(struct rcu_head *head) |