about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Tejun Heo <tj@kernel.org>    2013-06-21 18:52:04 -0400
committer Tejun Heo <tj@kernel.org>    2013-06-26 13:48:38 -0400
commit    a8ad805cfde00be8fe3b3dae8890996dbeb91e2c (patch)
tree      cfe59b30636f951244c748eaab000da68d20a159
parent    14611e51a57df10240817d8ada510842faf0ec51 (diff)
cgroup: fix RCU accesses around task->cgroups
There are several places in kernel/cgroup.c where task->cgroups is
accessed and modified without going through proper RCU accessors.  None
is broken as they're all lock protected accesses; however, this still
triggers sparse RCU address space warnings.

* Consistently use task_css_set() for task->cgroups dereferencing.

* Use RCU_INIT_POINTER() to clear task->cgroups to &init_css_set on
  exit.

* Remove unnecessary rcu_dereference_raw() from cset->subsys[]
  dereference in cgroup_exit().

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Li Zefan <lizefan@huawei.com>
-rw-r--r--  kernel/cgroup.c  24
1 file changed, 13 insertions, 11 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2d3a132e881d..ee9f0c1c8bff 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -724,7 +724,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
724 * task can't change groups, so the only thing that can happen 724 * task can't change groups, so the only thing that can happen
725 * is that it exits and its css is set back to init_css_set. 725 * is that it exits and its css is set back to init_css_set.
726 */ 726 */
727 cset = task->cgroups; 727 cset = task_css_set(task);
728 if (cset == &init_css_set) { 728 if (cset == &init_css_set) {
729 res = &root->top_cgroup; 729 res = &root->top_cgroup;
730 } else { 730 } else {
@@ -1971,7 +1971,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
1971 * css_set to init_css_set and dropping the old one. 1971 * css_set to init_css_set and dropping the old one.
1972 */ 1972 */
1973 WARN_ON_ONCE(tsk->flags & PF_EXITING); 1973 WARN_ON_ONCE(tsk->flags & PF_EXITING);
1974 old_cset = tsk->cgroups; 1974 old_cset = task_css_set(tsk);
1975 1975
1976 task_lock(tsk); 1976 task_lock(tsk);
1977 rcu_assign_pointer(tsk->cgroups, new_cset); 1977 rcu_assign_pointer(tsk->cgroups, new_cset);
@@ -2094,8 +2094,11 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2094 * we use find_css_set, which allocates a new one if necessary. 2094 * we use find_css_set, which allocates a new one if necessary.
2095 */ 2095 */
2096 for (i = 0; i < group_size; i++) { 2096 for (i = 0; i < group_size; i++) {
2097 struct css_set *old_cset;
2098
2097 tc = flex_array_get(group, i); 2099 tc = flex_array_get(group, i);
2098 tc->cg = find_css_set(tc->task->cgroups, cgrp); 2100 old_cset = task_css_set(tc->task);
2101 tc->cg = find_css_set(old_cset, cgrp);
2099 if (!tc->cg) { 2102 if (!tc->cg) {
2100 retval = -ENOMEM; 2103 retval = -ENOMEM;
2101 goto out_put_css_set_refs; 2104 goto out_put_css_set_refs;
@@ -3012,7 +3015,7 @@ static void cgroup_enable_task_cg_lists(void)
3012 * entry won't be deleted though the process has exited. 3015 * entry won't be deleted though the process has exited.
3013 */ 3016 */
3014 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) 3017 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
3015 list_add(&p->cg_list, &p->cgroups->tasks); 3018 list_add(&p->cg_list, &task_css_set(p)->tasks);
3016 task_unlock(p); 3019 task_unlock(p);
3017 } while_each_thread(g, p); 3020 } while_each_thread(g, p);
3018 read_unlock(&tasklist_lock); 3021 read_unlock(&tasklist_lock);
@@ -5061,8 +5064,8 @@ static const struct file_operations proc_cgroupstats_operations = {
5061void cgroup_fork(struct task_struct *child) 5064void cgroup_fork(struct task_struct *child)
5062{ 5065{
5063 task_lock(current); 5066 task_lock(current);
5067 get_css_set(task_css_set(current));
5064 child->cgroups = current->cgroups; 5068 child->cgroups = current->cgroups;
5065 get_css_set(child->cgroups);
5066 task_unlock(current); 5069 task_unlock(current);
5067 INIT_LIST_HEAD(&child->cg_list); 5070 INIT_LIST_HEAD(&child->cg_list);
5068} 5071}
@@ -5097,7 +5100,7 @@ void cgroup_post_fork(struct task_struct *child)
5097 write_lock(&css_set_lock); 5100 write_lock(&css_set_lock);
5098 task_lock(child); 5101 task_lock(child);
5099 if (list_empty(&child->cg_list)) 5102 if (list_empty(&child->cg_list))
5100 list_add(&child->cg_list, &child->cgroups->tasks); 5103 list_add(&child->cg_list, &task_css_set(child)->tasks);
5101 task_unlock(child); 5104 task_unlock(child);
5102 write_unlock(&css_set_lock); 5105 write_unlock(&css_set_lock);
5103 } 5106 }
@@ -5177,8 +5180,8 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
5177 5180
5178 /* Reassign the task to the init_css_set. */ 5181 /* Reassign the task to the init_css_set. */
5179 task_lock(tsk); 5182 task_lock(tsk);
5180 cset = tsk->cgroups; 5183 cset = task_css_set(tsk);
5181 tsk->cgroups = &init_css_set; 5184 RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
5182 5185
5183 if (run_callbacks && need_forkexit_callback) { 5186 if (run_callbacks && need_forkexit_callback) {
5184 /* 5187 /*
@@ -5187,8 +5190,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
5187 */ 5190 */
5188 for_each_builtin_subsys(ss, i) { 5191 for_each_builtin_subsys(ss, i) {
5189 if (ss->exit) { 5192 if (ss->exit) {
5190 struct cgroup *old_cgrp = 5193 struct cgroup *old_cgrp = cset->subsys[i]->cgroup;
5191 rcu_dereference_raw(cset->subsys[i])->cgroup;
5192 struct cgroup *cgrp = task_cgroup(tsk, i); 5194 struct cgroup *cgrp = task_cgroup(tsk, i);
5193 5195
5194 ss->exit(cgrp, old_cgrp, tsk); 5196 ss->exit(cgrp, old_cgrp, tsk);
@@ -5555,7 +5557,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cgrp,
5555 u64 count; 5557 u64 count;
5556 5558
5557 rcu_read_lock(); 5559 rcu_read_lock();
5558 count = atomic_read(&current->cgroups->refcount); 5560 count = atomic_read(&task_css_set(current)->refcount);
5559 rcu_read_unlock(); 5561 rcu_read_unlock();
5560 return count; 5562 return count;
5561} 5563}