diff options
author | Li Zefan <lizefan@huawei.com> | 2013-01-24 01:30:48 -0500 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-01-24 15:05:18 -0500 |
commit | ace783b9bbfa2182b4a561498db3f09a0c56bc79 (patch) | |
tree | e490b561cbd0d14f6514f4f2cf1413bcab05c275 | |
parent | fe1c06ca7523baa668c1eaf1e1016fa64753c32e (diff) |
sched: split out css_online/css_offline from tg creation/destruction
This is a preparation for later patches.
- What do we gain from cpu_cgroup_css_online():
After ss->css_alloc() and before ss->css_online(), there's a small
window that tg->css.cgroup is NULL. With this change, tg won't be seen
before ss->css_online(), where it's added to the global list, so we're
guaranteed we'll never see NULL tg->css.cgroup.
- What do we gain from cpu_cgroup_css_offline():
tg is freed via RCU, so is cgroup. Without this change, this is how
synchronization works:
cgroup_rmdir()
no ss->css_offline()
diput()
synchronize_rcu()
ss->css_free() <-- unregister tg, and free it via call_rcu()
kfree_rcu(cgroup) <-- wait possible refs to cgroup, and free cgroup
We can't just kfree(cgroup), because tg might access tg->css.cgroup.
With this change:
cgroup_rmdir()
ss->css_offline() <-- unregister tg
diput()
synchronize_rcu() <-- wait possible refs to tg and cgroup
ss->css_free() <-- free tg
kfree_rcu(cgroup) <-- free cgroup
As you see, kfree_rcu() is redundant now.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/sched.h | 3 | ||||
-rw-r--r-- | kernel/sched/auto_group.c | 3 | ||||
-rw-r--r-- | kernel/sched/core.c | 49 |
3 files changed, 45 insertions, 10 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 206bb089c06b..577eb973de7a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2750,7 +2750,10 @@ extern void normalize_rt_tasks(void); | |||
2750 | extern struct task_group root_task_group; | 2750 | extern struct task_group root_task_group; |
2751 | 2751 | ||
2752 | extern struct task_group *sched_create_group(struct task_group *parent); | 2752 | extern struct task_group *sched_create_group(struct task_group *parent); |
2753 | extern void sched_online_group(struct task_group *tg, | ||
2754 | struct task_group *parent); | ||
2753 | extern void sched_destroy_group(struct task_group *tg); | 2755 | extern void sched_destroy_group(struct task_group *tg); |
2756 | extern void sched_offline_group(struct task_group *tg); | ||
2754 | extern void sched_move_task(struct task_struct *tsk); | 2757 | extern void sched_move_task(struct task_struct *tsk); |
2755 | #ifdef CONFIG_FAIR_GROUP_SCHED | 2758 | #ifdef CONFIG_FAIR_GROUP_SCHED |
2756 | extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); | 2759 | extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); |
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 0984a21076a3..64de5f8b0c9e 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c | |||
@@ -35,6 +35,7 @@ static inline void autogroup_destroy(struct kref *kref) | |||
35 | ag->tg->rt_se = NULL; | 35 | ag->tg->rt_se = NULL; |
36 | ag->tg->rt_rq = NULL; | 36 | ag->tg->rt_rq = NULL; |
37 | #endif | 37 | #endif |
38 | sched_offline_group(ag->tg); | ||
38 | sched_destroy_group(ag->tg); | 39 | sched_destroy_group(ag->tg); |
39 | } | 40 | } |
40 | 41 | ||
@@ -76,6 +77,8 @@ static inline struct autogroup *autogroup_create(void) | |||
76 | if (IS_ERR(tg)) | 77 | if (IS_ERR(tg)) |
77 | goto out_free; | 78 | goto out_free; |
78 | 79 | ||
80 | sched_online_group(tg, &root_task_group); | ||
81 | |||
79 | kref_init(&ag->kref); | 82 | kref_init(&ag->kref); |
80 | init_rwsem(&ag->lock); | 83 | init_rwsem(&ag->lock); |
81 | ag->id = atomic_inc_return(&autogroup_seq_nr); | 84 | ag->id = atomic_inc_return(&autogroup_seq_nr); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 257002c13bb0..106167243d68 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -7159,7 +7159,6 @@ static void free_sched_group(struct task_group *tg) | |||
7159 | struct task_group *sched_create_group(struct task_group *parent) | 7159 | struct task_group *sched_create_group(struct task_group *parent) |
7160 | { | 7160 | { |
7161 | struct task_group *tg; | 7161 | struct task_group *tg; |
7162 | unsigned long flags; | ||
7163 | 7162 | ||
7164 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); | 7163 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); |
7165 | if (!tg) | 7164 | if (!tg) |
@@ -7171,6 +7170,17 @@ struct task_group *sched_create_group(struct task_group *parent) | |||
7171 | if (!alloc_rt_sched_group(tg, parent)) | 7170 | if (!alloc_rt_sched_group(tg, parent)) |
7172 | goto err; | 7171 | goto err; |
7173 | 7172 | ||
7173 | return tg; | ||
7174 | |||
7175 | err: | ||
7176 | free_sched_group(tg); | ||
7177 | return ERR_PTR(-ENOMEM); | ||
7178 | } | ||
7179 | |||
7180 | void sched_online_group(struct task_group *tg, struct task_group *parent) | ||
7181 | { | ||
7182 | unsigned long flags; | ||
7183 | |||
7174 | spin_lock_irqsave(&task_group_lock, flags); | 7184 | spin_lock_irqsave(&task_group_lock, flags); |
7175 | list_add_rcu(&tg->list, &task_groups); | 7185 | list_add_rcu(&tg->list, &task_groups); |
7176 | 7186 | ||
@@ -7180,12 +7190,6 @@ struct task_group *sched_create_group(struct task_group *parent) | |||
7180 | INIT_LIST_HEAD(&tg->children); | 7190 | INIT_LIST_HEAD(&tg->children); |
7181 | list_add_rcu(&tg->siblings, &parent->children); | 7191 | list_add_rcu(&tg->siblings, &parent->children); |
7182 | spin_unlock_irqrestore(&task_group_lock, flags); | 7192 | spin_unlock_irqrestore(&task_group_lock, flags); |
7183 | |||
7184 | return tg; | ||
7185 | |||
7186 | err: | ||
7187 | free_sched_group(tg); | ||
7188 | return ERR_PTR(-ENOMEM); | ||
7189 | } | 7193 | } |
7190 | 7194 | ||
7191 | /* rcu callback to free various structures associated with a task group */ | 7195 | /* rcu callback to free various structures associated with a task group */ |
@@ -7198,6 +7202,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp) | |||
7198 | /* Destroy runqueue etc associated with a task group */ | 7202 | /* Destroy runqueue etc associated with a task group */ |
7199 | void sched_destroy_group(struct task_group *tg) | 7203 | void sched_destroy_group(struct task_group *tg) |
7200 | { | 7204 | { |
7205 | /* wait for possible concurrent references to cfs_rqs complete */ | ||
7206 | call_rcu(&tg->rcu, free_sched_group_rcu); | ||
7207 | } | ||
7208 | |||
7209 | void sched_offline_group(struct task_group *tg) | ||
7210 | { | ||
7201 | unsigned long flags; | 7211 | unsigned long flags; |
7202 | int i; | 7212 | int i; |
7203 | 7213 | ||
@@ -7209,9 +7219,6 @@ void sched_destroy_group(struct task_group *tg) | |||
7209 | list_del_rcu(&tg->list); | 7219 | list_del_rcu(&tg->list); |
7210 | list_del_rcu(&tg->siblings); | 7220 | list_del_rcu(&tg->siblings); |
7211 | spin_unlock_irqrestore(&task_group_lock, flags); | 7221 | spin_unlock_irqrestore(&task_group_lock, flags); |
7212 | |||
7213 | /* wait for possible concurrent references to cfs_rqs complete */ | ||
7214 | call_rcu(&tg->rcu, free_sched_group_rcu); | ||
7215 | } | 7222 | } |
7216 | 7223 | ||
7217 | /* change task's runqueue when it moves between groups. | 7224 | /* change task's runqueue when it moves between groups. |
@@ -7563,6 +7570,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp) | |||
7563 | return &tg->css; | 7570 | return &tg->css; |
7564 | } | 7571 | } |
7565 | 7572 | ||
7573 | static int cpu_cgroup_css_online(struct cgroup *cgrp) | ||
7574 | { | ||
7575 | struct task_group *tg = cgroup_tg(cgrp); | ||
7576 | struct task_group *parent; | ||
7577 | |||
7578 | if (!cgrp->parent) | ||
7579 | return 0; | ||
7580 | |||
7581 | parent = cgroup_tg(cgrp->parent); | ||
7582 | sched_online_group(tg, parent); | ||
7583 | return 0; | ||
7584 | } | ||
7585 | |||
7566 | static void cpu_cgroup_css_free(struct cgroup *cgrp) | 7586 | static void cpu_cgroup_css_free(struct cgroup *cgrp) |
7567 | { | 7587 | { |
7568 | struct task_group *tg = cgroup_tg(cgrp); | 7588 | struct task_group *tg = cgroup_tg(cgrp); |
@@ -7570,6 +7590,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp) | |||
7570 | sched_destroy_group(tg); | 7590 | sched_destroy_group(tg); |
7571 | } | 7591 | } |
7572 | 7592 | ||
7593 | static void cpu_cgroup_css_offline(struct cgroup *cgrp) | ||
7594 | { | ||
7595 | struct task_group *tg = cgroup_tg(cgrp); | ||
7596 | |||
7597 | sched_offline_group(tg); | ||
7598 | } | ||
7599 | |||
7573 | static int cpu_cgroup_can_attach(struct cgroup *cgrp, | 7600 | static int cpu_cgroup_can_attach(struct cgroup *cgrp, |
7574 | struct cgroup_taskset *tset) | 7601 | struct cgroup_taskset *tset) |
7575 | { | 7602 | { |
@@ -7925,6 +7952,8 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
7925 | .name = "cpu", | 7952 | .name = "cpu", |
7926 | .css_alloc = cpu_cgroup_css_alloc, | 7953 | .css_alloc = cpu_cgroup_css_alloc, |
7927 | .css_free = cpu_cgroup_css_free, | 7954 | .css_free = cpu_cgroup_css_free, |
7955 | .css_online = cpu_cgroup_css_online, | ||
7956 | .css_offline = cpu_cgroup_css_offline, | ||
7928 | .can_attach = cpu_cgroup_can_attach, | 7957 | .can_attach = cpu_cgroup_can_attach, |
7929 | .attach = cpu_cgroup_attach, | 7958 | .attach = cpu_cgroup_attach, |
7930 | .exit = cpu_cgroup_exit, | 7959 | .exit = cpu_cgroup_exit, |