 kernel/sched.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+), 0 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index e03b45ccf789..debb06a4a660 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -271,6 +271,10 @@ struct task_group {
 
 	struct rcu_head rcu;
 	struct list_head list;
+
+	struct task_group *parent;
+	struct list_head siblings;
+	struct list_head children;
 };
 
 #ifdef CONFIG_USER_SCHED
@@ -7578,6 +7582,13 @@ void __init sched_init(void)
 
 #ifdef CONFIG_GROUP_SCHED
 	list_add(&init_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&init_task_group.children);
+
+#ifdef CONFIG_USER_SCHED
+	INIT_LIST_HEAD(&root_task_group.children);
+	init_task_group.parent = &root_task_group;
+	list_add(&init_task_group.siblings, &root_task_group.children);
+#endif
 #endif
 
 	for_each_possible_cpu(i) {
@@ -8039,6 +8050,12 @@ struct task_group *sched_create_group(struct task_group *parent)
 		register_rt_sched_group(tg, i);
 	}
 	list_add_rcu(&tg->list, &task_groups);
+
+	WARN_ON(!parent); /* root should already exist */
+
+	tg->parent = parent;
+	list_add_rcu(&tg->siblings, &parent->children);
+	INIT_LIST_HEAD(&tg->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;
@@ -8067,6 +8084,7 @@ void sched_destroy_group(struct task_group *tg)
 		unregister_rt_sched_group(tg, i);
 	}
 	list_del_rcu(&tg->list);
+	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	/* wait for possible concurrent references to cfs_rqs complete */
@@ -8162,6 +8180,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i)
 		unregister_fair_sched_group(tg, i);
+	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	/* wait for any ongoing reference to this group to finish */
@@ -8182,6 +8201,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	spin_lock_irqsave(&task_group_lock, flags);
 	for_each_possible_cpu(i)
 		register_fair_sched_group(tg, i);
+	list_add_rcu(&tg->siblings, &tg->parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 done:
 	mutex_unlock(&shares_mutex);