 kernel/sched.c      | 33 +++++++++++++++++++++++++--------
 kernel/sched_fair.c |  4 +++-
 2 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7f827b70ae02..cfa695819252 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,8 +169,6 @@ struct task_group {
 	/* runqueue "owned" by this group on each cpu */
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
-	/* spinlock to serialize modification to shares */
-	spinlock_t lock;
 	struct rcu_head rcu;
 };
 
@@ -182,6 +180,11 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static struct sched_entity *init_sched_entity_p[NR_CPUS];
 static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 
+/* task_group_mutex serializes add/remove of task groups and also changes to
+ * a task group's cpu shares.
+ */
+static DEFINE_MUTEX(task_group_mutex);
+
 /* Default task group.
  * Every task in system belong to this group at bootup.
  */
@@ -221,9 +224,21 @@ static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 	p->se.parent = task_group(p)->se[cpu];
 }
 
+static inline void lock_task_group_list(void)
+{
+	mutex_lock(&task_group_mutex);
+}
+
+static inline void unlock_task_group_list(void)
+{
+	mutex_unlock(&task_group_mutex);
+}
+
 #else
 
 static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
+static inline void lock_task_group_list(void) { }
+static inline void unlock_task_group_list(void) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -6768,7 +6783,6 @@ void __init sched_init(void)
 		se->parent = NULL;
 	}
 	init_task_group.shares = init_task_group_load;
-	spin_lock_init(&init_task_group.lock);
 #endif
 
 	for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -7008,14 +7022,15 @@ struct task_group *sched_create_group(void)
 		se->parent = NULL;
 	}
 
+	tg->shares = NICE_0_LOAD;
+
+	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 		cfs_rq = tg->cfs_rq[i];
 		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 	}
-
-	tg->shares = NICE_0_LOAD;
-	spin_lock_init(&tg->lock);
+	unlock_task_group_list();
 
 	return tg;
 
@@ -7061,10 +7076,12 @@ void sched_destroy_group(struct task_group *tg)
 	struct cfs_rq *cfs_rq = NULL;
 	int i;
 
+	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		cfs_rq = tg->cfs_rq[i];
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 	}
+	unlock_task_group_list();
 
 	BUG_ON(!cfs_rq);
 
@@ -7146,7 +7163,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (shares < 2)
 		shares = 2;
 
-	spin_lock(&tg->lock);
+	lock_task_group_list();
 	if (tg->shares == shares)
 		goto done;
 
@@ -7155,7 +7172,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		set_se_shares(tg->se[i], shares);
 
 done:
-	spin_unlock(&tg->lock);
+	unlock_task_group_list();
 	return 0;
 }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index da7c061e7206..0c5fdce67228 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -690,7 +690,7 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline int
@@ -1132,7 +1132,9 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
 #endif
+	lock_task_group_list();
 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
+	unlock_task_group_list();
 }
 #endif
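
The kernel/sched_fair.c side covers the readers: for_each_leaf_cfs_rq() now uses list_for_each_entry_rcu(), so paths already running under rcu_read_lock() can walk the leaf cfs_rq list concurrently with the list_add_rcu()/list_del_rcu() writers above, while print_cfs_stats() takes the new mutex instead, presumably because seq_file output may sleep and sleeping is not allowed inside an RCU read-side critical section. A hedged sketch of those two reader disciplines, reusing the hypothetical demo_list and demo_mutex from the previous example:

#include <linux/rcupdate.h>
#include <linux/seq_file.h>

/* Lockless reader: safe against concurrent add/del thanks to RCU. */
static unsigned long demo_sum_shares(void)
{
	struct demo_group *grp;
	unsigned long sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(grp, &demo_list, link)
		sum += grp->shares;
	rcu_read_unlock();
	return sum;
}

/* Blocking reader, like print_cfs_stats(): hold the writer mutex so
 * the list cannot change while the possibly-sleeping output runs. */
static void demo_print_all(struct seq_file *m)
{
	struct demo_group *grp;

	mutex_lock(&demo_mutex);
	list_for_each_entry_rcu(grp, &demo_list, link)
		seq_printf(m, "shares=%lu\n", grp->shares);
	mutex_unlock(&demo_mutex);
}
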