author		Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2008-01-25 15:07:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:07:59 -0500
commit		ec2c507fe8c8fa3c04fc6cb99a382a965c477379 (patch)
tree		f139f738ba08576318ec09047148cdc7ae33ac9b /kernel/sched.c
parent		93f992ccc008dd4030381caeebb252e85e66684b (diff)
sched: group scheduling, minor fixes
Minor bug fixes for the group scheduler:

- Use a mutex to serialize add/remove of task groups and also when
  changing shares of a task group. Use the same mutex when printing
  cfs_rq debugging stats for various task groups.

- Use list_for_each_entry_rcu in for_each_leaf_cfs_rq macro (when
  walking task group list)

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
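The changelog's second item, switching for_each_leaf_cfs_rq to list_for_each_entry_rcu, is not visible in the diff below because the diffstat is limited to kernel/sched.c; that macro lives in kernel/sched_fair.c. As a rough sketch of the described change (not the exact hunk from this commit), the RCU-aware walk would look something like:

/* Sketch only: assumes the 2.6.24-era for_each_leaf_cfs_rq definition in
 * kernel/sched_fair.c. Using the RCU list primitive lets readers walk the
 * per-rq leaf cfs_rq list safely against concurrent group add/remove.
 */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)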
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7f827b70ae02..cfa695819252 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,8 +169,6 @@ struct task_group {
 	/* runqueue "owned" by this group on each cpu */
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
-	/* spinlock to serialize modification to shares */
-	spinlock_t lock;
 	struct rcu_head rcu;
 };
 
@@ -182,6 +180,11 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static struct sched_entity *init_sched_entity_p[NR_CPUS];
 static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 
+/* task_group_mutex serializes add/remove of task groups and also changes to
+ * a task group's cpu shares.
+ */
+static DEFINE_MUTEX(task_group_mutex);
+
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
@@ -221,9 +224,21 @@ static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 	p->se.parent = task_group(p)->se[cpu];
 }
 
+static inline void lock_task_group_list(void)
+{
+	mutex_lock(&task_group_mutex);
+}
+
+static inline void unlock_task_group_list(void)
+{
+	mutex_unlock(&task_group_mutex);
+}
+
 #else
 
 static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
+static inline void lock_task_group_list(void) { }
+static inline void unlock_task_group_list(void) { }
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
@@ -6768,7 +6783,6 @@ void __init sched_init(void)
 		se->parent = NULL;
 	}
 	init_task_group.shares = init_task_group_load;
-	spin_lock_init(&init_task_group.lock);
 #endif
 
 	for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -7008,14 +7022,15 @@ struct task_group *sched_create_group(void)
 		se->parent = NULL;
 	}
 
+	tg->shares = NICE_0_LOAD;
+
+	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 		cfs_rq = tg->cfs_rq[i];
 		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 	}
-
-	tg->shares = NICE_0_LOAD;
-	spin_lock_init(&tg->lock);
+	unlock_task_group_list();
 
 	return tg;
 
@@ -7061,10 +7076,12 @@ void sched_destroy_group(struct task_group *tg)
 	struct cfs_rq *cfs_rq = NULL;
 	int i;
 
+	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		cfs_rq = tg->cfs_rq[i];
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 	}
+	unlock_task_group_list();
 
 	BUG_ON(!cfs_rq);
 
@@ -7146,7 +7163,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (shares < 2)
 		shares = 2;
 
-	spin_lock(&tg->lock);
+	lock_task_group_list();
 	if (tg->shares == shares)
 		goto done;
 
@@ -7155,7 +7172,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		set_se_shares(tg->se[i], shares);
 
 done:
-	spin_unlock(&tg->lock);
+	unlock_task_group_list();
 	return 0;
 }
 
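The changelog also calls for taking the same mutex while printing cfs_rq debugging stats; that change is likewise outside the kernel/sched.c diff above, since the stats printer lives in kernel/sched_fair.c. Assuming a print_cfs_stats() helper of this kernel generation, the locking pattern would look roughly like this sketch:

/* Sketch, not the exact hunk from this commit: hold the task-group list
 * mutex across the leaf cfs_rq walk so groups cannot be added or removed
 * while their debug statistics are being printed.
 */
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	lock_task_group_list();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	unlock_task_group_list();
}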