about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorSrivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>2007-10-29 16:18:11 -0400
committerIngo Molnar <mingo@elte.hu>2007-10-29 16:18:11 -0400
commitae8393e508e5f17add66964688c49bf0bfe4fcf9 (patch)
tree41df1c369499d862b2b4a7896f938fd4d30463b7 /kernel/sched.c
parent7bae49d498de87f0da0c20c67adaa278eac84566 (diff)
sched: move rcu_head to task_group struct
Peter Zijlstra noticed that the rcu_head object need not be present in every cfs_rq of a group. Move it to the task_group structure instead.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 235952b100eb..470480790f3f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -172,6 +172,7 @@ struct task_group {
 	unsigned long shares;
 	/* spinlock to serialize modification to shares */
 	spinlock_t lock;
+	struct rcu_head rcu;
 };
 
 /* Default task group's sched entity on each cpu */
@@ -258,7 +259,6 @@ struct cfs_rq {
 	 */
 	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
 	struct task_group *tg;	/* group that "owns" this runqueue */
-	struct rcu_head rcu;
 #endif
 };
 
@@ -7019,8 +7019,8 @@ err:
 /* rcu callback to free various structures associated with a task group */
 static void free_sched_group(struct rcu_head *rhp)
 {
-	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-	struct task_group *tg = cfs_rq->tg;
+	struct task_group *tg = container_of(rhp, struct task_group, rcu);
+	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
 	int i;
 
@@ -7052,7 +7052,7 @@ void sched_destroy_group(struct task_group *tg)
 	BUG_ON(!cfs_rq);
 
 	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&cfs_rq->rcu, free_sched_group);
+	call_rcu(&tg->rcu, free_sched_group);
 }
 
 /* change task's runqueue when it moves between groups.