author     Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:14 -0400
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:14 -0400
commit     4cf86d77f5942336e7cd9de874b38b3c83b54d5e (patch)
tree       a62b0a1b5a71f715257b82c0f65f894153757c84 /kernel/sched.c
parent     06877c33fe9261ccdf143492c28de93c56493079 (diff)
sched: cleanup: rename task_grp to task_group
cleanup: rename task_grp to task_group. There is no need to save two
characters, and 'grp' is annoying to read.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 36
1 file changed, 18 insertions(+), 18 deletions(-)
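
For orientation before the diff below, here is a simplified, self-contained sketch of the data structures being renamed. It is an illustrative model, not the kernel source: the stubbed types, the NR_CPUS value, and the omission of the CONFIG_FAIR_USER_SCHED path are assumptions made only for this sketch.

/* Simplified model of the renamed structures -- an illustrative sketch,
 * not the kernel source: sched_entity and task_struct are stubbed out,
 * NR_CPUS is an arbitrary placeholder, and the CONFIG_FAIR_USER_SCHED
 * path (tg = p->user->tg) is omitted. */
#define NR_CPUS 4

struct sched_entity { unsigned long weight; };
struct cfs_rq;

/* task group related information (post-rename) */
struct task_group {
	struct sched_entity **se;	/* schedulable entity of this group, per cpu */
	struct cfs_rq **cfs_rq;		/* runqueue "owned" by this group, per cpu */
	unsigned long shares;
};

struct cfs_rq {
	struct task_group *tg;		/* group that "owns" this runqueue */
};

static struct sched_entity *init_sched_entity_p[NR_CPUS];
static struct cfs_rq *init_cfs_rq_p[NR_CPUS];

/* Default task group; every task belongs to it at bootup. */
struct task_group init_task_group = {
	.se	= init_sched_entity_p,
	.cfs_rq	= init_cfs_rq_p,
};

struct task_struct;	/* opaque in this sketch */

/* return group to which a task belongs (default-group path only) */
static inline struct task_group *task_group(struct task_struct *p)
{
	(void)p;
	return &init_task_group;
}

In the actual file, CONFIG_FAIR_USER_SCHED selects the per-user group via p->user->tg instead, as the hunk around line 196 in the diff shows.
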
diff --git a/kernel/sched.c b/kernel/sched.c
index 5bfe1df73f0f..f2b8db4d6802 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -156,7 +156,7 @@ struct rt_prio_array {
 struct cfs_rq;
 
 /* task group related information */
-struct task_grp {
+struct task_group {
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
 	/* runqueue "owned" by this group on each cpu */
@@ -175,7 +175,7 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 /* Default task group.
  * Every task in system belong to this group at bootup.
  */
-struct task_grp init_task_grp = {
+struct task_group init_task_group = {
 	.se = init_sched_entity_p,
 	.cfs_rq = init_cfs_rq_p,
 };
@@ -186,17 +186,17 @@ struct task_grp init_task_grp = {
 # define INIT_TASK_GRP_LOAD	NICE_0_LOAD
 #endif
 
-static int init_task_grp_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GRP_LOAD;
 
 /* return group to which a task belongs */
-static inline struct task_grp *task_grp(struct task_struct *p)
+static inline struct task_group *task_group(struct task_struct *p)
 {
-	struct task_grp *tg;
+	struct task_group *tg;
 
 #ifdef CONFIG_FAIR_USER_SCHED
 	tg = p->user->tg;
 #else
-	tg = &init_task_grp;
+	tg = &init_task_group;
 #endif
 
 	return tg;
@@ -205,8 +205,8 @@ static inline struct task_grp *task_grp(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_cfs_rq(struct task_struct *p)
 {
-	p->se.cfs_rq = task_grp(p)->cfs_rq[task_cpu(p)];
-	p->se.parent = task_grp(p)->se[task_cpu(p)];
+	p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
+	p->se.parent = task_group(p)->se[task_cpu(p)];
 }
 
 #else
@@ -244,7 +244,7 @@ struct cfs_rq {
 	 * list is used during load balance.
 	 */
 	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
-	struct task_grp *tg;    /* group that "owns" this runqueue */
+	struct task_group *tg;    /* group that "owns" this runqueue */
 	struct rcu_head rcu;
 #endif
 };
@@ -6522,19 +6522,19 @@ void __init sched_init(void)
 
 			init_cfs_rq_p[i] = cfs_rq;
 			init_cfs_rq(cfs_rq, rq);
-			cfs_rq->tg = &init_task_grp;
+			cfs_rq->tg = &init_task_group;
 			list_add(&cfs_rq->leaf_cfs_rq_list,
 							&rq->leaf_cfs_rq_list);
 
 			init_sched_entity_p[i] = se;
 			se->cfs_rq = &rq->cfs;
 			se->my_q = cfs_rq;
-			se->load.weight = init_task_grp_load;
+			se->load.weight = init_task_group_load;
 			se->load.inv_weight =
-				div64_64(1ULL<<32, init_task_grp_load);
+				div64_64(1ULL<<32, init_task_group_load);
 			se->parent = NULL;
 		}
-		init_task_grp.shares = init_task_grp_load;
+		init_task_group.shares = init_task_group_load;
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -6725,9 +6725,9 @@ void set_curr_task(int cpu, struct task_struct *p)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* allocate runqueue etc for a new task group */
-struct task_grp *sched_create_group(void)
+struct task_group *sched_create_group(void)
 {
-	struct task_grp *tg;
+	struct task_group *tg;
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
 	struct rq *rq;
@@ -6800,7 +6800,7 @@ err:
 static void free_sched_group(struct rcu_head *rhp)
 {
 	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-	struct task_grp *tg = cfs_rq->tg;
+	struct task_group *tg = cfs_rq->tg;
 	struct sched_entity *se;
 	int i;
 
@@ -6819,7 +6819,7 @@ static void free_sched_group(struct rcu_head *rhp)
 }
 
 /* Destroy runqueue etc associated with a task group */
-void sched_destroy_group(struct task_grp *tg)
+void sched_destroy_group(struct task_group *tg)
 {
 	struct cfs_rq *cfs_rq;
 	int i;
@@ -6895,7 +6895,7 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
 	spin_unlock_irq(&rq->lock);
 }
 
-int sched_group_set_shares(struct task_grp *tg, unsigned long shares)
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 