author		Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:14 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:14 -0400
commit		4cf86d77f5942336e7cd9de874b38b3c83b54d5e
tree		a62b0a1b5a71f715257b82c0f65f894153757c84
parent		06877c33fe9261ccdf143492c28de93c56493079
sched: cleanup: rename task_grp to task_group
cleanup: rename task_grp to task_group. No need to save two characters
and 'grp' is annoying to read.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/sched.h | 12
 kernel/sched.c        | 36
 kernel/sched_debug.c  |  6
 kernel/user.c         |  2
 4 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 49c7b374eac8..3cddbfc0c91d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -136,7 +136,7 @@ extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
-struct task_grp;
+struct task_group;
 #ifdef CONFIG_SCHED_DEBUG
 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
@@ -598,7 +598,7 @@ struct user_struct {
 	uid_t uid;
 
 #ifdef CONFIG_FAIR_USER_SCHED
-	struct task_grp *tg;
+	struct task_group *tg;
 #endif
 };
 
@@ -1842,12 +1842,12 @@ extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-extern struct task_grp init_task_grp;
+extern struct task_group init_task_group;
 
-extern struct task_grp *sched_create_group(void);
-extern void sched_destroy_group(struct task_grp *tg);
+extern struct task_group *sched_create_group(void);
+extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
-extern int sched_group_set_shares(struct task_grp *tg, unsigned long shares);
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 #endif
 
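The hunk above covers the whole public surface of the renamed API. Purely as a reading aid, and not part of this commit, here is a minimal C sketch of how that API composes under CONFIG_FAIR_GROUP_SCHED. The helper name example_create_heavy_group and the doubled share value are hypothetical, and the sketch assumes sched_create_group() reports failure via ERR_PTR(), as the kernel/sched.c implementation of this period does:

#include <linux/err.h>
#include <linux/sched.h>

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Hypothetical helper (illustration only): create a task group with
 * twice the default weight, undoing the allocation if setting the
 * shares fails.  NICE_0_LOAD is the default per-entity load, as used
 * for INIT_TASK_GRP_LOAD in kernel/sched.c, so this only compiles
 * where that macro is visible.
 */
static struct task_group *example_create_heavy_group(void)
{
	struct task_group *tg;
	int err;

	tg = sched_create_group();	/* allocates per-cpu cfs_rq and sched_entity */
	if (IS_ERR(tg))
		return tg;

	err = sched_group_set_shares(tg, 2 * NICE_0_LOAD);
	if (err) {
		sched_destroy_group(tg);	/* teardown is RCU-deferred */
		return ERR_PTR(err);
	}

	return tg;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */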
diff --git a/kernel/sched.c b/kernel/sched.c
index 5bfe1df73f0f..f2b8db4d6802 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -156,7 +156,7 @@ struct rt_prio_array {
 struct cfs_rq;
 
 /* task group related information */
-struct task_grp {
+struct task_group {
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
 	/* runqueue "owned" by this group on each cpu */
@@ -175,7 +175,7 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 /* Default task group.
  * Every task in system belong to this group at bootup.
  */
-struct task_grp init_task_grp = {
+struct task_group init_task_group = {
 	.se	= init_sched_entity_p,
 	.cfs_rq	= init_cfs_rq_p,
 };
@@ -186,17 +186,17 @@ struct task_grp init_task_grp = {
 # define INIT_TASK_GRP_LOAD	NICE_0_LOAD
 #endif
 
-static int init_task_grp_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GRP_LOAD;
 
 /* return group to which a task belongs */
-static inline struct task_grp *task_grp(struct task_struct *p)
+static inline struct task_group *task_group(struct task_struct *p)
 {
-	struct task_grp *tg;
+	struct task_group *tg;
 
 #ifdef CONFIG_FAIR_USER_SCHED
 	tg = p->user->tg;
 #else
-	tg = &init_task_grp;
+	tg = &init_task_group;
 #endif
 
 	return tg;
@@ -205,8 +205,8 @@ static inline struct task_grp *task_grp(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_cfs_rq(struct task_struct *p)
 {
-	p->se.cfs_rq = task_grp(p)->cfs_rq[task_cpu(p)];
-	p->se.parent = task_grp(p)->se[task_cpu(p)];
+	p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
+	p->se.parent = task_group(p)->se[task_cpu(p)];
 }
 
 #else
@@ -244,7 +244,7 @@ struct cfs_rq {
 	 * list is used during load balance.
 	 */
 	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
-	struct task_grp *tg;	/* group that "owns" this runqueue */
+	struct task_group *tg;	/* group that "owns" this runqueue */
 	struct rcu_head rcu;
 #endif
 };
@@ -6522,19 +6522,19 @@ void __init sched_init(void)
 
 			init_cfs_rq_p[i] = cfs_rq;
 			init_cfs_rq(cfs_rq, rq);
-			cfs_rq->tg = &init_task_grp;
+			cfs_rq->tg = &init_task_group;
 			list_add(&cfs_rq->leaf_cfs_rq_list,
 						 &rq->leaf_cfs_rq_list);
 
 			init_sched_entity_p[i] = se;
 			se->cfs_rq = &rq->cfs;
 			se->my_q = cfs_rq;
-			se->load.weight = init_task_grp_load;
+			se->load.weight = init_task_group_load;
 			se->load.inv_weight =
-				div64_64(1ULL<<32, init_task_grp_load);
+				div64_64(1ULL<<32, init_task_group_load);
 			se->parent = NULL;
 		}
-		init_task_grp.shares = init_task_grp_load;
+		init_task_group.shares = init_task_group_load;
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -6725,9 +6725,9 @@ void set_curr_task(int cpu, struct task_struct *p)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* allocate runqueue etc for a new task group */
-struct task_grp *sched_create_group(void)
+struct task_group *sched_create_group(void)
 {
-	struct task_grp *tg;
+	struct task_group *tg;
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
 	struct rq *rq;
@@ -6800,7 +6800,7 @@ err:
 static void free_sched_group(struct rcu_head *rhp)
 {
 	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-	struct task_grp *tg = cfs_rq->tg;
+	struct task_group *tg = cfs_rq->tg;
 	struct sched_entity *se;
 	int i;
 
@@ -6819,7 +6819,7 @@ static void free_sched_group(struct rcu_head *rhp)
 }
 
 /* Destroy runqueue etc associated with a task group */
-void sched_destroy_group(struct task_grp *tg)
+void sched_destroy_group(struct task_group *tg)
 {
 	struct cfs_rq *cfs_rq;
 	int i;
@@ -6895,7 +6895,7 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
 	spin_unlock_irq(&rq->lock);
 }
 
-int sched_group_set_shares(struct task_grp *tg, unsigned long shares)
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
 
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 48748d04144d..6f87b31d233c 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -239,7 +239,7 @@ static int
 root_user_share_read_proc(char *page, char **start, off_t off, int count,
 				 int *eof, void *data)
 {
-	return sprintf(page, "%d\n", init_task_grp_load);
+	return sprintf(page, "%d\n", init_task_group_load);
 }
 
 static int
@@ -260,8 +260,8 @@ root_user_share_write_proc(struct file *file, const char __user *buffer,
 
 	mutex_lock(&root_user_share_mutex);
 
-	init_task_grp_load = shares;
-	rc = sched_group_set_shares(&init_task_grp, shares);
+	init_task_group_load = shares;
+	rc = sched_group_set_shares(&init_task_group, shares);
 
 	mutex_unlock(&root_user_share_mutex);
 
diff --git a/kernel/user.c b/kernel/user.c
index c6387fac932d..0c9a7870d08f 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,7 +51,7 @@ struct user_struct root_user = {
 	.session_keyring = &root_session_keyring,
 #endif
 #ifdef CONFIG_FAIR_USER_SCHED
-	.tg = &init_task_grp,
+	.tg = &init_task_group,
 #endif
 };
 