author	Ingo Molnar <mingo@elte.hu>	2006-07-03 03:25:41 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-03 18:27:11 -0400
commit	36c8b586896f60cb91a4fd526233190b34316baf (patch)
tree	003246e1e676de33703daa979b3e3109ca202a89 /include/linux
parent	48f24c4da1ee7f3f22289cb85e8b8a73e4df4db5 (diff)
[PATCH] sched: cleanup, remove task_t, convert to struct task_struct
cleanup: remove task_t and convert all the uses to struct task_struct. I
introduced it for the scheduler anno and it was a mistake.

Conversion was mostly scripted, the result was reviewed and all secondary
whitespace and style impact (if any) was fixed up by hand.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/sched.h	55
1 files changed, 28 insertions, 27 deletions
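
One practical reason kernel style prefers the bare struct tag over a typedef (a general observation, not something stated in the commit message): code that only passes task pointers around never needs the full definition, and a struct tag may be forward-declared in any header, whereas repeating a typedef only became legal in C11. A minimal sketch, using a hypothetical header that is not part of this patch:

/* some_subsys.h - hypothetical example, not from the kernel tree */
#ifndef SOME_SUBSYS_H
#define SOME_SUBSYS_H

/* Forward declaration suffices for pointer use; no need to include <linux/sched.h>. */
struct task_struct;

extern void some_subsys_attach(struct task_struct *tsk);

#endif /* SOME_SUBSYS_H */

Under the old scheme such a header either had to include <linux/sched.h> or repeat "typedef struct task_struct task_t;", which C89/C99 forbid once the typedef is already visible.
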
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8ebddba4448d..c2797f04d931 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
+struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
@@ -383,7 +383,7 @@ struct signal_struct {
 	wait_queue_head_t wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t *curr_target;
+	struct task_struct *curr_target;
 
 	/* shared signal handling: */
 	struct sigpending shared_pending;
@@ -699,7 +699,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
 #else
 static inline void prefetch_stack(struct task_struct *t) { }
 #endif
@@ -1031,9 +1031,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -1042,7 +1042,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1060,27 +1061,27 @@ static inline void idle_task_exit(void) {}
 extern void sched_idle_next(void);
 
 #ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p) do { } while (0)
 #endif
 
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
@@ -1137,8 +1138,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1243,17 +1244,17 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p) do { } while (0)
 #endif
@@ -1279,13 +1280,13 @@ extern void wait_task_inactive(task_t * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p) (p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
+			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }