Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/thread_info.h |  2
-rw-r--r--  include/asm-m32r/system.h      |  2
-rw-r--r--  include/asm-sh/system.h        |  2
-rw-r--r--  include/linux/sched.h          | 55
4 files changed, 31 insertions, 30 deletions
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 8bc9869e5765..8adcde0934ca 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -68,7 +68,7 @@ struct thread_info {
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
 #endif /* !__ASSEMBLY */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 66c4742f09e7..311cebf44eff 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -18,7 +18,7 @@
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.
  *
- * `next' and `prev' should be task_t, but it isn't always defined
+ * `next' and `prev' should be struct task_struct, but it isn't always defined
  */
 
 #define switch_to(prev, next, last) do { \
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index b752e5cbb830..ce2e60664a86 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -12,7 +12,7 @@
  */
 
 #define switch_to(prev, next, last) do { \
-	task_t *__last; \
+	struct task_struct *__last; \
 	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
 	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
 	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8ebddba4448d..c2797f04d931 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
+struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
@@ -383,7 +383,7 @@ struct signal_struct {
 	wait_queue_head_t wait_chldexit; /* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t *curr_target;
+	struct task_struct *curr_target;
 
 	/* shared signal handling: */
 	struct sigpending shared_pending;
@@ -699,7 +699,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
699 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) 699 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
700 700
701#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 701#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
702extern void prefetch_stack(struct task_struct*); 702extern void prefetch_stack(struct task_struct *t);
703#else 703#else
704static inline void prefetch_stack(struct task_struct *t) { } 704static inline void prefetch_stack(struct task_struct *t) { }
705#endif 705#endif
@@ -1031,9 +1031,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -1042,7 +1042,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1060,27 +1061,27 @@ static inline void idle_task_exit(void) {}
 extern void sched_idle_next(void);
 
 #ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p) do { } while (0)
 #endif
 
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
@@ -1137,8 +1138,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1243,17 +1244,17 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p) do { } while (0)
 #endif
@@ -1279,13 +1280,13 @@ extern void wait_task_inactive(task_t * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p) (p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
+			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
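
The substance of this patch is the removal of the task_t typedef from include/linux/sched.h: the typedef is replaced by a bare "struct task_struct;" forward declaration, and every prototype that used task_t now spells out struct task_struct. The sketch below is a minimal, self-contained illustration of why the forward declaration is enough for the header. It is not kernel code: the file name, the *_demo functions and the prio field are hypothetical, chosen only to mirror the prototypes touched above. Pointer parameters and return types never need the full definition; only code that dereferences the struct or allocates it by value does.

/* forward_decl_demo.c -- minimal sketch (not kernel code) of the
 * forward-declaration pattern sched.h relies on after this patch. */
#include <stdio.h>

struct task_struct;                           /* forward declaration only */

/* Prototypes taking or returning pointers compile without the definition,
 * exactly like the extern declarations in sched.h. */
static int task_prio_demo(const struct task_struct *p);
static struct task_struct *pick_task_demo(void);

/* The full definition is needed only where members are accessed
 * or an object is defined by value. */
struct task_struct {
	int prio;                             /* hypothetical field for the demo */
};

static struct task_struct idle = { .prio = 140 };

static int task_prio_demo(const struct task_struct *p)
{
	return p->prio;                       /* dereference: definition required here */
}

static struct task_struct *pick_task_demo(void)
{
	return &idle;
}

int main(void)
{
	printf("prio = %d\n", task_prio_demo(pick_task_demo()));
	return 0;
}

Built with, say, "gcc -Wall forward_decl_demo.c", the prototypes above the definition compile fine; moving the member access above the definition would not. That is the constraint users of sched.h live under, and it is why a plain forward declaration can replace the typedef without touching anything beyond the spelling of the type.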