Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 86
 1 file changed, 53 insertions(+), 33 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf723308ed4..1c876e27ff93 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
+struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
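
The hunk above is the heart of this cleanup: the task_t typedef is replaced by a bare forward declaration. That is sufficient because this header only declares pointers to the type; in C, an incomplete struct type can be pointed to and passed around, and the full definition is needed only where the object is actually dereferenced. A minimal standalone sketch of that rule (task_prio_of is an illustrative name, not a kernel function):

#include <stdio.h>

struct task_struct;                     /* forward declaration: incomplete type */

/* Prototypes may freely take pointers to the incomplete type. */
static int task_prio_of(const struct task_struct *p);

struct task_struct {                    /* full definition, needed to dereference */
	int prio;
};

static int task_prio_of(const struct task_struct *p)
{
	return p->prio;                 /* dereference: full definition in scope */
}

int main(void)
{
	struct task_struct t = { .prio = 120 };
	printf("%d\n", task_prio_of(&t));  /* prints 120 */
	return 0;
}
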
@@ -383,7 +383,7 @@ struct signal_struct {
 	wait_queue_head_t wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t *curr_target;
+	struct task_struct *curr_target;
 
 	/* shared signal handling: */
 	struct sigpending shared_pending;
@@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
-typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
 #else
 static inline void prefetch_stack(struct task_struct *t) { }
 #endif
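
The prefetch_stack() hunk also shows the idiom this header uses throughout: behind a config or arch #ifdef, either declare the real function or provide an empty static inline stub, so call sites need no #ifdef of their own and the call compiles away to nothing. A self-contained sketch of the pattern, with ARCH_HAS_FOO and foo() as placeholder names:

#include <stdio.h>

/* #define ARCH_HAS_FOO */          /* an arch would define this in its headers */

#ifdef ARCH_HAS_FOO
extern void foo(int x);             /* real implementation lives elsewhere */
#else
static inline void foo(int x) { }   /* stub: callers compile, cost is zero */
#endif

int main(void)
{
	foo(42);                    /* always valid, stubbed or not */
	printf("done\n");
	return 0;
}
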
@@ -715,6 +714,8 @@ enum sleep_type {
 	SLEEP_INTERRUPTED,
 };
 
+struct prio_array;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -732,7 +733,7 @@ struct task_struct {
 	int load_weight;	/* for niceness load balancing purposes */
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	prio_array_t *array;
+	struct prio_array *array;
 
 	unsigned short ioprio;
 	unsigned int btrace_seq;
@@ -865,16 +866,34 @@ struct task_struct {
 	struct plist_head pi_waiters;
 	/* Deadlock detection and priority inheritance handling */
 	struct rt_mutex_waiter *pi_blocked_on;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
-	spinlock_t held_list_lock;
-	struct list_head held_list_head;
-# endif
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	unsigned int irq_events;
+	int hardirqs_enabled;
+	unsigned long hardirq_enable_ip;
+	unsigned int hardirq_enable_event;
+	unsigned long hardirq_disable_ip;
+	unsigned int hardirq_disable_event;
+	int softirqs_enabled;
+	unsigned long softirq_disable_ip;
+	unsigned int softirq_disable_event;
+	unsigned long softirq_enable_ip;
+	unsigned int softirq_enable_event;
+	int hardirq_context;
+	int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 30UL
+	u64 curr_chain_key;
+	int lockdep_depth;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	unsigned int lockdep_recursion;
+#endif
 
 	/* journalling filesystem info */
 	void *journal_info;
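
The new CONFIG_LOCKDEP block gives each task a fixed-depth record of the locks it currently holds: a held_locks array bounded by MAX_LOCK_DEPTH, a lockdep_depth counter, and a recursion guard. Purely to illustrate that data-structure shape, and not lockdep's actual validation logic, a depth-bounded per-task lock stack could look like this (all names below are illustrative):

#include <stdio.h>

#define MAX_LOCK_DEPTH 30UL

struct held_lock { const void *lock; }; /* real lockdep records much more */

struct task_locks {
	int lockdep_depth;              /* how many locks are currently held */
	struct held_lock held_locks[MAX_LOCK_DEPTH];
};

/* Push on acquire; the fixed bound keeps the bookkeeping O(1) per task. */
static int lock_acquired(struct task_locks *t, const void *lock)
{
	if (t->lockdep_depth >= (int)MAX_LOCK_DEPTH)
		return -1;              /* too deep: give up tracking */
	t->held_locks[t->lockdep_depth++].lock = lock;
	return 0;
}

static void lock_released(struct task_locks *t)
{
	if (t->lockdep_depth > 0)
		t->lockdep_depth--;     /* LIFO release in this sketch */
}

int main(void)
{
	struct task_locks t = { 0 };
	int a, b;
	lock_acquired(&t, &a);
	lock_acquired(&t, &b);
	printf("depth=%d\n", t.lockdep_depth);  /* prints depth=2 */
	lock_released(&t);
	lock_released(&t);
	return 0;
}
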
@@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {}
 extern void sched_idle_next(void);
 
 #ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p)	do { } while (0)
 #endif
 
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
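
Note the !CONFIG_RT_MUTEXES fallback above: with no priority inheritance, rt_mutex_getprio() simply reports p->normal_prio. With rt-mutexes enabled, a task's effective priority must also reflect the most urgent waiter blocked on a lock it holds. The toy model below sketches that relationship under the kernel convention that lower numeric values mean higher priority; it is an illustration, not the kernel's implementation:

#include <stdio.h>

struct toy_task {
	int normal_prio;        /* priority from the task's own policy */
	int top_waiter_prio;    /* best waiter priority, or -1 if none */
};

/* Effective priority: own priority, boosted to the top waiter's if better. */
static int toy_getprio(const struct toy_task *p)
{
	if (p->top_waiter_prio >= 0 && p->top_waiter_prio < p->normal_prio)
		return p->top_waiter_prio;  /* inherited (boosted) priority */
	return p->normal_prio;              /* the !CONFIG_RT_MUTEXES case */
}

int main(void)
{
	struct toy_task t = { .normal_prio = 120, .top_waiter_prio = 90 };
	printf("%d\n", toy_getprio(&t));    /* prints 90: boosted */
	t.top_waiter_prio = -1;
	printf("%d\n", toy_getprio(&t));    /* prints 120: no waiters */
	return 0;
}
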
@@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p)	do { } while (0)
 #endif
@@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p)	(p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
+			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
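
Finally, next_thread() shows why struct task_struct works everywhere task_t did: rcu_dereference() loads the next pointer safely under RCU, and list_entry() (the kernel's alias for container_of()) turns the embedded list_head back into the enclosing task_struct. The pointer arithmetic behind list_entry can be shown in isolation (the kernel's container_of adds type checking omitted here; toy_task is illustrative):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* container_of: recover the enclosing struct from a pointer to a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct toy_task {
	int pid;
	struct list_head thread_group;  /* embedded mid-struct, as in the kernel */
};

int main(void)
{
	struct toy_task a = { .pid = 1 }, b = { .pid = 2 };

	/* hand-link a two-element circular list */
	a.thread_group.next = &b.thread_group;
	b.thread_group.next = &a.thread_group;

	struct toy_task *n =
		list_entry(a.thread_group.next, struct toy_task, thread_group);
	printf("next pid=%d\n", n->pid);  /* prints next pid=2 */
	return 0;
}
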
