author      Ingo Molnar <mingo@elte.hu>    2011-04-18 08:53:18 -0400
committer   Ingo Molnar <mingo@elte.hu>    2011-04-18 08:53:33 -0400
commit      6ddafdaab3f809b110ada253d2f2d4910ebd3ac5 (patch)
tree        366bb7513511a05b6e11ab89bfe3b2dbd1d62a03 /include/linux/sched.h
parent      3905c54f2bd2c6f937f87307987ca072eabc3e7b (diff)
parent      bd8e7dded88a3e1c085c333f19ff31387616f71a (diff)
Merge branch 'sched/locking' into sched/core
Merge reason: the rq locking changes are stable,
propagate them into the .40 queue.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e43e5b0ab0b5..d9ca3aa511ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
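The prototype change above goes hand in hand with the new per-task on_cpu field added later in this patch: the mutex spinner can now look at the owning task directly instead of its thread_info. A minimal sketch of the idea behind such a spin loop, assuming a lock->owner field and simplified exit conditions (illustrative only, not the kernel's actual mutex_spin_on_owner()):

/*
 * Illustrative sketch, not the kernel's real implementation: adaptive mutex
 * spinning keeps busy-waiting as long as the current owner is still running
 * on a CPU, because it is then likely to release the lock soon.  With the
 * prototype change the owner is a task_struct, so owner->on_cpu can be
 * tested directly.  The lock->owner field is assumed to exist here (it is
 * only present in some configurations).
 */
static int spin_on_owner_sketch(struct mutex *lock, struct task_struct *owner)
{
	while (lock->owner == owner) {
		if (!owner->on_cpu || need_resched())
			return 0;	/* owner went to sleep, or we must reschedule */
		cpu_relax();		/* owner still running: keep spinning */
	}
	return 1;			/* owner changed: the lock was released */
}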
@@ -1038,8 +1038,12 @@ struct sched_domain;
 #define WF_FORK		0x02		/* child wakeup after fork */
 
 #define ENQUEUE_WAKEUP		1
-#define ENQUEUE_WAKING		2
-#define ENQUEUE_HEAD		4
+#define ENQUEUE_HEAD		2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING		0
+#endif
 
 #define DEQUEUE_SLEEP		1
 
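The flag reshuffle above makes ENQUEUE_WAKING an SMP-only flag that collapses to 0 on uniprocessor builds, so any test of it becomes a compile-time constant there. A hedged sketch of how an enqueue hook might consume these flags (the function and its body are made up for illustration):

/*
 * Sketch only, not a real scheduling class: shows how the reworked flags
 * are meant to be tested.  On !CONFIG_SMP, ENQUEUE_WAKING is 0, so the
 * first branch is dead code the compiler can drop; on SMP it distinguishes
 * a plain enqueue from one that followed a sched_class::task_waking() call.
 */
static void enqueue_task_sketch(struct rq *rq, struct task_struct *p, int flags)
{
	if (flags & ENQUEUE_WAKING) {
		/* SMP wakeup path: task_waking() already ran for this task */
	}
	if (flags & ENQUEUE_HEAD) {
		/* caller asked for queueing at the head of the list, not the tail */
	}
	/* ... place the task on the class's runqueue structure ... */
}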
@@ -1057,12 +1061,11 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			       int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
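Both callback changes above drop a struct rq argument: after the locking rework, select_task_rq() and task_waking() are called without a runqueue lock held, so no particular rq is passed in. A sketch of how a scheduling class would declare the reworked hooks (the example names and the trivial placement policy are made up):

/*
 * Illustrative sketch of a sched_class using the new prototypes; not an
 * actual kernel scheduling class.  Only the two reworked SMP hooks are
 * filled in; the remaining callbacks are omitted.
 */
static int select_task_rq_example(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p);	/* trivial placement policy: stay on the current CPU */
}

static void task_waking_example(struct task_struct *p)
{
	/* pre-wakeup bookkeeping; note: no rq argument and no rq lock held */
}

static const struct sched_class example_sched_class = {
	.select_task_rq	= select_task_rq_example,
	.task_waking	= task_waking_example,
	/* ... mandatory enqueue/dequeue/pick_next_task callbacks omitted ... */
};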
@@ -1190,10 +1193,10 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	struct task_struct *wake_entry;
+	int on_cpu;
 #endif
+	int on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
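The hunk above replaces the architecture-conditional oncpu with an unconditional on_cpu, hoists on_rq into task_struct itself, and adds wake_entry, which lets one CPU queue a task for wakeup on another (the queue is drained from scheduler_ipi(), declared further down). A rough sketch of what the two state fields express, assuming a simplified wakeup path (not actual kernel code):

/*
 * Sketch of the semantics of the new fields:
 * p->on_rq  - the task is queued on some runqueue (it may or may not run),
 * p->on_cpu - the task is still physically executing on a CPU, which can
 *             remain true for a short while after it was dequeued.
 */
static int needs_full_activation_sketch(struct task_struct *p)
{
	if (p->on_rq)
		return 0;		/* still queued: no re-activation needed */

	while (p->on_cpu)
		cpu_relax();		/* wait until the task has fully left its CPU */

	return 1;			/* fully descheduled: enqueue it again */
}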
@@ -1261,6 +1264,7 @@ struct task_struct {
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
 
 	pid_t pid;
 	pid_t tgid;
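The new sched_contributes_to_load bit lets the wakeup path remember whether the sleeping task was counted in the load average, so that accounting can be completed later, once the target runqueue lock is actually taken. A sketch of the intended use, assuming the usual "uninterruptible sleep contributes to load" rule (an assumption based on the field name, not a copy of the kernel code):

/*
 * Sketch: record at wakeup time whether the task was in uninterruptible
 * sleep and therefore counted in the load average, so that the matching
 * nr_uninterruptible adjustment can be deferred.
 */
static void note_load_contribution_sketch(struct task_struct *p)
{
	p->sched_contributes_to_load = (p->state & TASK_UNINTERRUPTIBLE) != 0;
}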
@@ -2179,8 +2183,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void scheduler_ipi(void) { }
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
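scheduler_ipi() is the hook an architecture's reschedule-IPI handler is expected to call so the scheduler can process wakeups queued remotely via wake_entry; the empty !CONFIG_SMP stub keeps callers compiling on uniprocessor builds. A sketch of such a call site (the handler name and registration details are made up):

/*
 * Illustrative call site, not code from any particular architecture: the
 * reschedule-IPI handler simply forwards into the scheduler, which drains
 * the list of remotely queued wakeups.
 */
static irqreturn_t reschedule_interrupt_sketch(int irq, void *dev_id)
{
	scheduler_ipi();	/* process wakeups queued by other CPUs */
	return IRQ_HANDLED;
}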