Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 99 insertions(+), 20 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 49e617fa0f66..b0ad6f30679e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -107,13 +107,25 @@ extern unsigned long nr_iowait(void);
 
 #include <asm/processor.h>
 
+/*
+ * Task state bitmask. NOTE! These bits are also
+ * encoded in fs/proc/array.c: get_task_state().
+ *
+ * We have two separate sets of flags: task->state
+ * is about runnability, while task->exit_state are
+ * about the task exiting. Confusing, but this way
+ * modifying one set can't modify the other one by
+ * mistake.
+ */
 #define TASK_RUNNING		0
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
 #define TASK_STOPPED		4
 #define TASK_TRACED		8
+/* in tsk->exit_state */
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
+/* in tsk->state again */
 #define TASK_NONINTERACTIVE	64
 
 #define __set_task_state(tsk, state_value)		\
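[Editor's note] The interruptible/uninterruptible split above drives the canonical kernel sleep pattern. As a minimal sketch, not part of this patch: the wait-queue helpers come from linux/wait.h, and `condition` and `wq` stand in for driver state.

    /* Sketch: sleep until `condition` becomes true; TASK_INTERRUPTIBLE
     * lets a pending signal cut the sleep short. */
    static DECLARE_WAIT_QUEUE_HEAD(wq);
    static int condition;

    static int wait_for_condition(void)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
                    if (condition)
                            break;
                    if (signal_pending(current)) {
                            finish_wait(&wq, &wait);
                            return -ERESTARTSYS;
                    }
                    schedule();              /* sleep until wake_up(&wq) */
            }
            finish_wait(&wq, &wait);         /* back to TASK_RUNNING */
            return 0;
    }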
@@ -237,6 +249,36 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+#ifdef ATOMIC64_INIT
+#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
+typedef atomic64_t mm_counter_t;
+#else /* !ATOMIC64_INIT */
+/*
+ * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
+ * that is, at 16TB if using 4kB page size.
+ */
+#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
+typedef atomic_t mm_counter_t;
+#endif /* !ATOMIC64_INIT */
+
+#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
 #define add_mm_counter(mm, member, value) (mm)->_##member += (value)
@@ -244,6 +286,20 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define dec_mm_counter(mm, member) (mm)->_##member--
 typedef unsigned long mm_counter_t;
 
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
+#define get_mm_rss(mm)					\
+	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
+#define update_hiwater_rss(mm)	do {			\
+	unsigned long _rss = get_mm_rss(mm);		\
+	if ((mm)->hiwater_rss < _rss)			\
+		(mm)->hiwater_rss = _rss;		\
+} while (0)
+#define update_hiwater_vm(mm)	do {			\
+	if ((mm)->hiwater_vm < (mm)->total_vm)		\
+		(mm)->hiwater_vm = (mm)->total_vm;	\
+} while (0)
+
 struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
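[Editor's note] The point of these accessor macros is that call sites never see the counter representation: the same line compiles to a plain increment, an atomic_inc() or an atomic64_inc() depending on NR_CPUS, CONFIG_SPLIT_PTLOCK_CPUS and ATOMIC64_INIT. A hedged sketch of a caller (the function name is hypothetical, not from this patch):

    /* Hypothetical caller: account one newly mapped anonymous page and
     * return the total resident size, whatever mm_counter_t really is. */
    static unsigned long account_anon_page(struct mm_struct *mm)
    {
            inc_mm_counter(mm, anon_rss);   /* ++, atomic_inc or atomic64_inc */
            return get_mm_rss(mm);          /* file_rss + anon_rss */
    }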
@@ -267,15 +323,20 @@ struct mm_struct {
 						 * by mmlist_lock
 						 */
 
+	/* Special counters, in some configurations protected by the
+	 * page_table_lock, in other configurations by being atomic.
+	 */
+	mm_counter_t _file_rss;
+	mm_counter_t _anon_rss;
+
+	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
+	unsigned long hiwater_vm;	/* High-water virtual memory usage */
+
+	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
+	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
-	unsigned long total_vm, locked_vm, shared_vm;
-	unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes;
-
-	/* Special counters protected by the page_table_lock */
-	mm_counter_t _rss;
-	mm_counter_t _anon_rss;
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
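[Editor's note] The hiwater fields only ever ratchet upward, so any path that can shrink rss or total_vm must sample them first via the update_hiwater_* macros defined above. An illustrative call site (the function name is an assumption, not from this patch):

    /* Illustrative only: record the peaks before tearing mappings down,
     * mirroring what an unmap path is expected to do. */
    static void example_before_unmap(struct mm_struct *mm)
    {
            update_hiwater_rss(mm);         /* rss is about to drop */
            update_hiwater_vm(mm);          /* total_vm is about to drop */
            /* ... proceed to unmap pages ... */
    }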
@@ -296,11 +357,6 @@ struct mm_struct {
 	/* aio bits */
 	rwlock_t		ioctx_list_lock;
 	struct kioctx		*ioctx_list;
-
-	struct kioctx		default_kioctx;
-
-	unsigned long hiwater_rss;	/* High-water RSS usage */
-	unsigned long hiwater_vm;	/* High-water virtual memory usage */
 };
 
 struct sighand_struct {
@@ -883,7 +939,7 @@ extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
 #else
 static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
-	if (!cpus_intersects(new_mask, cpu_online_map))
+	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
@@ -1006,6 +1062,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1026,6 +1083,11 @@ extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned lon
 #define SEND_SIG_PRIV	((struct siginfo *) 1)
 #define SEND_SIG_FORCED	((struct siginfo *) 2)
 
+static inline int is_si_special(const struct siginfo *info)
+{
+	return info <= SEND_SIG_FORCED;
+}
+
 /* True if we are on the alternate signal stack. */
 
 static inline int on_sig_stack(unsigned long sp)
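[Editor's note] is_si_special() works because the SEND_SIG_* sentinels are the small pseudo-pointer values 0, 1 and 2, so a single pointer comparison covers them all; they must never be dereferenced. A minimal sketch of the guard a send path needs (the function is hypothetical, not from this patch):

    /* Illustrative only: gate any inspection of *info on is_si_special(),
     * since the sentinels are not real siginfo structures. */
    static int example_inspect_info(struct siginfo *info)
    {
            if (is_si_special(info))
                    return 1;               /* kernel-internal send, nothing to read */
            return info->si_code > 0;       /* safe: info points at real data */
    }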
@@ -1153,7 +1215,7 @@ extern void unhash_process(struct task_struct *p);
 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
- * pins the final release of task.io_context.
+ * pins the final release of task.io_context.  Also protects ->cpuset.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
@@ -1169,32 +1231,49 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
+#ifndef __HAVE_THREAD_FUNCTIONS
+
+#define task_thread_info(task) (task)->thread_info
+
+static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+{
+	*task_thread_info(p) = *task_thread_info(org);
+	task_thread_info(p)->task = p;
+}
+
+static inline unsigned long *end_of_stack(struct task_struct *p)
+{
+	return (unsigned long *)(p->thread_info + 1);
+}
+
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
 {
-	set_ti_thread_flag(tsk->thread_info,flag);
+	set_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
 {
-	clear_ti_thread_flag(tsk->thread_info,flag);
+	clear_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
 {
-	return test_and_set_ti_thread_flag(tsk->thread_info,flag);
+	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
 {
-	return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
+	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 {
-	return test_ti_thread_flag(tsk->thread_info,flag);
+	return test_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
 static inline void set_tsk_need_resched(struct task_struct *tsk)
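[Editor's note] Routing every access through task_thread_info() lets an architecture define __HAVE_THREAD_FUNCTIONS in its asm/thread_info.h and relocate thread_info without touching generic code. One hedged example of what the indirection buys, a stack-end canary check built on end_of_stack(); the magic value is an assumption for illustration only, not part of this patch:

    /* Sketch under assumptions: if the word at end_of_stack() is seeded
     * with a known magic at fork time, finding anything else there means
     * the kernel stack grew down into struct thread_info. */
    #define EXAMPLE_STACK_END_MAGIC	0x57AC6E9DUL	/* assumed constant */

    static inline int example_stack_overflowed(struct task_struct *p)
    {
            return *end_of_stack(p) != EXAMPLE_STACK_END_MAGIC;
    }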
@@ -1265,12 +1344,12 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
 
 static inline unsigned int task_cpu(const struct task_struct *p)
 {
-	return p->thread_info->cpu;
+	return task_thread_info(p)->cpu;
 }
 
 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
-	p->thread_info->cpu = cpu;
+	task_thread_info(p)->cpu = cpu;
 }
 
 #else
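[Editor's note] The same indirection applies to the per-task CPU number: callers should use the accessor rather than reaching into thread_info, and on UP builds (the #else branch) task_cpu() simply collapses to 0. A minimal usage sketch with a hypothetical caller:

    /* Hypothetical caller: prefer the CPU the task last ran on; compiles
     * unchanged on SMP and UP because only the accessor differs. */
    static unsigned int example_preferred_cpu(struct task_struct *p)
    {
            return task_cpu(p);
    }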