Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--    include/linux/sched.h    158
1 file changed, 103 insertions(+), 55 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8c216e057c94..b4c38bc8049c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/fs_struct.h>
+#include <linux/path.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/pid.h>
@@ -97,6 +97,7 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio;
 struct bts_tracer;
+struct fs_struct;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -137,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
+extern unsigned long get_parent_ip(unsigned long addr);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -202,7 +205,8 @@ extern unsigned long long time_sync_thresh;
 #define task_is_stopped_or_traced(task) \
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task) \
-				((task->state & TASK_UNINTERRUPTIBLE) != 0)
+				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value) \
 	do { (tsk)->state = (state_value); } while (0)
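
A note on the task_contributes_to_load() change above: TASK_UNINTERRUPTIBLE sleepers feed nr_uninterruptible() and therefore the load average, so before this hunk a task parked by the freezer still counted toward load for the whole time it was frozen. The new PF_FROZEN test excludes frozen tasks. A minimal userspace sketch of the new predicate (the two constants match the kernel headers of this era; struct toy_task is a stand-in, not the real task_struct):

    #include <stdio.h>

    #define TASK_UNINTERRUPTIBLE    2           /* from <linux/sched.h> */
    #define PF_FROZEN               0x00010000  /* "frozen for system suspend" */

    struct toy_task { long state; unsigned long flags; };

    /* The new macro's logic, written as a function for readability. */
    static int task_contributes_to_load(const struct toy_task *t)
    {
        return (t->state & TASK_UNINTERRUPTIBLE) != 0 &&
               (t->flags & PF_FROZEN) == 0;
    }

    int main(void)
    {
        struct toy_task sleeper = { TASK_UNINTERRUPTIBLE, 0 };
        struct toy_task frozen  = { TASK_UNINTERRUPTIBLE, PF_FROZEN };

        /* The sleeper still counts toward load; the frozen task no longer does. */
        printf("sleeper=%d frozen=%d\n",
               task_contributes_to_load(&sleeper),
               task_contributes_to_load(&frozen)); /* prints: sleeper=1 frozen=0 */
        return 0;
    }
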
@@ -297,17 +301,11 @@ extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    struct file *filp, void __user *buffer,
 				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
 extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
 }
-static inline void spawn_softlockup_task(void)
-{
-}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -316,6 +314,15 @@ static inline void touch_all_softlockup_watchdogs(void)
 }
 #endif
 
+#ifdef CONFIG_DETECT_HUNG_TASK
+extern unsigned int sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+					 struct file *filp, void __user *buffer,
+					 size_t *lenp, loff_t *ppos);
+#endif
 
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched __attribute__((__section__(".sched.text")))
@@ -331,7 +338,9 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -389,8 +398,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 		(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
-#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
-#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
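
The get_mm_hiwater_*() conversion above swaps function-like macros for static inlines: the mm argument gains real type checking and is guaranteed to be evaluated exactly once. The kernel's max() already guards against double evaluation with typeof temporaries, so the sketch below uses a deliberately naive macro just to show the class of bug that inline functions rule out by construction:

    #include <stdio.h>

    #define NAIVE_MAX(a, b) ((a) > (b) ? (a) : (b))

    static inline int inline_max(int a, int b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        int x = 2, y = 1, m;

        m = NAIVE_MAX(x++, y);  /* ((x++) > (y) ? (x++) : (y)): x++ runs twice */
        printf("macro:  m=%d x=%d\n", m, x);  /* m=3 x=4, not the expected m=2 x=3 */

        x = 2;
        m = inline_max(x++, y); /* the argument is evaluated exactly once */
        printf("inline: m=%d x=%d\n", m, x);  /* m=2 x=3 */
        return 0;
    }
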
@@ -538,25 +554,8 @@ struct signal_struct {
 
 	struct list_head cpu_timers[3];
 
-	/* job control IDs */
-
-	/*
-	 * pgrp and session fields are deprecated.
-	 * use the task_session_Xnr and task_pgrp_Xnr routines below
-	 */
-
-	union {
-		pid_t pgrp __deprecated;
-		pid_t __pgrp;
-	};
-
 	struct pid *tty_old_pgrp;
 
-	union {
-		pid_t session __deprecated;
-		pid_t __session;
-	};
-
 	/* boolean value for session group leader */
 	int leader;
 
@@ -998,6 +997,7 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int  (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
@@ -1052,6 +1052,10 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			start_runtime;
+	u64			avg_wakeup;
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1067,7 +1071,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;
 
-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
@@ -1164,6 +1167,7 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
+	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1175,13 +1179,14 @@ struct task_struct {
 /* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
+	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
+				 * execve */
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -1254,9 +1259,8 @@ struct task_struct {
 /* ipc stuff */
 	struct sysv_sem sysvsem;
 #endif
-#ifdef CONFIG_DETECT_SOFTLOCKUP
+#ifdef CONFIG_DETECT_HUNG_TASK
 /* hung task detection */
-	unsigned long last_switch_timestamp;
 	unsigned long last_switch_count;
 #endif
 /* CPU-specific state of this task */
@@ -1293,6 +1297,11 @@ struct task_struct {
 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	struct irqaction *irqaction;
+#endif
+
 	/* Protection of the PI data structures: */
 	spinlock_t pi_lock;
 
@@ -1328,6 +1337,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
@@ -1405,6 +1415,8 @@ struct task_struct {
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
+	/* time stamp for last schedule */
+	unsigned long long ftrace_timestamp;
 	/*
 	 * Number of functions that haven't been traced
 	 * because of depth overrun.
@@ -1419,6 +1431,9 @@ struct task_struct {
 #endif
 };
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -1450,16 +1465,6 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline void set_task_session(struct task_struct *tsk, pid_t session)
-{
-	tsk->signal->__session = session;
-}
-
-static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
-{
-	tsk->signal->__pgrp = pgrp;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1470,6 +1475,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
 	return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
 static inline struct pid *task_pgrp(struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PGID].pid;
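
The new comment above task_pgrp() states the contract this series leans on: without tasklist_lock or an RCU read-side section, the struct pid it returns can be freed by a concurrent sys_setsid()/sys_setpgid(). A hedged kernel-style sketch of a correct reader (read_pgrp_vnr() is an illustrative name, not an API added by this patch):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* Illustrative helper: pin the pid with rcu_read_lock() before
     * dereferencing what task_pgrp() hands back. */
    static pid_t read_pgrp_vnr(struct task_struct *tsk)
    {
        pid_t nr;

        rcu_read_lock();
        nr = pid_vnr(task_pgrp(tsk));
        rcu_read_unlock();

        return nr;
    }
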
@@ -1495,17 +1505,23 @@ struct pid_namespace;
  *
  * see also pid_nr() etc in include/linux/pid.h
  */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+			struct pid_namespace *ns);
 
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
 	return tsk->pid;
 }
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pid(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
 }
 
 
@@ -1522,31 +1538,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__pgrp;
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
 }
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pgrp(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
 }
 
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__session;
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
 }
 
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_session_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_session(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+	return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
 
 /**
  * pid_alive - check that a task structure is not stale
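
Net effect of the two hunks above: the out-of-line task_{pid,pgrp,session}_nr_ns() trio and the pid_vnr()-based *_vnr() helpers all collapse into a single out-of-line __task_pid_nr_ns(), where ns == NULL means "the caller's namespace". A sketch of the resulting equivalences (show_pid_accessors() is illustrative only, assuming this header):

    #include <linux/sched.h>

    static void show_pid_accessors(struct task_struct *tsk,
                                   struct pid_namespace *ns)
    {
        /* An explicit namespace ... */
        pid_t a = task_pgrp_nr_ns(tsk, ns);
        /* ... is now just sugar for the common helper: */
        pid_t b = __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);

        /* NULL selects the caller's namespace, i.e. the _vnr() flavour: */
        pid_t c = task_pgrp_vnr(tsk); /* == __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL) */

        (void)a; (void)b; (void)c;
    }
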
@@ -1670,6 +1689,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
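
sched_clock_stable gives architectures an escape hatch: an arch can select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK defensively in Kconfig and still flip this flag at boot once it proves its clock is trustworthy. A hedged sketch of the intended arch-side use; cpu_clock_is_constant() is a hypothetical probe, not part of this patch:

    #include <linux/init.h>
    #include <linux/sched.h>

    #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
    /* Hypothetical boot-time check, e.g. constant-TSC detection on x86. */
    static void __init arch_validate_sched_clock(void)
    {
        if (cpu_clock_is_constant())    /* hypothetical probe */
            sched_clock_stable = 1;     /* sched_clock() needs no filtering */
    }
    #endif
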
@@ -1946,7 +1975,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
 extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
-extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+			struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
 extern void exit_thread(void);
 
@@ -2031,6 +2061,11 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
+static inline int task_detached(struct task_struct *p)
+{
+	return p->exit_signal == -1;
+}
+
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
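
task_detached() gives a name to an idiom that exit-path code open-codes as p->exit_signal == -1: a detached task will not signal its parent on exit, so no one will reap it via wait(). A hedged sketch of the kind of call site the helper is meant to clean up (shaped like exit-path code, not an exact hunk from this series):

    #include <linux/sched.h>

    static void reap_if_detached(struct task_struct *p)
    {
        /* Before this helper: if (p->exit_signal == -1) ... */
        if (task_detached(p))
            release_task(p);    /* no parent will wait() for it */
    }
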
@@ -2087,6 +2122,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do {	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
 */
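
stack_not_used() leans on two properties: under CONFIG_DEBUG_STACK_USAGE thread stacks are zeroed at allocation, and end_of_stack() points at the end-of-stack magic word. Scanning upward from just past that canary, the first nonzero word marks the deepest point the downward-growing stack ever reached; everything below it was never touched. A runnable userspace mirror of the same scan (the array and magic value stand in for the real thread stack and STACK_END_MAGIC):

    #include <stdio.h>
    #include <string.h>

    #define STACK_END_MAGIC 0x57AC6E9D  /* same value the kernel plants */

    int main(void)
    {
        unsigned long stack[64];
        unsigned long *n = stack;

        memset(stack, 0, sizeof(stack));
        stack[0]  = STACK_END_MAGIC;    /* the canary end_of_stack() points at */
        stack[40] = 0xdeadbeef;         /* pretend the stack grew down to here */

        do {    /* skip over the canary, then walk the zeroed words */
            n++;
        } while (!*n);

        printf("never used: %lu bytes\n",
               (unsigned long)((char *)n - (char *)stack));
        return 0;
    }
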