path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 125
1 file changed, 79 insertions, 46 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 011db2f4c94c..9da5aa0771ef 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/fs_struct.h>
+#include <linux/path.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/pid.h>
@@ -97,6 +97,7 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio;
 struct bts_tracer;
+struct fs_struct;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -331,7 +332,9 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -389,8 +392,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 	(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
-#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
-#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
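
The hunk above turns the get_mm_hiwater_rss()/get_mm_hiwater_vm() macros into
static inline functions, which type-check the mm argument and evaluate it only
once. A minimal caller sketch (the report_hiwater() helper is hypothetical and
not part of this patch; both helpers return page counts, converted to kB here):

    /* Hypothetical caller: report peak RSS and peak virtual size of an mm. */
    static void report_hiwater(struct mm_struct *mm)
    {
            unsigned long hiwater_rss = get_mm_hiwater_rss(mm);  /* pages */
            unsigned long hiwater_vm  = get_mm_hiwater_vm(mm);   /* pages */

            printk(KERN_DEBUG "VmHWM: %lu kB, VmPeak: %lu kB\n",
                   hiwater_rss << (PAGE_SHIFT - 10),
                   hiwater_vm << (PAGE_SHIFT - 10));
    }
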
@@ -538,25 +548,8 @@ struct signal_struct {
 
 	struct list_head cpu_timers[3];
 
-	/* job control IDs */
-
-	/*
-	 * pgrp and session fields are deprecated.
-	 * use the task_session_Xnr and task_pgrp_Xnr routines below
-	 */
-
-	union {
-		pid_t pgrp __deprecated;
-		pid_t __pgrp;
-	};
-
 	struct pid *tty_old_pgrp;
 
-	union {
-		pid_t session __deprecated;
-		pid_t __session;
-	};
-
 	/* boolean value for session group leader */
 	int leader;
 
@@ -998,6 +991,7 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
@@ -1052,6 +1046,10 @@ struct sched_entity {
 	u64		last_wakeup;
 	u64		avg_overlap;
 
+	u64		start_runtime;
+	u64		avg_wakeup;
+	u64		nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64		wait_start;
 	u64		wait_max;
@@ -1067,7 +1065,6 @@ struct sched_entity {
 	u64		exec_max;
 	u64		slice_max;
 
-	u64		nr_migrations;
 	u64		nr_migrations_cold;
 	u64		nr_failed_migrations_affine;
 	u64		nr_failed_migrations_running;
@@ -1164,6 +1161,7 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
+	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1175,13 +1173,14 @@ struct task_struct {
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
+	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
+				 * execve */
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -1328,6 +1327,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
@@ -1453,16 +1453,6 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline void set_task_session(struct task_struct *tsk, pid_t session)
-{
-	tsk->signal->__session = session;
-}
-
-static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
-{
-	tsk->signal->__pgrp = pgrp;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1473,6 +1463,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
 	return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
 static inline struct pid *task_pgrp(struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PGID].pid;
@@ -1498,17 +1493,23 @@ struct pid_namespace;
  *
  * see also pid_nr() etc in include/linux/pid.h
  */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+			struct pid_namespace *ns);
 
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
 	return tsk->pid;
 }
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pid(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
 }
 
 
@@ -1525,31 +1526,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__pgrp;
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
 }
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pgrp(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
 }
 
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__session;
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
 }
 
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_session_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_session(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+	return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
 
 /**
  * pid_alive - check that a task structure is not stale
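
The pgrp/session helpers above now all funnel through the single
__task_pid_nr_ns(task, type, ns) routine; a NULL namespace appears to stand for
the caller's own pid namespace, which is what the *_vnr variants rely on. A
usage sketch under the tasklist/RCU locking rule quoted earlier in this patch
(log_task_ids() is illustrative only, not part of the change):

    /* Illustrative only: print a task's ids from the caller's namespace. */
    static void log_task_ids(struct task_struct *tsk)
    {
            rcu_read_lock();
            printk(KERN_DEBUG "%s: pid=%d pgrp=%d sid=%d\n", tsk->comm,
                   task_pid_vnr(tsk),
                   task_pgrp_vnr(tsk),  /* __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL) */
                   task_session_vnr(tsk));
            rcu_read_unlock();
    }
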
@@ -1673,6 +1677,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
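
The sched_clock_stable flag added above is meant to be set by architecture boot
code once it decides that sched_clock() is trustworthy after all. A sketch of
what that arch-side assignment might look like (the x86 constant-TSC test is
only an example of such a condition, not part of this patch):

    #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
            /* Example arch boot code: the clock turned out to be reliable. */
            if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                    sched_clock_stable = 1;
    #endif
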
@@ -1949,7 +1963,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
 extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
-extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+			struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
 extern void exit_thread(void);
 
@@ -2034,6 +2049,11 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
+static inline int task_detached(struct task_struct *p)
+{
+	return p->exit_signal == -1;
+}
+
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
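
task_detached() added above gives a name to the open-coded "p->exit_signal == -1"
test: a detached task sends no signal to its parent on exit and can be reaped
directly. Roughly how exit-path code might use it (reap_if_detached() is a
hypothetical wrapper and locking is omitted; this is a sketch, not code from
this patch):

    /* Sketch: reap a child that will never be waited on. */
    static void reap_if_detached(struct task_struct *p)
    {
            if (task_detached(p))
                    release_task(p);        /* no SIGCHLD, no zombie left behind */
    }
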
@@ -2090,6 +2110,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do { 	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
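
stack_not_used() added above starts at end_of_stack(p), steps past the
end-of-stack word and every still-zero (never written) word, and returns how
many bytes of the kernel stack were never touched. A simplified sketch of a
debug-time consumer (a fuller version would track a running low-water mark; the
fixed 1024-byte threshold and the check_stack_usage() name are illustrative):

    #ifdef CONFIG_DEBUG_STACK_USAGE
    /* Simplified sketch: complain at exit if the stack nearly ran out. */
    static void check_stack_usage(void)
    {
            unsigned long free = stack_not_used(current);   /* bytes never written */

            if (free < 1024)
                    printk(KERN_WARNING "%s (%d) left only %lu bytes of stack\n",
                           current->comm, task_pid_nr(current), free);
    }
    #endif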