author		Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
commit		5e34437840d33554f69380584311743b39e8fbeb (patch)
tree		e081135619ee146af5efb9ee883afca950df5757 /include/linux/sched.h
parent		77d05632baee21b1cef8730d7c06aa69601e4dca (diff)
parent		d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)

Merge branch 'linus' into core/softlockup

Conflicts:
	kernel/sysctl.c

Diffstat (limited to 'include/linux/sched.h')

 include/linux/sched.h | 136 ++++++++++++++++++++++++--------------
 1 file changed, 90 insertions(+), 46 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d05e2b3ae41a..763b525227dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/fs_struct.h>
+#include <linux/path.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/pid.h>
@@ -97,6 +97,7 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio;
 struct bts_tracer;
+struct fs_struct;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -137,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
+extern unsigned long get_parent_ip(unsigned long addr);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -334,7 +337,9 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -392,8 +397,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 	(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
-#define get_mm_hiwater_rss(mm)	max((mm)->hiwater_rss, get_mm_rss(mm))
-#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+	return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+	return max(mm->hiwater_vm, mm->total_vm);
+}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
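(Aside: the hunk above replaces macros with static inline functions, which
type-check their argument and evaluate it exactly once. A minimal sketch of
the hazard the macro form left open; lookup_mm() is a hypothetical helper,
used here only to make the double evaluation visible:

	/* old macro form: "mm" is textually expanded, and thus evaluated, twice */
	#define get_mm_hiwater_vm(mm)	max((mm)->hiwater_vm, (mm)->total_vm)

	unsigned long hi = get_mm_hiwater_vm(lookup_mm(pid));	/* calls lookup_mm() twice */

With the inline function, lookup_mm() runs once, and passing anything other
than a struct mm_struct pointer fails at compile time.)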
@@ -541,25 +553,8 @@ struct signal_struct {
 
 	struct list_head cpu_timers[3];
 
-	/* job control IDs */
-
-	/*
-	 * pgrp and session fields are deprecated.
-	 * use the task_session_Xnr and task_pgrp_Xnr routines below
-	 */
-
-	union {
-		pid_t pgrp __deprecated;
-		pid_t __pgrp;
-	};
-
 	struct pid *tty_old_pgrp;
 
-	union {
-		pid_t session __deprecated;
-		pid_t __session;
-	};
-
 	/* boolean value for session group leader */
 	int leader;
 
@@ -1001,6 +996,7 @@ struct sched_class {
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
@@ -1055,6 +1051,10 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			start_runtime;
+	u64			avg_wakeup;
+	u64			nr_migrations;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1070,7 +1070,6 @@ struct sched_entity {
 	u64			exec_max;
 	u64			slice_max;
 
-	u64			nr_migrations;
 	u64			nr_migrations_cold;
 	u64			nr_failed_migrations_affine;
 	u64			nr_failed_migrations_running;
@@ -1167,6 +1166,7 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
+	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
 
@@ -1178,13 +1178,14 @@ struct task_struct {
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
+	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
+				 * execve */
 	pid_t pid;
 	pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
-#endif
+
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
@@ -1330,6 +1331,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
@@ -1407,6 +1409,8 @@ struct task_struct {
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
+	/* time stamp for last schedule */
+	unsigned long long ftrace_timestamp;
 	/*
 	 * Number of functions that haven't been traced
 	 * because of depth overrun.
@@ -1421,6 +1425,9 @@ struct task_struct {
 #endif
 };
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
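(Aside: tsk_cpumask() is "future-safe" in that callers stop open-coding
&tsk->cpus_allowed, so if the field's representation later changes, only
the macro body has to follow. A usage sketch, assuming a task p and a CPU
number cpu:

	/* accessor style instead of &p->cpus_allowed */
	if (!cpumask_test_cpu(cpu, tsk_cpumask(p)))
		return -EINVAL;

cpumask_test_cpu() takes a const struct cpumask *, which is exactly what
the macro yields.)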
@@ -1452,16 +1459,6 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline void set_task_session(struct task_struct *tsk, pid_t session)
-{
-	tsk->signal->__session = session;
-}
-
-static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
-{
-	tsk->signal->__pgrp = pgrp;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1472,6 +1469,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
 	return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
 static inline struct pid *task_pgrp(struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PGID].pid;
@@ -1497,17 +1499,23 @@ struct pid_namespace;
  *
  * see also pid_nr() etc in include/linux/pid.h
  */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+			struct pid_namespace *ns);
 
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
 	return tsk->pid;
 }
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
+{
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pid(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
 }
 
 
@@ -1524,31 +1532,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__pgrp;
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
 }
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_pgrp(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
 }
 
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+					struct pid_namespace *ns)
 {
-	return tsk->signal->__session;
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
 }
 
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_session_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_session(tsk));
+	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+	return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
 
 /**
  * pid_alive - check that a task structure is not stale
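(Aside: after this consolidation every numeric-id helper is a thin wrapper
around __task_pid_nr_ns(), parameterized only by pid type and namespace; a
NULL namespace means "as seen by the caller", which is what the _vnr
variants pass. A sketch of the resulting call pattern, assuming a task p
reached under rcu_read_lock() and some struct pid_namespace *ns:

	pid_t pgrp, sid;

	rcu_read_lock();
	pgrp = task_pgrp_vnr(p);		/* pgrp in current's pid namespace */
	sid  = task_session_nr_ns(p, ns);	/* session id in an explicit ns */
	rcu_read_unlock();

The rcu lock matters for the reason given in the comment added above
task_pgrp(): a concurrent sys_setsid()/sys_setpgid() can change the ids.)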
@@ -1672,6 +1683,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
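(Aside: the intent is that architecture boot code flips this flag once it
has proven its clock trustworthy. A hypothetical arch-setup sketch; the
feature predicate is illustrative, not the actual x86 code:

	#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	/* boot-time CPU setup, once the TSC is known to be invariant */
	if (cpu_has_invariant_tsc())	/* hypothetical predicate */
		sched_clock_stable = 1;
	#endif

With the flag set, the unstable-clock machinery can trust raw sched_clock()
readings instead of filtering them.)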
@@ -1948,7 +1969,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
 extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
-extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+			struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
 extern void exit_thread(void);
 
@@ -2033,6 +2055,11 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
+static inline int task_detached(struct task_struct *p)
+{
+	return p->exit_signal == -1;
+}
+
 /*
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
@@ -2089,6 +2116,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+	unsigned long *n = end_of_stack(p);
+
+	do {	/* Skip over canary */
+		n++;
+	} while (!*n);
+
+	return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
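(Aside: with CONFIG_DEBUG_STACK_USAGE the thread stack is zero-filled at
allocation, so scanning upward from end_of_stack() for the first non-zero
word finds the deepest point the stack ever reached; the return value is
the number of bytes never touched. A simplified sketch of the kind of
consumer this enables -- not the exact kernel code -- warning when a task
came close to overflowing its stack:

	static void check_stack_usage(void)
	{
		unsigned long free = stack_not_used(current);

		/* less than 1KB of the stack was never written */
		if (free < 1024)
			printk(KERN_WARNING
			       "%s used greatest stack depth: %lu bytes left\n",
			       current->comm, free);
	}

The real consumer would also track a low-water mark across tasks; the
sketch shows only the measurement.)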
@@ -2293,9 +2333,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+					struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {