diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:02:57 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:02:57 -0400 |
commit | f541ae326fa120fa5c57433e4d9a133df212ce41 (patch) | |
tree | bdbd94ec72cfc601118051cb35e8617d55510177 /include/linux/sched.h | |
parent | e255357764f92afcafafbd4879b222b8c752065a (diff) | |
parent | 0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff) |
Merge branch 'linus' into perfcounters/core-v2
Merge reason: we have gathered quite a few conflicts, need to merge upstream
Conflicts:
arch/powerpc/kernel/Makefile
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/unistd_32.h
arch/x86/include/asm/unistd_64.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/irq.c
arch/x86/kernel/syscall_table_32.S
arch/x86/mm/iomap_32.c
include/linux/sched.h
kernel/Makefile
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 114 |
1 file changed, 71 insertions, 43 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 3aee42384f0e..75b2fc5306d8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -68,7 +68,7 @@ struct sched_param { | |||
68 | #include <linux/smp.h> | 68 | #include <linux/smp.h> |
69 | #include <linux/sem.h> | 69 | #include <linux/sem.h> |
70 | #include <linux/signal.h> | 70 | #include <linux/signal.h> |
71 | #include <linux/fs_struct.h> | 71 | #include <linux/path.h> |
72 | #include <linux/compiler.h> | 72 | #include <linux/compiler.h> |
73 | #include <linux/completion.h> | 73 | #include <linux/completion.h> |
74 | #include <linux/perf_counter.h> | 74 | #include <linux/perf_counter.h> |
@@ -98,6 +98,7 @@ struct futex_pi_state; | |||
98 | struct robust_list_head; | 98 | struct robust_list_head; |
99 | struct bio; | 99 | struct bio; |
100 | struct bts_tracer; | 100 | struct bts_tracer; |
101 | struct fs_struct; | ||
101 | 102 | ||
102 | /* | 103 | /* |
103 | * List of flags we want to share for kernel threads, | 104 | * List of flags we want to share for kernel threads, |
@@ -140,6 +141,8 @@ extern unsigned long nr_iowait(void); | |||
140 | extern u64 cpu_nr_switches(int cpu); | 141 | extern u64 cpu_nr_switches(int cpu); |
141 | extern u64 cpu_nr_migrations(int cpu); | 142 | extern u64 cpu_nr_migrations(int cpu); |
142 | 143 | ||
144 | extern unsigned long get_parent_ip(unsigned long addr); | ||
145 | |||
143 | struct seq_file; | 146 | struct seq_file; |
144 | struct cfs_rq; | 147 | struct cfs_rq; |
145 | struct task_group; | 148 | struct task_group; |
@@ -334,7 +337,9 @@ extern signed long schedule_timeout(signed long timeout); | |||
334 | extern signed long schedule_timeout_interruptible(signed long timeout); | 337 | extern signed long schedule_timeout_interruptible(signed long timeout); |
335 | extern signed long schedule_timeout_killable(signed long timeout); | 338 | extern signed long schedule_timeout_killable(signed long timeout); |
336 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | 339 | extern signed long schedule_timeout_uninterruptible(signed long timeout); |
340 | asmlinkage void __schedule(void); | ||
337 | asmlinkage void schedule(void); | 341 | asmlinkage void schedule(void); |
342 | extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); | ||
338 | 343 | ||
339 | struct nsproxy; | 344 | struct nsproxy; |
340 | struct user_namespace; | 345 | struct user_namespace; |
@@ -392,8 +397,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); | |||
392 | (mm)->hiwater_vm = (mm)->total_vm; \ | 397 | (mm)->hiwater_vm = (mm)->total_vm; \ |
393 | } while (0) | 398 | } while (0) |
394 | 399 | ||
395 | #define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm)) | 400 | static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) |
396 | #define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm) | 401 | { |
402 | return max(mm->hiwater_rss, get_mm_rss(mm)); | ||
403 | } | ||
404 | |||
405 | static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) | ||
406 | { | ||
407 | return max(mm->hiwater_vm, mm->total_vm); | ||
408 | } | ||
397 | 409 | ||
398 | extern void set_dumpable(struct mm_struct *mm, int value); | 410 | extern void set_dumpable(struct mm_struct *mm, int value); |
399 | extern int get_dumpable(struct mm_struct *mm); | 411 | extern int get_dumpable(struct mm_struct *mm); |
@@ -541,25 +553,8 @@ struct signal_struct { | |||
541 | 553 | ||
542 | struct list_head cpu_timers[3]; | 554 | struct list_head cpu_timers[3]; |
543 | 555 | ||
544 | /* job control IDs */ | ||
545 | |||
546 | /* | ||
547 | * pgrp and session fields are deprecated. | ||
548 | * use the task_session_Xnr and task_pgrp_Xnr routines below | ||
549 | */ | ||
550 | |||
551 | union { | ||
552 | pid_t pgrp __deprecated; | ||
553 | pid_t __pgrp; | ||
554 | }; | ||
555 | |||
556 | struct pid *tty_old_pgrp; | 556 | struct pid *tty_old_pgrp; |
557 | 557 | ||
558 | union { | ||
559 | pid_t session __deprecated; | ||
560 | pid_t __session; | ||
561 | }; | ||
562 | |||
563 | /* boolean value for session group leader */ | 558 | /* boolean value for session group leader */ |
564 | int leader; | 559 | int leader; |
565 | 560 | ||
@@ -1001,6 +996,7 @@ struct sched_class { | |||
1001 | struct rq *busiest, struct sched_domain *sd, | 996 | struct rq *busiest, struct sched_domain *sd, |
1002 | enum cpu_idle_type idle); | 997 | enum cpu_idle_type idle); |
1003 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 998 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
999 | int (*needs_post_schedule) (struct rq *this_rq); | ||
1004 | void (*post_schedule) (struct rq *this_rq); | 1000 | void (*post_schedule) (struct rq *this_rq); |
1005 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 1001 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); |
1006 | 1002 | ||
@@ -1057,6 +1053,9 @@ struct sched_entity { | |||
1057 | 1053 | ||
1058 | u64 nr_migrations; | 1054 | u64 nr_migrations; |
1059 | 1055 | ||
1056 | u64 start_runtime; | ||
1057 | u64 avg_wakeup; | ||
1058 | |||
1060 | #ifdef CONFIG_SCHEDSTATS | 1059 | #ifdef CONFIG_SCHEDSTATS |
1061 | u64 wait_start; | 1060 | u64 wait_start; |
1062 | u64 wait_max; | 1061 | u64 wait_max; |
@@ -1168,6 +1167,7 @@ struct task_struct { | |||
1168 | #endif | 1167 | #endif |
1169 | 1168 | ||
1170 | struct list_head tasks; | 1169 | struct list_head tasks; |
1170 | struct plist_node pushable_tasks; | ||
1171 | 1171 | ||
1172 | struct mm_struct *mm, *active_mm; | 1172 | struct mm_struct *mm, *active_mm; |
1173 | 1173 | ||
@@ -1179,6 +1179,8 @@ struct task_struct { | |||
1179 | /* ??? */ | 1179 | /* ??? */ |
1180 | unsigned int personality; | 1180 | unsigned int personality; |
1181 | unsigned did_exec:1; | 1181 | unsigned did_exec:1; |
1182 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an | ||
1183 | * execve */ | ||
1182 | pid_t pid; | 1184 | pid_t pid; |
1183 | pid_t tgid; | 1185 | pid_t tgid; |
1184 | 1186 | ||
@@ -1331,6 +1333,7 @@ struct task_struct { | |||
1331 | int lockdep_depth; | 1333 | int lockdep_depth; |
1332 | unsigned int lockdep_recursion; | 1334 | unsigned int lockdep_recursion; |
1333 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 1335 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
1336 | gfp_t lockdep_reclaim_gfp; | ||
1334 | #endif | 1337 | #endif |
1335 | 1338 | ||
1336 | /* journalling filesystem info */ | 1339 | /* journalling filesystem info */ |
@@ -1409,6 +1412,8 @@ struct task_struct { | |||
1409 | int curr_ret_stack; | 1412 | int curr_ret_stack; |
1410 | /* Stack of return addresses for return function tracing */ | 1413 | /* Stack of return addresses for return function tracing */ |
1411 | struct ftrace_ret_stack *ret_stack; | 1414 | struct ftrace_ret_stack *ret_stack; |
1415 | /* time stamp for last schedule */ | ||
1416 | unsigned long long ftrace_timestamp; | ||
1412 | /* | 1417 | /* |
1413 | * Number of functions that haven't been traced | 1418 | * Number of functions that haven't been traced |
1414 | * because of depth overrun. | 1419 | * because of depth overrun. |
@@ -1423,6 +1428,9 @@ struct task_struct { | |||
1423 | #endif | 1428 | #endif |
1424 | }; | 1429 | }; |
1425 | 1430 | ||
1431 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | ||
1432 | #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) | ||
1433 | |||
1426 | /* | 1434 | /* |
1427 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT | 1435 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT |
1428 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH | 1436 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH |
@@ -1454,16 +1462,6 @@ static inline int rt_task(struct task_struct *p) | |||
1454 | return rt_prio(p->prio); | 1462 | return rt_prio(p->prio); |
1455 | } | 1463 | } |
1456 | 1464 | ||
1457 | static inline void set_task_session(struct task_struct *tsk, pid_t session) | ||
1458 | { | ||
1459 | tsk->signal->__session = session; | ||
1460 | } | ||
1461 | |||
1462 | static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) | ||
1463 | { | ||
1464 | tsk->signal->__pgrp = pgrp; | ||
1465 | } | ||
1466 | |||
1467 | static inline struct pid *task_pid(struct task_struct *task) | 1465 | static inline struct pid *task_pid(struct task_struct *task) |
1468 | { | 1466 | { |
1469 | return task->pids[PIDTYPE_PID].pid; | 1467 | return task->pids[PIDTYPE_PID].pid; |
@@ -1474,6 +1472,11 @@ static inline struct pid *task_tgid(struct task_struct *task) | |||
1474 | return task->group_leader->pids[PIDTYPE_PID].pid; | 1472 | return task->group_leader->pids[PIDTYPE_PID].pid; |
1475 | } | 1473 | } |
1476 | 1474 | ||
1475 | /* | ||
1476 | * Without tasklist or rcu lock it is not safe to dereference | ||
1477 | * the result of task_pgrp/task_session even if task == current, | ||
1478 | * we can race with another thread doing sys_setsid/sys_setpgid. | ||
1479 | */ | ||
1477 | static inline struct pid *task_pgrp(struct task_struct *task) | 1480 | static inline struct pid *task_pgrp(struct task_struct *task) |
1478 | { | 1481 | { |
1479 | return task->group_leader->pids[PIDTYPE_PGID].pid; | 1482 | return task->group_leader->pids[PIDTYPE_PGID].pid; |
@@ -1499,17 +1502,23 @@ struct pid_namespace; | |||
1499 | * | 1502 | * |
1500 | * see also pid_nr() etc in include/linux/pid.h | 1503 | * see also pid_nr() etc in include/linux/pid.h |
1501 | */ | 1504 | */ |
1505 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | ||
1506 | struct pid_namespace *ns); | ||
1502 | 1507 | ||
1503 | static inline pid_t task_pid_nr(struct task_struct *tsk) | 1508 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
1504 | { | 1509 | { |
1505 | return tsk->pid; | 1510 | return tsk->pid; |
1506 | } | 1511 | } |
1507 | 1512 | ||
1508 | pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | 1513 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, |
1514 | struct pid_namespace *ns) | ||
1515 | { | ||
1516 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); | ||
1517 | } | ||
1509 | 1518 | ||
1510 | static inline pid_t task_pid_vnr(struct task_struct *tsk) | 1519 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
1511 | { | 1520 | { |
1512 | return pid_vnr(task_pid(tsk)); | 1521 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
1513 | } | 1522 | } |
1514 | 1523 | ||
1515 | 1524 | ||
@@ -1526,31 +1535,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) | |||
1526 | } | 1535 | } |
1527 | 1536 | ||
1528 | 1537 | ||
1529 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | 1538 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, |
1539 | struct pid_namespace *ns) | ||
1530 | { | 1540 | { |
1531 | return tsk->signal->__pgrp; | 1541 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
1532 | } | 1542 | } |
1533 | 1543 | ||
1534 | pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | ||
1535 | |||
1536 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) | 1544 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
1537 | { | 1545 | { |
1538 | return pid_vnr(task_pgrp(tsk)); | 1546 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
1539 | } | 1547 | } |
1540 | 1548 | ||
1541 | 1549 | ||
1542 | static inline pid_t task_session_nr(struct task_struct *tsk) | 1550 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, |
1551 | struct pid_namespace *ns) | ||
1543 | { | 1552 | { |
1544 | return tsk->signal->__session; | 1553 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
1545 | } | 1554 | } |
1546 | 1555 | ||
1547 | pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | ||
1548 | |||
1549 | static inline pid_t task_session_vnr(struct task_struct *tsk) | 1556 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
1550 | { | 1557 | { |
1551 | return pid_vnr(task_session(tsk)); | 1558 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
1552 | } | 1559 | } |
1553 | 1560 | ||
1561 | /* obsolete, do not use */ | ||
1562 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | ||
1563 | { | ||
1564 | return task_pgrp_nr_ns(tsk, &init_pid_ns); | ||
1565 | } | ||
1554 | 1566 | ||
1555 | /** | 1567 | /** |
1556 | * pid_alive - check that a task structure is not stale | 1568 | * pid_alive - check that a task structure is not stale |
@@ -1674,6 +1686,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | |||
1674 | return set_cpus_allowed_ptr(p, &new_mask); | 1686 | return set_cpus_allowed_ptr(p, &new_mask); |
1675 | } | 1687 | } |
1676 | 1688 | ||
1689 | /* | ||
1690 | * Architectures can set this to 1 if they have specified | ||
1691 | * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, | ||
1692 | * but then during bootup it turns out that sched_clock() | ||
1693 | * is reliable after all: | ||
1694 | */ | ||
1695 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | ||
1696 | extern int sched_clock_stable; | ||
1697 | #endif | ||
1698 | |||
1677 | extern unsigned long long sched_clock(void); | 1699 | extern unsigned long long sched_clock(void); |
1678 | 1700 | ||
1679 | extern void sched_clock_init(void); | 1701 | extern void sched_clock_init(void); |
@@ -1950,7 +1972,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *); | |||
1950 | /* Allocate a new mm structure and copy contents from tsk->mm */ | 1972 | /* Allocate a new mm structure and copy contents from tsk->mm */ |
1951 | extern struct mm_struct *dup_mm(struct task_struct *tsk); | 1973 | extern struct mm_struct *dup_mm(struct task_struct *tsk); |
1952 | 1974 | ||
1953 | extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); | 1975 | extern int copy_thread(unsigned long, unsigned long, unsigned long, |
1976 | struct task_struct *, struct pt_regs *); | ||
1954 | extern void flush_thread(void); | 1977 | extern void flush_thread(void); |
1955 | extern void exit_thread(void); | 1978 | extern void exit_thread(void); |
1956 | 1979 | ||
@@ -2035,6 +2058,11 @@ static inline int thread_group_empty(struct task_struct *p) | |||
2035 | #define delay_group_leader(p) \ | 2058 | #define delay_group_leader(p) \ |
2036 | (thread_group_leader(p) && !thread_group_empty(p)) | 2059 | (thread_group_leader(p) && !thread_group_empty(p)) |
2037 | 2060 | ||
2061 | static inline int task_detached(struct task_struct *p) | ||
2062 | { | ||
2063 | return p->exit_signal == -1; | ||
2064 | } | ||
2065 | |||
2038 | /* | 2066 | /* |
2039 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring | 2067 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring |
2040 | * subscriptions and synchronises with wait4(). Also used in procfs. Also | 2068 | * subscriptions and synchronises with wait4(). Also used in procfs. Also |