| field | value | date |
|---|---|---|
| author | Steve French <sfrench@us.ibm.com> | 2008-05-06 13:55:32 -0400 |
| committer | Steve French <sfrench@us.ibm.com> | 2008-05-06 13:55:32 -0400 |
| commit | a815752ac0ffdb910e92958d41d28f4fb28e5296 | |
| tree | a3aa16a282354da0debe8e3a3a7ed8aac6e54001 /include/linux/sched.h | |
| parent | 5ade9deaaa3e1f7291467d97b238648e43eae15e | |
| parent | a15306365a16380f3bafee9e181ba01231d4acd7 | |
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/linux/sched.h')

 -rw-r--r--   include/linux/sched.h   69

1 file changed, 64 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 024d72b47a0c..0c35b0343a76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -158,6 +158,8 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+extern unsigned long long time_sync_thresh;
+
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -554,6 +556,14 @@ struct signal_struct {
 #define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
 #define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
+/*
+ * Pending notifications to parent.
+ */
+#define SIGNAL_CLD_STOPPED	0x00000010
+#define SIGNAL_CLD_CONTINUED	0x00000020
+#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
+
+#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
@@ -1167,7 +1177,7 @@ struct task_struct {
 	struct sighand_struct *sighand;
 
 	sigset_t blocked, real_blocked;
-	sigset_t saved_sigmask;	/* To be restored with TIF_RESTORE_SIGMASK */
+	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
 	struct sigpending pending;
 
 	unsigned long sas_ss_sp;
@@ -1543,6 +1553,35 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+	return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
@@ -1669,7 +1708,10 @@ extern struct pid_namespace init_pid_ns;
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
 		struct pid_namespace *ns);
 
-extern struct task_struct *find_task_by_pid(pid_t nr);
+static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
+{
+	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
+}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
@@ -1745,8 +1787,7 @@ extern void zap_other_threads(struct task_struct *p);
 extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
-extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
+extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
@@ -1967,6 +2008,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -1981,7 +2027,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
 
 static inline int need_resched(void)
 {
-	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+	return unlikely(test_tsk_need_resched(current));
 }
 
 /*
@@ -2148,6 +2194,19 @@ static inline void migration_init(void)
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
+#ifdef CONFIG_MM_OWNER
+extern void mm_update_next_owner(struct mm_struct *mm);
+extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
+#else
+static inline void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+
+static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+}
+#endif /* CONFIG_MM_OWNER */
+
 #endif /* __KERNEL__ */
 
 #endif
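The most visible interface change pulled in by this merge is the send_sigqueue() prototype: the explicit signal-number argument is dropped and the separate send_group_sigqueue() declaration is removed in favour of a single function taking a group flag. The sketch below is not part of the commit; it is a minimal illustration, assuming the signal number is carried in the queued sigqueue's siginfo, of how a hypothetical in-kernel caller would move from the old pair of calls to the new signature. The helper name deliver_queued_signal is invented for the example.

/*
 * Hypothetical caller, for illustration only (not from this commit).
 * Assumption: the signal number now travels inside the sigqueue itself,
 * so callers only say where to deliver it and whether delivery is
 * per-thread or group-wide.
 */
static int deliver_queued_signal(struct sigqueue *q, struct task_struct *t,
				 int to_group)
{
	/*
	 * Old API (removed in the hunk above):
	 *   send_sigqueue(sig, q, t);        - single thread
	 *   send_group_sigqueue(sig, q, t);  - whole thread group
	 */
	return send_sigqueue(q, t, to_group);
}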
