Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h | 38
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c238088aee..0c35b0343a76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -158,6 +158,8 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+extern unsigned long long time_sync_thresh;
+
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -1551,6 +1553,35 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+	return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
@@ -1977,6 +2008,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -1991,7 +2027,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
 
 static inline int need_resched(void)
 {
-	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+	return unlikely(test_tsk_need_resched(current));
 }
 
 /*
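
The second hunk declares the CONFIG_HAVE_UNSTABLE_SCHED_CLOCK interface: on architectures that do not select the option, the new per-CPU clock hooks collapse to empty inline stubs and sched_clock_cpu() simply forwards to sched_clock(), so existing callers see no behavioural change. Below is a minimal userspace sketch of that compile-time fallback pattern; the u64 typedef, the dummy sched_clock() body and the main() driver are stand-ins for illustration, not kernel code.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Stand-in for the raw clock; the kernel's sched_clock() is per-arch. */
static unsigned long long sched_clock(void)
{
	return 123456789ULL;
}

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/* Fallback: per-CPU queries just forward to the global clock. */
static inline u64 sched_clock_cpu(int cpu)
{
	(void)cpu;
	return sched_clock();
}

static inline void sched_clock_tick(void)
{
}
#else
/* With the option enabled, real per-CPU implementations live elsewhere. */
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_tick(void);
#endif

int main(void)
{
	sched_clock_tick();
	printf("cpu0 clock: %llu\n", (unsigned long long)sched_clock_cpu(0));
	return 0;
}

The last two hunks are a small refactor on top of that: need_resched() now goes through the new test_tsk_need_resched() helper applied to current, so the same TIF_NEED_RESCHED test can also be made against an arbitrary task.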