Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c238088aee..5395a6176f4b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -158,6 +158,8 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+extern unsigned long long time_sync_thresh;
+
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
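
Note: this hunk only adds the declaration of time_sync_thresh; how the threshold is consumed is not visible in this file. A plausible (purely hypothetical) consumer would resynchronize a per-CPU timestamp once it lags a shared maximum by more than time_sync_thresh nanoseconds, along these lines:

    /* Hypothetical sketch -- not part of this patch. Clamps a per-CPU
     * timestamp to a shared maximum once it lags by more than
     * time_sync_thresh nanoseconds. */
    static unsigned long long time_sync(unsigned long long local,
                                        unsigned long long *global_max)
    {
            if (local + time_sync_thresh < *global_max)
                    local = *global_max;    /* too far behind: jump forward */
            else if (local > *global_max)
                    *global_max = local;    /* record the new maximum */
            return local;
    }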
@@ -1551,6 +1553,35 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+	return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
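
The block above gives every caller a uniform per-CPU clock interface: on architectures without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the helpers collapse to stubs around the raw sched_clock(), while architectures that select it supply real implementations. A minimal sketch of how kernel code might time a section with it (measure_section() and do_something() are hypothetical, not part of this patch):

    static u64 measure_section(void)
    {
            u64 start, end;
            int cpu;

            cpu = get_cpu();                /* get CPU id, disable preemption */
            start = sched_clock_cpu(cpu);
            do_something();                 /* hypothetical workload */
            end = sched_clock_cpu(cpu);
            put_cpu();                      /* re-enable preemption */

            return end - start;             /* elapsed nanoseconds on this CPU */
    }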
@@ -1977,6 +2008,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
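
The new test_tsk_need_resched() deliberately mirrors the signal_pending() helper directly below it: both wrap a thread-flag test in unlikely(), so callers can check another task's TIF_NEED_RESCHED bit without open-coding the flag access. An illustrative (hypothetical) busy-wait using it:

    /* Illustrative only: spin until @p has been flagged for rescheduling. */
    static void wait_for_resched_mark(struct task_struct *p)
    {
            while (!test_tsk_need_resched(p))
                    cpu_relax();    /* CPU hint: we are busy-waiting */
    }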
@@ -2001,13 +2037,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
-#ifdef CONFIG_PREEMPT
+extern int _cond_resched(void);
+#ifdef CONFIG_PREEMPT_BKL
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
@@ -2015,6 +2051,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
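
Hoisting the _cond_resched() declaration out of the #ifdef is what makes cond_resched_bkl() possible: even on configurations where plain cond_resched() compiles to "return 0", code holding the Big Kernel Lock still gets an explicit reschedule point. A hedged sketch of the resulting usage pattern (the loop and its helpers are hypothetical):

    static void process_items_under_bkl(void)
    {
            lock_kernel();                  /* take the Big Kernel Lock */
            while (more_items()) {          /* hypothetical work predicate */
                    handle_one_item();      /* hypothetical per-item work */
                    cond_resched_bkl();     /* voluntary preemption point that
                                             * still fires when cond_resched()
                                             * is compiled out */
            }
            unlock_kernel();
    }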