Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b339a0bef024..75bf49291c60 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1675,6 +1675,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
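Editor's note: the comment in the hunk above describes the intended use of the new flag. An architecture selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in its Kconfig, then sets sched_clock_stable to 1 if boot-time probing shows that sched_clock() is in fact reliable. A minimal sketch of such arch boot code follows; the probe helper arch_clock_is_constant() and the function name are hypothetical illustrations, not part of this patch:

#include <linux/init.h>
#include <linux/sched.h>

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
extern int arch_clock_is_constant(void);	/* hypothetical per-arch probe */

static void __init arch_mark_sched_clock_stable(void)
{
	/* sched_clock() turned out to be reliable after all */
	if (arch_clock_is_constant())
		sched_clock_stable = 1;
}
#endif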
@@ -2296,9 +2306,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 			long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+				struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
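Editor's note: the second hunk only declares the two new hooks; their definitions live elsewhere. A hedged sketch of how a setuid-style caller might gate a user switch on task_can_switch_user() is shown below. The function name and the -EPERM return are illustrative assumptions, not taken from this commit:

#include <linux/errno.h>
#include <linux/sched.h>

/* Hypothetical setuid-style helper; only task_can_switch_user() is real. */
static int example_switch_user(struct user_struct *new_user)
{
	/*
	 * Refuse the switch when the scheduler says the current task
	 * cannot run under the target user, e.g. an RT task moving to
	 * a user whose group has no RT runtime allocated.
	 */
	if (!task_can_switch_user(new_user, current))
		return -EPERM;

	/* ... the real uid/user_struct switch would happen here ... */
	return 0;
}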