Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/clocksource.h   |  3
-rw-r--r--  include/linux/compat.h        | 20
-rw-r--r--  include/linux/cputime.h       |  7
-rw-r--r--  include/linux/delayacct.h     |  1
-rw-r--r--  include/linux/jiffies.h       |  2
-rw-r--r--  include/linux/kernel_stat.h   | 14
-rw-r--r--  include/linux/mutex.h         |  4
-rw-r--r--  include/linux/posix-timers.h  | 14
-rw-r--r--  include/linux/sched.h         | 80
-rw-r--r--  include/linux/sched/sysctl.h  |  1
-rw-r--r--  include/linux/vtime.h         |  7
11 files changed, 85 insertions, 68 deletions
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index e315d04a2fd9..cfc75848a35d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -62,6 +62,8 @@ struct module;
  * @archdata:		arch-specific data
  * @suspend:		suspend function for the clocksource, if necessary
  * @resume:		resume function for the clocksource, if necessary
+ * @mark_unstable:	Optional function to inform the clocksource driver that
+ *			the watchdog marked the clocksource unstable
  * @owner:		module reference, must be set by clocksource in modules
  *
  * Note: This struct is not used in hotpathes of the timekeeping code
@@ -93,6 +95,7 @@ struct clocksource {
 	unsigned long flags;
 	void (*suspend)(struct clocksource *cs);
 	void (*resume)(struct clocksource *cs);
+	void (*mark_unstable)(struct clocksource *cs);
 
 	/* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
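
Not part of the patch above: a hedged sketch of how a clocksource driver might wire up the new mark_unstable() callback, e.g. to react when the watchdog declares the clocksource unstable. The driver, its read() routine and the log message are hypothetical; only the .mark_unstable member is what the hunk adds.

#include <linux/clocksource.h>
#include <linux/printk.h>

static u64 example_cs_read(struct clocksource *cs)
{
	return 0;	/* placeholder: a real driver would read its hardware counter */
}

static void example_cs_mark_unstable(struct clocksource *cs)
{
	/* driver-specific reaction, e.g. disabling a fast read path */
	pr_warn("%s: marked unstable by the clocksource watchdog\n", cs->name);
}

static struct clocksource example_cs = {
	.name		= "example",
	.rating		= 300,
	.read		= example_cs_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mark_unstable	= example_cs_mark_unstable,
};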
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 63609398ef9f..9e40be522793 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
 static inline bool in_compat_syscall(void) { return is_compat_task(); }
 #endif
 
-#else
+/**
+ * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * @nsec:	the nanoseconds value to be converted
+ *
+ * Returns the compat_timeval representation of the nsec parameter.
+ */
+static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+{
+	struct timeval tv;
+	struct compat_timeval ctv;
+
+	tv = ns_to_timeval(nsec);
+	ctv.tv_sec = tv.tv_sec;
+	ctv.tv_usec = tv.tv_usec;
+
+	return ctv;
+}
+
+#else /* !CONFIG_COMPAT */
 
 #define is_compat_task() (0)
 static inline bool in_compat_syscall(void) { return false; }
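
Not part of the patch: a hedged usage sketch of the new helper in a compat path that hands a nanosecond timestamp back to a 32-bit user buffer. The function name and the user pointer are hypothetical.

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

static int example_put_compat_stamp(s64 stamp_ns,
				    struct compat_timeval __user *utv)
{
	struct compat_timeval ctv = ns_to_compat_timeval(stamp_ns);

	/* hand the seconds/microseconds pair back to userspace */
	return copy_to_user(utv, &ctv, sizeof(ctv)) ? -EFAULT : 0;
}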
diff --git a/include/linux/cputime.h b/include/linux/cputime.h
index f2eb2ee535ca..a691dc4ddc13 100644
--- a/include/linux/cputime.h
+++ b/include/linux/cputime.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_CPUTIME_H
 #define __LINUX_CPUTIME_H
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/cputime.h>
 
 #ifndef cputime_to_nsecs
@@ -8,9 +9,5 @@
 	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
 #endif
 
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs)	\
-	usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
-#endif
-
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #endif /* __LINUX_CPUTIME_H */
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6cee17c22313..00e60f79a9cc 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -17,6 +17,7 @@
 #ifndef _LINUX_DELAYACCT_H
 #define _LINUX_DELAYACCT_H
 
+#include <uapi/linux/taskstats.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 589d14e970ad..624215cebee5 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
 	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
+extern u64 jiffies64_to_nsecs(u64 j);
+
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 /*
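
Not part of the patch: a minimal sketch of the new 64-bit conversion helper (get_jiffies_64() is the existing 64-bit jiffies read; the wrapper function is made up).

#include <linux/jiffies.h>

static u64 example_jiffies_now_ns(void)
{
	/* full 64-bit jiffies value converted straight to nanoseconds */
	return jiffies64_to_nsecs(get_jiffies_64());
}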
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 00f776816aa3..66be8b6beceb 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -9,7 +9,6 @@
 #include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/irq.h>
-#include <linux/cputime.h>
 
 /*
  * 'kernel_stat.h' contains the definitions needed for doing
@@ -78,15 +77,18 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
 	return kstat_cpu(cpu).irqs_sum;
 }
 
-extern void account_user_time(struct task_struct *, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t);
-extern void account_steal_time(cputime_t);
-extern void account_idle_time(cputime_t);
+extern void account_user_time(struct task_struct *, u64);
+extern void account_guest_time(struct task_struct *, u64);
+extern void account_system_time(struct task_struct *, int, u64);
+extern void account_system_index_time(struct task_struct *, u64,
+				      enum cpu_usage_stat);
+extern void account_steal_time(u64);
+extern void account_idle_time(u64);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 static inline void account_process_tick(struct task_struct *tsk, int user)
 {
-	vtime_account_user(tsk);
+	vtime_flush(tsk);
 }
 #else
 extern void account_process_tick(struct task_struct *, int user);
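
Not part of the patch: a hedged sketch of what a simple tick-based caller looks like now that the account_*() functions take plain nanoseconds instead of cputime_t. TICK_NSEC and HARDIRQ_OFFSET are existing kernel constants; the wrapper function is hypothetical.

#include <linux/kernel_stat.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>

static void example_account_one_tick(struct task_struct *p, int user_tick)
{
	u64 cputime = TICK_NSEC;	/* one tick worth of time, already in ns */

	if (user_tick)
		account_user_time(p, cputime);
	else
		account_system_time(p, HARDIRQ_OFFSET, cputime);
}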
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f2debd..7fffbfcd5430 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -156,10 +156,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 						unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 						unsigned int subclass);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
 do {									\
@@ -171,11 +173,13 @@ do { \
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
 extern int __must_check mutex_lock_killable(struct mutex *lock);
+extern void mutex_lock_io(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
 #endif
 
 /*
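
Not part of the patch: a hedged sketch of the new mutex_lock_io() variant, which behaves like mutex_lock() but accounts the blocked time as I/O wait. The mutex and the function around it are made up.

#include <linux/mutex.h>

static DEFINE_MUTEX(example_io_mutex);

static void example_touch_io_resource(void)
{
	mutex_lock_io(&example_io_mutex);	/* sleep time shows up as iowait */
	/* ... access the I/O-backed resource ... */
	mutex_unlock(&example_io_mutex);
}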
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 62d44c176071..64aa189efe21 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -8,19 +8,9 @@
 #include <linux/alarmtimer.h>
 
 
-static inline unsigned long long cputime_to_expires(cputime_t expires)
-{
-	return (__force unsigned long long)expires;
-}
-
-static inline cputime_t expires_to_cputime(unsigned long long expires)
-{
-	return (__force cputime_t)expires;
-}
-
 struct cpu_timer_list {
 	struct list_head entry;
-	unsigned long long expires, incr;
+	u64 expires, incr;
 	struct task_struct *task;
 	int firing;
 };
@@ -129,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
 void posix_cpu_timers_exit(struct task_struct *task);
 void posix_cpu_timers_exit_group(struct task_struct *task);
 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
-			   cputime_t *newval, cputime_t *oldval);
+			   u64 *newval, u64 *oldval);
 
 long clock_nanosleep_restart(struct restart_block *restart_block);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e4782eae076..c89b7fdec41e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,7 +29,6 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -461,12 +460,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern int __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
-
-static inline void io_schedule(void)
-{
-	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
-}
+extern void io_schedule(void);
 
 void __noreturn do_task_dead(void);
 
@@ -565,15 +562,13 @@ struct pacct_struct {
 	int			ac_flag;
 	long			ac_exitcode;
 	unsigned long		ac_mem;
-	cputime_t		ac_utime, ac_stime;
+	u64			ac_utime, ac_stime;
 	unsigned long		ac_minflt, ac_majflt;
 };
 
 struct cpu_itimer {
-	cputime_t expires;
-	cputime_t incr;
-	u32 error;
-	u32 incr_error;
+	u64 expires;
+	u64 incr;
 };
 
 /**
@@ -587,8 +582,8 @@ struct cpu_itimer {
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	cputime_t utime;
-	cputime_t stime;
+	u64 utime;
+	u64 stime;
 	raw_spinlock_t lock;
 #endif
 };
@@ -603,8 +598,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 
 /**
  * struct task_cputime - collected CPU time counts
- * @utime:		time spent in user mode, in &cputime_t units
- * @stime:		time spent in kernel mode, in &cputime_t units
+ * @utime:		time spent in user mode, in nanoseconds
+ * @stime:		time spent in kernel mode, in nanoseconds
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
  *
  * This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +607,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
  * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
-	cputime_t utime;
-	cputime_t stime;
+	u64 utime;
+	u64 stime;
 	unsigned long long sum_exec_runtime;
 };
 
@@ -622,13 +617,6 @@ struct task_cputime {
 #define prof_exp	stime
 #define sched_exp	sum_exec_runtime
 
-#define INIT_CPUTIME	\
-	(struct task_cputime) {					\
-		.utime = 0,					\
-		.stime = 0,					\
-		.sum_exec_runtime = 0,				\
-	}
-
 /*
  * This is the atomic variant of task_cputime, which can be used for
  * storing and updating task_cputime statistics without locking.
@@ -787,9 +775,9 @@ struct signal_struct {
 	 * in __exit_signal, except for the group leader.
 	 */
 	seqlock_t stats_lock;
-	cputime_t utime, stime, cutime, cstime;
-	cputime_t gtime;
-	cputime_t cgtime;
+	u64 utime, stime, cutime, cstime;
+	u64 gtime;
+	u64 cgtime;
 	struct prev_cputime prev_cputime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1668,11 +1656,11 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	cputime_t utime, stime;
+	u64 utime, stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-	cputime_t utimescaled, stimescaled;
+	u64 utimescaled, stimescaled;
 #endif
-	cputime_t gtime;
+	u64 gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqcount_t vtime_seqcount;
@@ -1824,7 +1812,7 @@ struct task_struct {
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_timexpd;	/* stime + utime since last update */
+	u64 acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
@@ -2269,17 +2257,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
-			 cputime_t *utime, cputime_t *stime);
-extern cputime_t task_gtime(struct task_struct *t);
+			 u64 *utime, u64 *stime);
+extern u64 task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
-				cputime_t *utime, cputime_t *stime)
+				u64 *utime, u64 *stime)
 {
 	*utime = t->utime;
 	*stime = t->stime;
 }
 
-static inline cputime_t task_gtime(struct task_struct *t)
+static inline u64 task_gtime(struct task_struct *t)
 {
 	return t->gtime;
 }
@@ -2287,23 +2275,23 @@ static inline cputime_t task_gtime(struct task_struct *t)
 
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 static inline void task_cputime_scaled(struct task_struct *t,
-					cputime_t *utimescaled,
-					cputime_t *stimescaled)
+					u64 *utimescaled,
+					u64 *stimescaled)
 {
 	*utimescaled = t->utimescaled;
 	*stimescaled = t->stimescaled;
 }
 #else
 static inline void task_cputime_scaled(struct task_struct *t,
-					cputime_t *utimescaled,
-					cputime_t *stimescaled)
+					u64 *utimescaled,
+					u64 *stimescaled)
 {
 	task_cputime(t, utimescaled, stimescaled);
 }
 #endif
 
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 
 /*
  * Per process flags
@@ -2522,10 +2510,18 @@ extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_init(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init_late(void)
+{
+}
+
 static inline void sched_clock_tick(void)
 {
 }
 
+static inline void clear_sched_clock_stable(void)
+{
+}
+
 static inline void sched_clock_idle_sleep_event(void)
 {
 }
@@ -2544,6 +2540,7 @@ static inline u64 local_clock(void)
 	return sched_clock();
 }
 #else
+extern void sched_clock_init_late(void);
 /*
  * Architectures can set this to 1 if they have specified
  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2551,7 +2548,6 @@ static inline u64 local_clock(void)
  * is reliable after all:
  */
 extern int sched_clock_stable(void);
-extern void set_sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
 extern void sched_clock_tick(void);
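
Not part of the patch: a hedged sketch of the prepare/finish pattern that the two new sched.h helpers enable — marking a custom wait as I/O wait without open-coding io_schedule(). The wait queue and completion flag are illustrative.

#include <linux/sched.h>
#include <linux/wait.h>

static void example_wait_for_io(wait_queue_head_t *wq, bool *done)
{
	int token;

	token = io_schedule_prepare();		/* flags the task as in_iowait */
	wait_event(*wq, READ_ONCE(*done));	/* any sleep here is counted as iowait */
	io_schedule_finish(token);		/* restores the previous in_iowait state */
}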
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 441145351301..49308e142aae 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern unsigned int sysctl_sched_autogroup_enabled;
 #endif
 
+extern int sysctl_sched_rr_timeslice;
 extern int sched_rr_timeslice;
 
 extern int sched_rr_handler(struct ctl_table *table, int write,
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index aa9bfea8804a..0681fe25abeb 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev)
 
 extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
-extern void vtime_account_user(struct task_struct *tsk);
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_user(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_user(struct task_struct *tsk);
 extern void vtime_user_enter(struct task_struct *tsk);
 
 static inline void vtime_user_exit(struct task_struct *tsk)
 {
 	vtime_account_user(tsk);
 }
+
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void vtime_account_user(struct task_struct *tsk) { }
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk)
 	/* On hard|softirq exit we always account to hard|softirq cputime */
 	vtime_account_system(tsk);
 }
+extern void vtime_flush(struct task_struct *tsk);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
 static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+static inline void vtime_flush(struct task_struct *tsk) { }
 #endif
 
 
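
Not part of the patch: a hedged sketch of how an architecture using CONFIG_VIRT_CPU_ACCOUNTING_NATIVE might implement the new vtime_flush() hook, pushing deltas it has batched since the last tick into the core accounting functions. The per-cpu scratch structure and its fields are hypothetical.

#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/vtime.h>

struct example_acct {		/* hypothetical per-cpu scratch state */
	u64 user_ns;
	u64 steal_ns;
};
static DEFINE_PER_CPU(struct example_acct, example_acct);

void vtime_flush(struct task_struct *tsk)
{
	struct example_acct *acct = this_cpu_ptr(&example_acct);

	if (acct->user_ns)
		account_user_time(tsk, acct->user_ns);
	if (acct->steal_ns)
		account_steal_time(acct->steal_ns);

	acct->user_ns = 0;
	acct->steal_ns = 0;
}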