 include/linux/sched.h |   14 +++-----------
 kernel/sched_clock.c  |   19 +++++++++++++++++--
 2 files changed, 20 insertions(+), 13 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ea436bc1a0e2..5850bfb968a8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
 }
@@ -1573,8 +1567,6 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 074edc989379..204991a0bfa7 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
+static __read_mostly int sched_clock_running;
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
@@ -70,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
@@ -248,6 +248,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif
 
 unsigned long long cpu_clock(int cpu)
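
Note on the change: with CONFIG_HAVE_UNSTABLE_SCHED_CLOCK disabled, sched_clock_cpu() now falls back to the raw sched_clock() but reports 0 until sched_clock_init() has set the now-shared sched_clock_running flag. The stand-alone user-space sketch below is illustrative only (it is not kernel code; the clock_gettime()-based sched_clock() and the main() driver are stand-ins) and shows that guard pattern in isolation.

	/*
	 * Sketch of the "not running yet" guard used by the
	 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK fallback in this patch:
	 * queries made before init completes return 0 rather than an
	 * arbitrary value.
	 */
	#include <stdio.h>
	#include <time.h>

	static int sched_clock_running;		/* set once init has run */

	static unsigned long long sched_clock(void)
	{
		struct timespec ts;

		/* stand-in clock source for this sketch */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	}

	static void sched_clock_init(void)
	{
		sched_clock_running = 1;
	}

	static unsigned long long sched_clock_cpu(int cpu)
	{
		(void)cpu;			/* single clock in this sketch */

		if (!sched_clock_running)	/* too early: report 0, as the patch does */
			return 0;

		return sched_clock();
	}

	int main(void)
	{
		printf("before init: %llu\n", sched_clock_cpu(0));	/* prints 0 */
		sched_clock_init();
		printf("after init:  %llu\n", sched_clock_cpu(0));	/* real ns value */
		return 0;
	}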