Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--  kernel/sched_clock.c  47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index db69174b1178..7ec82c1c61c5 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,12 +24,12 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -44,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
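This hunk is the heart of the patch: sched_clock_stable is a real, writable flag only on architectures whose raw clock may be unstable; everywhere else it is a compile-time constant 1, so the compiler can fold every test on it and drop the unstable path outright. A freestanding sketch of that pattern, with illustrative names throughout:

	#include <stdint.h>

	/* As in the !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK branch above. */
	static const int clock_stable = 1;

	static uint64_t raw_clock(void)      { return 0; } /* plays sched_clock() */
	static uint64_t filtered_clock(void) { return 0; } /* plays the per-cpu path */

	uint64_t clock_read(void)
	{
		if (clock_stable)		/* constant: folded at compile time */
			return raw_clock();
		return filtered_clock();	/* dead code, eliminated entirely */
	}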
@@ -88,7 +92,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
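For context on the helpers being documented here: comparing via a signed difference keeps the ordering correct across a u64 wrap, since a value just past the wrap point compares as later than one just below it. A standalone sketch of the idea, using stdint types in place of the kernel's u64/s64 (treat the helper body as illustrative rather than a verbatim copy):

	#include <stdint.h>
	#include <stdio.h>

	/* Wrap-aware min: the signed difference decides which value is
	 * "earlier", even when one operand has wrapped past 2^64. */
	static uint64_t wrap_min(uint64_t x, uint64_t y)
	{
		return (int64_t)(x - y) < 0 ? x : y;
	}

	int main(void)
	{
		uint64_t just_wrapped   = 10;			/* i.e. 2^64 + 10 */
		uint64_t almost_wrapped = UINT64_MAX - 10;

		/* A plain min would pick 10; wrap_min treats 10 as post-wrap
		 * and picks the genuinely earlier value instead. */
		printf("%llu\n",
		       (unsigned long long)wrap_min(just_wrapped, almost_wrapped));
		return 0;
	}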
@@ -117,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
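The clamp spelled out in that comment bounds the per-cpu clock from below by max(scd->tick_gtod, scd->clock), so time never goes backwards, and from above by scd->tick_gtod + TICK_NSEC, so it never runs more than a tick ahead of the last gtod sync. A standalone sketch of the same arithmetic, with an illustrative TICK_NSEC value:

	#include <stdint.h>

	#define TICK_NSEC 1000000ULL	/* illustrative: one tick = 1 ms */

	/* Wrap-aware helpers, as defined earlier in this file. */
	static uint64_t wrap_min(uint64_t x, uint64_t y)
	{
		return (int64_t)(x - y) < 0 ? x : y;
	}

	static uint64_t wrap_max(uint64_t x, uint64_t y)
	{
		return (int64_t)(x - y) > 0 ? x : y;
	}

	/* Clamp a raw delta into [max(tick_gtod, prev_clock),
	 * tick_gtod + TICK_NSEC], per the comment in the hunk above. */
	uint64_t clamp_clock(uint64_t tick_gtod, uint64_t prev_clock, uint64_t delta)
	{
		uint64_t clock     = tick_gtod + delta;
		uint64_t min_clock = wrap_max(tick_gtod, prev_clock); /* monotonic */
		uint64_t max_clock = tick_gtod + TICK_NSEC; /* bounded drift */

		clock = wrap_max(clock, min_clock);
		return wrap_min(clock, max_clock);
	}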
@@ -149,8 +156,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
+
+	if (sched_clock_stable)
+		return sched_clock();
+
+	scd = cpu_sdc(cpu);
 
 	/*
 	 * Normally this is not called in NMI context - but if it is,
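Note why the scd initialization moved out of the declaration: with the stable check first, the common stable case returns the raw clock without ever touching per-cpu data or taking a lock. A sketch of that shape, with hypothetical names standing in for the kernel's:

	#include <stdint.h>

	#define SKETCH_NR_CPUS 64

	/* Illustrative stand-ins; none of these names come from the kernel. */
	struct sketch_scd { uint64_t clock; };
	static struct sketch_scd sketch_per_cpu[SKETCH_NR_CPUS];
	static int sketch_clock_stable;

	static uint64_t sketch_raw_clock(void) { return 0; } /* plays sched_clock() */

	/* Mirrors the reordered function: the stable check runs before any
	 * per-cpu state is touched, so the fast path does no lookup at all. */
	uint64_t sketch_clock_cpu(int cpu)
	{
		struct sketch_scd *scd;

		if (sketch_clock_stable)
			return sketch_raw_clock();

		scd = &sketch_per_cpu[cpu];
		return scd->clock;	/* the real code locks and filters here */
	}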
@@ -201,6 +213,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -243,22 +257,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
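With the fallback gone, one sched_clock_cpu() serves both configurations; when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is unset, the constant flag short-circuits it to sched_clock(). What this page does not show is who sets the flag in the unstable configuration; a hedged sketch of how an architecture might opt in (both names below are assumptions, not code from this patch):

	/* Both names here are hypothetical; the real call site would live in
	 * a companion arch patch, which this page does not show. */
	static int sched_clock_stable_sketch;

	void hypothetical_arch_clock_init(int clocksource_is_invariant)
	{
		/* The architecture asserts its raw clock is globally
		 * consistent; from then on sched_clock_cpu() would
		 * short-circuit straight to sched_clock(). */
		if (clocksource_is_invariant)
			sched_clock_stable_sketch = 1;
	}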