Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--  kernel/sched_clock.c | 30 +++++++++++++++++-------------
1 file changed, 17 insertions, 13 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414cc..390f33234bd0 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
 
 struct sched_clock_data {
         /*
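
This hunk only introduces the sched_clock_stable flag; nothing in this patch sets it, so behavior is unchanged until an architecture opts in. As a rough sketch of how an architecture with a synchronized, non-stop hardware clock might do that (hypothetical setup hook and predicate, not part of this diff):

        /* hypothetical arch-side code, not from this patch */
        extern int sched_clock_stable;

        static void __init example_arch_time_init(void)
        {
                if (example_clock_is_stable())  /* hypothetical predicate */
                        sched_clock_stable = 1;
        }
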
@@ -87,7 +88,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
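
The helpers named here compare u64 timestamps through the sign of their unsigned difference, so two values stay correctly ordered across a counter wrap as long as they lie within 2^63 of each other. A self-contained userspace sketch of the same trick (stdint types standing in for the kernel's u64/s64):

        #include <stdio.h>
        #include <stdint.h>

        /* pick the "earlier" of two possibly-wrapped counters */
        static inline uint64_t wrap_min(uint64_t x, uint64_t y)
        {
                return (int64_t)(x - y) < 0 ? x : y;
        }

        /* pick the "later" of two possibly-wrapped counters */
        static inline uint64_t wrap_max(uint64_t x, uint64_t y)
        {
                return (int64_t)(x - y) > 0 ? x : y;
        }

        int main(void)
        {
                uint64_t before = UINT64_MAX - 5;  /* just before the wrap */
                uint64_t after  = 10;              /* just past the wrap */

                /* a naive min/max would order these by raw value; the
                 * signed-difference test knows "after" is later */
                printf("min: %llu\n", (unsigned long long)wrap_min(before, after));
                printf("max: %llu\n", (unsigned long long)wrap_max(before, after));
                return 0;
        }
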
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
         s64 delta = now - scd->tick_raw;
         u64 clock, min_clock, max_clock;
 
-        WARN_ON_ONCE(!irqs_disabled());
-
         if (unlikely(delta < 0))
                 delta = 0;
 
         /*
          * scd->clock = clamp(scd->tick_gtod + delta,
          *                    max(scd->tick_gtod, scd->clock),
          *                    scd->tick_gtod + TICK_NSEC);
          */
 
         clock = scd->tick_gtod + delta;
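
The clamp spelled out in the comment keeps the filtered clock between the last value handed out (monotonicity) and one tick past the last GTOD timestamp (bounded drift). A worked fragment with made-up numbers, reusing wrap_min()/wrap_max() from the sketch above and a 1 ms stand-in for TICK_NSEC:

        const uint64_t TICK_NSEC_EXAMPLE = 1000000;   /* stand-in: 1 ms tick */

        uint64_t tick_gtod  = 5000000;  /* GTOD time at the last tick */
        uint64_t prev_clock = 5000200;  /* last value this cpu returned */
        uint64_t delta      = 3000000;  /* raw clock ran 3 ms past the tick */

        uint64_t clock     = tick_gtod + delta;                /* 8000000: runaway */
        uint64_t min_clock = wrap_max(tick_gtod, prev_clock);  /* 5000200 */
        uint64_t max_clock = tick_gtod + TICK_NSEC_EXAMPLE;    /* 6000000 */

        clock = wrap_max(clock, min_clock);  /* still 8000000 */
        clock = wrap_min(clock, max_clock);  /* clamped to 6000000 */
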
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-        struct sched_clock_data *scd = cpu_sdc(cpu);
         u64 now, clock, this_clock, remote_clock;
+        struct sched_clock_data *scd;
 
-        if (unlikely(!sched_clock_running))
-                return 0ull;
+        if (sched_clock_stable)
+                return sched_clock();
 
+        scd = cpu_sdc(cpu);
         WARN_ON_ONCE(!irqs_disabled());
         now = sched_clock();
 
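
With sched_clock_stable set, every cpu now returns raw sched_clock() and cross-cpu consistency is delegated to the hardware. Note the cpu_sdc() lookup also moves below the new fast path, so stable systems never touch the per-cpu data. The reconciliation that follows in the file, unchanged by this patch, runs only on the unstable path; its rough shape:

        /* rough shape of the remainder of sched_clock_cpu(), unchanged
         * by this patch and only reached on the unstable path */
        if (cpu != raw_smp_processor_id()) {
                /* take both cpus' scd locks and couple the two clocks,
                 * keeping the larger value so time stays monotonic */
        } else {
                /* local case: clamp against this cpu's own tick data */
        }
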
@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)
 
 void sched_clock_tick(void)
 {
-        struct sched_clock_data *scd = this_scd();
+        struct sched_clock_data *scd;
         u64 now, now_gtod;
 
+        if (sched_clock_stable)
+                return;
+
         if (unlikely(!sched_clock_running))
                 return;
 
         WARN_ON_ONCE(!irqs_disabled());
 
+        scd = this_scd();
         now_gtod = ktime_to_ns(ktime_get());
         now = sched_clock();
 
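
The early return makes the per-tick resynchronization a no-op on stable systems, which is safe because the stable path in sched_clock_cpu() never reads the per-cpu tick_raw/tick_gtod pair this function maintains. On the unstable path, the rest of the function, past this hunk's context, re-anchors those fields, roughly:

        /* rough sketch of the remainder of sched_clock_tick() in this
         * era, beyond the shown context: re-anchor the per-cpu pair
         * that __update_sched_clock() clamps against */
        scd->tick_raw  = now;       /* raw sched_clock() at this tick */
        scd->tick_gtod = now_gtod;  /* GTOD time at the same instant */
        __update_sched_clock(scd, now);
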
@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
         return sched_clock();
 }
 
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {