about summary refs log tree commit diff stats
path: root/kernel/sched_clock.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--  kernel/sched_clock.c  31
1 files changed, 20 insertions, 11 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 7ec82c1c61c5..819f17ac796e 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -45,9 +45,6 @@ static __read_mostly int sched_clock_running;
45 45
46#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 46#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
47__read_mostly int sched_clock_stable; 47__read_mostly int sched_clock_stable;
48#else
49static const int sched_clock_stable = 1;
50#endif
51 48
52struct sched_clock_data { 49struct sched_clock_data {
53 /* 50 /*
@@ -116,14 +113,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
116 s64 delta = now - scd->tick_raw; 113 s64 delta = now - scd->tick_raw;
117 u64 clock, min_clock, max_clock; 114 u64 clock, min_clock, max_clock;
118 115
119 WARN_ON_ONCE(!irqs_disabled());
120
121 if (unlikely(delta < 0)) 116 if (unlikely(delta < 0))
122 delta = 0; 117 delta = 0;
123 118
124 if (unlikely(!sched_clock_running))
125 return 0ull;
126
127 /* 119 /*
128 * scd->clock = clamp(scd->tick_gtod + delta, 120 * scd->clock = clamp(scd->tick_gtod + delta,
129 * max(scd->tick_gtod, scd->clock), 121 * max(scd->tick_gtod, scd->clock),
@@ -213,18 +205,20 @@ u64 sched_clock_cpu(int cpu)
213 return clock; 205 return clock;
214} 206}
215 207
216#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
217
218void sched_clock_tick(void) 208void sched_clock_tick(void)
219{ 209{
220 struct sched_clock_data *scd = this_scd(); 210 struct sched_clock_data *scd;
221 u64 now, now_gtod; 211 u64 now, now_gtod;
222 212
213 if (sched_clock_stable)
214 return;
215
223 if (unlikely(!sched_clock_running)) 216 if (unlikely(!sched_clock_running))
224 return; 217 return;
225 218
226 WARN_ON_ONCE(!irqs_disabled()); 219 WARN_ON_ONCE(!irqs_disabled());
227 220
221 scd = this_scd();
228 now_gtod = ktime_to_ns(ktime_get()); 222 now_gtod = ktime_to_ns(ktime_get());
229 now = sched_clock(); 223 now = sched_clock();
230 224
@@ -257,6 +251,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
257} 251}
258EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 252EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
259 253
254#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
255
256void sched_clock_init(void)
257{
258 sched_clock_running = 1;
259}
260
261u64 sched_clock_cpu(int cpu)
262{
263 if (unlikely(!sched_clock_running))
264 return 0;
265
266 return sched_clock();
267}
268
260#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 269#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
261 270
262unsigned long long cpu_clock(int cpu) 271unsigned long long cpu_clock(int cpu)