Diffstat (limited to 'kernel/sched/clock.c')
 kernel/sched/clock.c | 107 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 76 insertions(+), 31 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c3ae1446461c..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -26,9 +26,10 @@
  * at 0 on boot (but people really shouldn't rely on that).
  *
  * cpu_clock(i) -- can be used from any context, including NMI.
- * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
  * local_clock() -- is cpu_clock() on the current cpu.
  *
+ * sched_clock_cpu(i)
+ *
  * How:
  *
  * The implementation either uses sched_clock() when
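The header comment updated above documents the contract: cpu_clock(i) and
local_clock() return nanosecond timestamps and can be taken from any context,
including NMI. A minimal, hypothetical caller sketch (measure_region() and
do_work() are illustrative names, not part of this patch):

/* Hypothetical caller sketch -- only shows the documented contract above:
 * local_clock() returns nanoseconds, so a duration is a plain subtraction. */
static void measure_region(void)
{
        u64 t0, delta_ns;

        t0 = local_clock();
        do_work();                      /* illustrative placeholder */
        delta_ns = local_clock() - t0;

        pr_debug("region took %llu ns\n", (unsigned long long)delta_ns);
}

When the underlying clock is not stable, timestamps taken on different CPUs may
drift slightly relative to each other, which is why the per-cpu variants take an
explicit cpu argument.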
@@ -50,15 +51,6 @@
  * Furthermore, explicit sleep and wakeup hooks allow us to account for time
  * that is otherwise invisible (TSC gets stopped).
  *
- *
- * Notes:
- *
- * The !IRQ-safetly of sched_clock() and sched_clock_cpu() comes from things
- * like cpufreq interrupts that can change the base clock (TSC) multiplier
- * and cause funny jumps in time -- although the filtering provided by
- * sched_clock_cpu() should mitigate serious artifacts we cannot rely on it
- * in general since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
- * sched_clock().
  */
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
@@ -66,6 +58,8 @@
 #include <linux/percpu.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
+#include <linux/static_key.h>
+#include <linux/workqueue.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -82,7 +76,52 @@ EXPORT_SYMBOL_GPL(sched_clock);
 __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__read_mostly int sched_clock_stable;
+static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+static int __sched_clock_stable_early;
+
+int sched_clock_stable(void)
+{
+        return static_key_false(&__sched_clock_stable);
+}
+
+static void __set_sched_clock_stable(void)
+{
+        if (!sched_clock_stable())
+                static_key_slow_inc(&__sched_clock_stable);
+}
+
+void set_sched_clock_stable(void)
+{
+        __sched_clock_stable_early = 1;
+
+        smp_mb(); /* matches sched_clock_init() */
+
+        if (!sched_clock_running)
+                return;
+
+        __set_sched_clock_stable();
+}
+
+static void __clear_sched_clock_stable(struct work_struct *work)
+{
+        /* XXX worry about clock continuity */
+        if (sched_clock_stable())
+                static_key_slow_dec(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+
+void clear_sched_clock_stable(void)
+{
+        __sched_clock_stable_early = 0;
+
+        smp_mb(); /* matches sched_clock_init() */
+
+        if (!sched_clock_running)
+                return;
+
+        schedule_work(&sched_clock_work);
+}
 
 struct sched_clock_data {
         u64 tick_raw;
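sched_clock_stable() is now a static_key read: static_key_false() compiles to a
branch that gets patched in place by static_key_slow_inc()/static_key_slow_dec(),
so the stable-clock check is essentially free in the hot paths below. Patching
cannot be done from atomic context, which is presumably why clearing is deferred
to a workqueue while set_sched_clock_stable() flips the key directly. A
hypothetical sketch of the intended kind of caller (architecture clock code);
every helper name below is illustrative, not from this patch:

/* Hypothetical architecture-side callers; none of these names are defined
 * by this patch. */
static void arch_clock_setup(void)
{
        if (arch_counter_is_constant())         /* illustrative helper */
                set_sched_clock_stable();
}

static void arch_clock_watchdog_tick(void)
{
        if (arch_counter_misbehaved())          /* illustrative helper */
                clear_sched_clock_stable();     /* defers to a workqueue */
}

Because schedule_work() is safe from atomic context, clear_sched_clock_stable()
can be called from places where the actual static_key_slow_dec() could not run;
the key is dropped later from sched_clock_work.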
@@ -116,6 +155,20 @@ void sched_clock_init(void)
         }
 
         sched_clock_running = 1;
+
+        /*
+         * Ensure that it is impossible to not do a static_key update.
+         *
+         * Either {set,clear}_sched_clock_stable() must see sched_clock_running
+         * and do the update, or we must see their __sched_clock_stable_early
+         * and do the update, or both.
+         */
+        smp_mb(); /* matches {set,clear}_sched_clock_stable() */
+
+        if (__sched_clock_stable_early)
+                __set_sched_clock_stable();
+        else
+                __clear_sched_clock_stable(NULL);
 }
 
 /*
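The barrier pairing above is a store/fence/load handshake: each side first
publishes its own flag (__sched_clock_stable_early or sched_clock_running),
issues a full barrier, then reads the other side's flag, so the two paths
cannot both miss each other and leave the static_key without an update. A
self-contained userspace C11 model of the same ordering argument (a sketch,
not kernel code; build with "cc -pthread model.c"):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int stable_early;         /* models __sched_clock_stable_early */
static atomic_int running;              /* models sched_clock_running */
static atomic_int updates;              /* counts who performed the update */

static void *setter(void *arg)          /* models set_sched_clock_stable() */
{
        atomic_store_explicit(&stable_early, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        if (atomic_load_explicit(&running, memory_order_relaxed))
                atomic_fetch_add(&updates, 1);
        return arg;
}

static void *initializer(void *arg)     /* models sched_clock_init() */
{
        atomic_store_explicit(&running, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() */
        if (atomic_load_explicit(&stable_early, memory_order_relaxed))
                atomic_fetch_add(&updates, 1);
        return arg;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, setter, NULL);
        pthread_create(&b, NULL, initializer, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* 1 or 2 is expected; 0 would be the lost-update case the fences forbid. */
        printf("updates performed: %d\n", atomic_load(&updates));
        return 0;
}

Running the model always reports 1 or 2 updates; a result of 0 would correspond
to the window the paired smp_mb() calls close.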
@@ -242,20 +295,20 @@ u64 sched_clock_cpu(int cpu)
         struct sched_clock_data *scd;
         u64 clock;
 
-        WARN_ON_ONCE(!irqs_disabled());
-
-        if (sched_clock_stable)
+        if (sched_clock_stable())
                 return sched_clock();
 
         if (unlikely(!sched_clock_running))
                 return 0ull;
 
+        preempt_disable_notrace();
         scd = cpu_sdc(cpu);
 
         if (cpu != smp_processor_id())
                 clock = sched_clock_remote(scd);
         else
                 clock = sched_clock_local(scd);
+        preempt_enable_notrace();
 
         return clock;
 }
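With the WARN_ON_ONCE(!irqs_disabled()) gone, sched_clock_cpu() no longer
requires callers to disable interrupts; it only needs a stable CPU association
while it touches the per-cpu sched_clock_data, which the internal
preempt_disable_notrace()/preempt_enable_notrace() pair now provides (the
_notrace variants presumably keep the function safe to call from the tracing
path). A hypothetical debug helper calling it from ordinary, preemptible
process context (dump_all_cpu_clocks() is an illustrative name, not from this
patch):

/* Hypothetical debug helper, illustrative only. */
static void dump_all_cpu_clocks(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                pr_info("cpu%d: %llu ns\n", cpu,
                        (unsigned long long)sched_clock_cpu(cpu));
}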
@@ -265,7 +318,7 @@ void sched_clock_tick(void)
         struct sched_clock_data *scd;
         u64 now, now_gtod;
 
-        if (sched_clock_stable)
+        if (sched_clock_stable())
                 return;
 
         if (unlikely(!sched_clock_running))
@@ -316,14 +369,10 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-        u64 clock;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        clock = sched_clock_cpu(cpu);
-        local_irq_restore(flags);
+        if (!sched_clock_stable())
+                return sched_clock_cpu(cpu);
 
-        return clock;
+        return sched_clock();
 }
 
 /*
@@ -335,14 +384,10 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-        u64 clock;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        clock = sched_clock_cpu(smp_processor_id());
-        local_irq_restore(flags);
+        if (!sched_clock_stable())
+                return sched_clock_cpu(raw_smp_processor_id());
 
-        return clock;
+        return sched_clock();
 }
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
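In both wrappers the local_irq_save()/local_irq_restore() pair disappears:
sched_clock_cpu() handles its own CPU pinning now, and when the clock is stable
the patched-out static branch lets cpu_clock() and local_clock() fall straight
through to sched_clock(). local_clock() switches to raw_smp_processor_id()
because it may run with preemption enabled; even if the task migrates mid-call,
sched_clock_cpu() still goes through its filtered per-cpu path. An illustrative
expansion of the two cases (not definitions from this patch):

/* What local_clock() effectively becomes in each case; illustrative only. */
static inline u64 local_clock_when_stable(void)
{
        return sched_clock();           /* static branch patched out */
}

static inline u64 local_clock_when_unstable(void)
{
        return sched_clock_cpu(raw_smp_processor_id()); /* filtered per-cpu path */
}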
@@ -362,12 +407,12 @@ u64 sched_clock_cpu(int cpu)
 
 u64 cpu_clock(int cpu)
 {
-        return sched_clock_cpu(cpu);
+        return sched_clock();
 }
 
 u64 local_clock(void)
 {
-        return sched_clock_cpu(0);
+        return sched_clock();
 }
 
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */