Diffstat (limited to 'kernel/sched/clock.c')
 kernel/sched/clock.c | 41 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 59371549ddf0..c9b34c4e3ecc 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -58,6 +58,7 @@
 #include <linux/percpu.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
+#include <linux/static_key.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
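The new include pulls in the static-key (jump-label) machinery the rest of the patch builds on. As a rough sketch of the pattern, using only the <linux/static_key.h> API that this patch itself relies on (all names below are invented for illustration):

/* Sketch of the static-key pattern this patch adopts; illustrative only. */
#include <linux/static_key.h>

static struct static_key demo_key = STATIC_KEY_INIT;	/* starts false */

void hot_path(void)
{
	/* Emitted as a straight-line no-op until the key is enabled;
	 * the body is moved out of line. */
	if (static_key_false(&demo_key)) {
		/* rare path */
	}
	/* common path continues with no conditional branch */
}

void make_rare_path_active(void)
{
	static_key_slow_inc(&demo_key);	/* live-patches every branch site */
}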
@@ -74,7 +75,27 @@ EXPORT_SYMBOL_GPL(sched_clock);
 __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__read_mostly int sched_clock_stable;
+static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+
+int sched_clock_stable(void)
+{
+	if (static_key_false(&__sched_clock_stable))
+		return false;
+	return true;
+}
+
+void set_sched_clock_stable(void)
+{
+	if (!sched_clock_stable())
+		static_key_slow_dec(&__sched_clock_stable);
+}
+
+void clear_sched_clock_stable(void)
+{
+	/* XXX worry about clock continuity */
+	if (sched_clock_stable())
+		static_key_slow_inc(&__sched_clock_stable);
+}
 
 struct sched_clock_data {
 	u64 tick_raw;
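Note the inverted sense: __sched_clock_stable starts out false, so sched_clock_stable() reports true (stable) by default, and it is clear_sched_clock_stable() that pays the static_key_slow_inc() patching cost. The guards in set/clear keep the key's enable count at 0 or 1 rather than letting repeated calls stack up. A hypothetical caller sketch of how arch code would be expected to drive these hooks (not part of this patch; the function and predicate names are invented):

/* Hypothetical arch-side user of the new hooks; names are invented. */
void arch_sched_clock_init(void)
{
	if (arch_has_constant_clock())		/* invented predicate */
		set_sched_clock_stable();	/* no-op in the default state */
	else
		clear_sched_clock_stable();	/* flips all branch sites */
}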
@@ -234,7 +255,7 @@ u64 sched_clock_cpu(int cpu)
 	struct sched_clock_data *scd;
 	u64 clock;
 
-	if (sched_clock_stable)
+	if (sched_clock_stable())
 		return sched_clock();
 
 	if (unlikely(!sched_clock_running))
@@ -257,7 +278,7 @@ void sched_clock_tick(void)
 	struct sched_clock_data *scd;
 	u64 now, now_gtod;
 
-	if (sched_clock_stable)
+	if (sched_clock_stable())
 		return;
 
 	if (unlikely(!sched_clock_running))
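These two hunks are mechanical: the flag is no longer a plain int, so sched_clock_cpu() and sched_clock_tick() go through the sched_clock_stable() accessor. Since the accessor is defined in this same translation unit, the compiler can inline it, leaving the stable early-return as a patched no-op branch. Conceptually, the check reduces to something like the following (an illustrative expansion, not code from the patch):

/* What the stable check in sched_clock_cpu() inlines down to; illustrative. */
if (!static_key_false(&__sched_clock_stable))	/* key false => stable */
	return sched_clock();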
@@ -308,7 +329,10 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-	return sched_clock_cpu(cpu);
+	if (static_key_false(&__sched_clock_stable))
+		return sched_clock_cpu(cpu);
+
+	return sched_clock();
 }
 
 /*
@@ -320,7 +344,10 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-	return sched_clock_cpu(raw_smp_processor_id());
+	if (static_key_false(&__sched_clock_stable))
+		return sched_clock_cpu(raw_smp_processor_id());
+
+	return sched_clock();
 }
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
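Unlike the previous two sites, cpu_clock() and local_clock() test the key directly with the unstable case as the out-of-line branch: when the clock is stable they fall straight through to sched_clock(), skipping the per-CPU sched_clock_data machinery (and the sched_clock_running check inside it) entirely; cpu_clock() also ignores its cpu argument on that path. A minimal usage sketch of the now-cheaper fast path, with invented caller names:

/* Invented caller: cheap nanosecond timestamps via the stable fast path. */
static u64 stamp_delta(void)
{
	u64 t0 = local_clock();
	do_work();			/* invented */
	return local_clock() - t0;	/* ns elapsed, monotonic on this CPU */
}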
@@ -340,12 +367,12 @@ u64 sched_clock_cpu(int cpu)
 
 u64 cpu_clock(int cpu)
 {
-	return sched_clock_cpu(cpu);
+	return sched_clock();
 }
 
 u64 local_clock(void)
 {
-	return sched_clock_cpu(0);
+	return sched_clock();
 }
 
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
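Without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the clock is stable by definition, so cpu_clock() and local_clock() call sched_clock() directly. For reference, the unchanged sched_clock_cpu() this hunk sits inside (named in the hunk header) is, in this configuration, roughly:

/* Paraphrased from the surrounding unchanged code in this config. */
u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

so the change trims these two helpers down to the bare sched_clock() call, dropping the sched_clock_running guard from them.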