| author | Peter Zijlstra <peterz@infradead.org> | 2013-11-28 13:38:42 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-01-13 09:13:13 -0500 |
| commit | 35af99e646c7f7ea46dc2977601e9e71a51dadd5 | (patch) |
| tree | d999820d233844278549cd826c4bfd6c7aa1ecc8 | /kernel |
| parent | ef08f0fff87630d4f67ceb09514d8b444df833f8 | (diff) |
sched/clock, x86: Use a static_key for sched_clock_stable
In order to avoid the runtime condition and the variable load, turn
sched_clock_stable into a static_key.

Also provide a shorter implementation of local_clock() and
cpu_clock(int) for the case where sched_clock_stable == 1.
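
The pattern being introduced can be sketched as follows. This is only an illustration of the static_key idiom under the semantics the patch adds (the key counts "unstable", so the disabled default means the clock is treated as stable); `old_local_clock` and `new_local_clock` are hypothetical names used for comparison, not functions from the patch, and the runtime test in the old variant is the one the pre-patch code performs inside sched_clock_cpu():

```c
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/static_key.h>

/* Old style: a plain global means a memory load plus a conditional
 * branch on every call (the pre-patch code does this test inside
 * sched_clock_cpu(); it is inlined here only for comparison). */
extern int sched_clock_stable_flag;            /* stand-in for the old global */

static u64 old_local_clock(void)               /* hypothetical illustration */
{
        if (sched_clock_stable_flag)
                return sched_clock();
        return sched_clock_cpu(raw_smp_processor_id());
}

/* New style: a static_key compiles to a branch that is patched when the
 * key is flipped, so the common (stable) case is straight-line code with
 * no variable load. */
static struct static_key __sched_clock_stable = STATIC_KEY_INIT;

static u64 new_local_clock(void)               /* hypothetical illustration */
{
        if (static_key_false(&__sched_clock_stable))    /* key set => unstable */
                return sched_clock_cpu(raw_smp_processor_id());
        return sched_clock();                           /* short, stable path */
}
```

new_local_clock() mirrors the patched local_clock() in the diff below.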
|  | MAINLINE | PRE | POST |
|---|---|---|---|
| sched_clock_stable: 1 |  |  |  |
| (cold) sched_clock | 329841 | 221876 | 215295 |
| (cold) local_clock | 301773 | 234692 | 220773 |
| (warm) sched_clock | 38375 | 25602 | 25659 |
| (warm) local_clock | 100371 | 33265 | 27242 |
| (warm) rdtsc | 27340 | 24214 | 24208 |
| sched_clock_stable: 0 |  |  |  |
| (cold) sched_clock | 382634 | 235941 | 237019 |
| (cold) local_clock | 396890 | 297017 | 294819 |
| (warm) sched_clock | 38194 | 25233 | 25609 |
| (warm) local_clock | 143452 | 71234 | 71232 |
| (warm) rdtsc | 27345 | 24245 | 24243 |
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-eummbdechzz37mwmpags1gjr@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
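
The commit message does not state the units or the harness behind the numbers above; they read as per-call cycle counts, cold- vs warm-cache, with a raw rdtsc read as the baseline. As a purely illustrative sketch (not the harness used for the table, and measuring a user-space stand-in rather than the kernel functions), warm-path numbers of this kind could be gathered like this; `time_calls` and `just_rdtsc` are hypothetical names:

```c
/* Illustrative only: average warm-path cycles per call on x86-64.
 * Build with: gcc -O2 -o bench bench.c */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>                  /* __rdtsc() */

static uint64_t time_calls(uint64_t (*fn)(void), int iters)
{
        volatile uint64_t sink = 0;
        uint64_t start, end;
        int i;

        for (i = 0; i < iters; i++)     /* warm caches and branch predictors */
                sink += fn();

        start = __rdtsc();
        for (i = 0; i < iters; i++)
                sink += fn();
        end = __rdtsc();

        (void)sink;
        return (end - start) / iters;   /* average cycles per call */
}

static uint64_t just_rdtsc(void)        /* baseline, like the "(warm) rdtsc" row */
{
        return __rdtsc();
}

int main(void)
{
        printf("(warm) rdtsc: ~%llu cycles/call\n",
               (unsigned long long)time_calls(just_rdtsc, 1000000));
        return 0;
}
```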
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/clock.c | 41 |
| -rw-r--r-- | kernel/sched/debug.c | 2 |
| -rw-r--r-- | kernel/time/tick-sched.c | 2 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 2 |
4 files changed, 37 insertions, 10 deletions
```diff
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 59371549ddf0..c9b34c4e3ecc 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -58,6 +58,7 @@
 #include <linux/percpu.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
+#include <linux/static_key.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -74,7 +75,27 @@ EXPORT_SYMBOL_GPL(sched_clock);
 __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__read_mostly int sched_clock_stable;
+static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+
+int sched_clock_stable(void)
+{
+        if (static_key_false(&__sched_clock_stable))
+                return false;
+        return true;
+}
+
+void set_sched_clock_stable(void)
+{
+        if (!sched_clock_stable())
+                static_key_slow_dec(&__sched_clock_stable);
+}
+
+void clear_sched_clock_stable(void)
+{
+        /* XXX worry about clock continuity */
+        if (sched_clock_stable())
+                static_key_slow_inc(&__sched_clock_stable);
+}
 
 struct sched_clock_data {
         u64                     tick_raw;
@@ -234,7 +255,7 @@ u64 sched_clock_cpu(int cpu)
         struct sched_clock_data *scd;
         u64 clock;
 
-        if (sched_clock_stable)
+        if (sched_clock_stable())
                 return sched_clock();
 
         if (unlikely(!sched_clock_running))
@@ -257,7 +278,7 @@ void sched_clock_tick(void)
         struct sched_clock_data *scd;
         u64 now, now_gtod;
 
-        if (sched_clock_stable)
+        if (sched_clock_stable())
                 return;
 
         if (unlikely(!sched_clock_running))
@@ -308,7 +329,10 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-        return sched_clock_cpu(cpu);
+        if (static_key_false(&__sched_clock_stable))
+                return sched_clock_cpu(cpu);
+
+        return sched_clock();
 }
 
 /*
@@ -320,7 +344,10 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-        return sched_clock_cpu(raw_smp_processor_id());
+        if (static_key_false(&__sched_clock_stable))
+                return sched_clock_cpu(raw_smp_processor_id());
+
+        return sched_clock();
 }
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
@@ -340,12 +367,12 @@ u64 sched_clock_cpu(int cpu)
 
 u64 cpu_clock(int cpu)
 {
-        return sched_clock_cpu(cpu);
+        return sched_clock();
 }
 
 u64 local_clock(void)
 {
-        return sched_clock_cpu(0);
+        return sched_clock();
 }
 
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
```
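
The diffstat is limited to 'kernel', so the x86 half of the patch, which presumably decides whether the TSC is trustworthy and flips the key through the new setters, is not shown here. A hedged sketch of how an architecture clock driver might drive them; `my_arch_time_init` and `tsc_is_invariant` are hypothetical names, not part of this patch:

```c
/* Illustrative only: an architecture would flip the key from its clock
 * setup code; the names below are hypothetical. */
void my_arch_time_init(void)
{
        if (tsc_is_invariant())
                set_sched_clock_stable();       /* key stays off: short, stable path */
        else
                clear_sched_clock_stable();     /* key on: per-CPU filtered sched_clock_cpu() */
}
```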
```diff
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 374fe04a5e6e..dd52e7ffb10e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -371,7 +371,7 @@ static void sched_debug_header(struct seq_file *m)
         PN(cpu_clk);
         P(jiffies);
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-        P(sched_clock_stable);
+        P(sched_clock_stable());
 #endif
 #undef PN
 #undef P
```
```diff
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ea20f7d1ac2c..c833249ab0fb 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -177,7 +177,7 @@ static bool can_stop_full_tick(void)
          * TODO: kick full dynticks CPUs when
          * sched_clock_stable is set.
          */
-        if (!sched_clock_stable) {
+        if (!sched_clock_stable()) {
                 trace_tick_stop(0, "unstable sched clock\n");
                 /*
                  * Don't allow the user to think they can get
```
```diff
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cc2f66f68dc5..294b8a271a04 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2558,7 +2558,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
         if (unlikely(test_time_stamp(delta))) {
                 int local_clock_stable = 1;
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-                local_clock_stable = sched_clock_stable;
+                local_clock_stable = sched_clock_stable();
 #endif
                 WARN_ONCE(delta > (1ULL << 59),
                           KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
```
