author		Pavel Tatashin <pasha.tatashin@oracle.com>	2018-07-19 16:55:43 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-19 18:02:43 -0400
commit		46457ea464f5341d1f9dad8dd213805d45f7f117
tree		316f05b91ec512ef269e8bdd0cc89e7ea062e0f3
parent		857baa87b6422bcfb84ed3631d6839920cb5b09d
sched/clock: Use static key for sched_clock_running
sched_clock_running may be read every time sched_clock_cpu() is called.
Yet the variable is updated only twice during boot and never changes
again, so it is better to make it a static key; see the static-key
sketch after the tag block below.
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: douly.fnst@cn.fujitsu.com
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-25-pasha.tatashin@oracle.com
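
[Editor's note] For readers unfamiliar with the API this patch switches to,
here is a minimal sketch of the kernel's static-key (jump-label) pattern,
using the same three calls the diff below relies on. The names my_key,
my_hot_path() and my_boot_step() are illustrative only, not part of the
patch.

#include <linux/jump_label.h>

/*
 * The key starts disabled; until it is enabled, the branch below falls
 * straight through without loading any variable from memory.
 */
static DEFINE_STATIC_KEY_FALSE(my_key);

static u64 my_hot_path(void)
{
	/* Compiled as a patchable jump, not a test of memory. */
	if (!static_branch_unlikely(&my_key))
		return 0;
	return 1;
}

static void my_boot_step(void)
{
	/*
	 * Raises the key's enable count; the 0 -> 1 transition rewrites
	 * the branch at every call site.
	 */
	static_branch_inc(&my_key);
}

The payoff for sched_clock_cpu() is that the common-case check costs a
patched jump in the instruction stream rather than a read of
sched_clock_running on every invocation.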
 kernel/sched/clock.c | 16
 kernel/sched/debug.c |  2
 2 files changed, 8 insertions, 10 deletions
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 422cd63f8f17..c5c47ad3f386 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -67,7 +67,7 @@ unsigned long long __weak sched_clock(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock);
 
-__read_mostly int sched_clock_running;
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 /*
@@ -191,7 +191,7 @@ void clear_sched_clock_stable(void)
 
 	smp_mb(); /* matches sched_clock_init_late() */
 
-	if (sched_clock_running == 2)
+	if (static_key_count(&sched_clock_running.key) == 2)
 		__clear_sched_clock_stable();
 }
 
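[Editor's note] On the check above: static_branch_inc() bumps the key's
reference count by one, and it is called once in sched_clock_init() and
again in sched_clock_init_late(), so a static_key_count() of 2 here means
late boot has been reached, exactly what sched_clock_running == 2 used to
signal.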
@@ -215,7 +215,7 @@ void __init sched_clock_init(void)
 	__sched_clock_gtod_offset();
 	local_irq_restore(flags);
 
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 
 	/* Now that sched_clock_running is set adjust scd */
 	local_irq_save(flags);
@@ -228,7 +228,7 @@ void __init sched_clock_init(void)
  */
 static int __init sched_clock_init_late(void)
 {
-	sched_clock_running = 2;
+	static_branch_inc(&sched_clock_running);
 	/*
 	 * Ensure that it is impossible to not do a static_key update.
 	 *
@@ -373,7 +373,7 @@ u64 sched_clock_cpu(int cpu)
 	if (sched_clock_stable())
 		return sched_clock() + __sched_clock_offset;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return sched_clock();
 
 	preempt_disable_notrace();
@@ -396,7 +396,7 @@ void sched_clock_tick(void)
 	if (sched_clock_stable())
 		return;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return;
 
 	lockdep_assert_irqs_disabled();
@@ -455,13 +455,13 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 void __init sched_clock_init(void)
 {
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 	generic_sched_clock_init();
 }
 
 u64 sched_clock_cpu(int cpu)
 {
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return 0;
 
 	return sched_clock();
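[Editor's note] The second sched_clock_init()/sched_clock_cpu() pair above
belongs to the fallback branch built when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
is unset; there sched_clock_cpu() returns 0 until the key is first enabled
and simply forwards to sched_clock() afterwards.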
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e593b4118578..b0212f489a33 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -623,8 +623,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 #undef PU
 }
 
-extern __read_mostly int sched_clock_running;
-
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
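[Editor's note] With sched_clock_running now a static key private to
kernel/sched/clock.c, the stale extern declaration in debug.c goes away;
nothing else in the file used it.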