aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorDaniel Lezcano <daniel.lezcano@linaro.org>2016-04-11 10:38:34 -0400
committerIngo Molnar <mingo@kernel.org>2016-04-13 06:25:22 -0400
commit2c923e94cd9c6acff3b22f0ae29cfe65e2658b40 (patch)
tree138907178baed5ae0f6be39fb1204fa5e285818d /kernel/sched
parentc78b17e28cc2c2df74264afc408bdc6aaf3fbcc8 (diff)
sched/clock: Make local_clock()/cpu_clock() inline
The local_clock/cpu_clock functions were changed to prevent a double identical test with sched_clock_cpu() when HAVE_UNSTABLE_SCHED_CLOCK is set. That resulted in one line functions. As these functions are in all the cases one line functions and in the hot path, it is useful to specify them as static inline in order to give a strong hint to the compiler. After verification, it appears the compiler does not inline them without this hint. Change those functions to static inline. sched_clock_cpu() is called via the inlined local_clock()/cpu_clock() functions from sched.h. So any module code including sched.h will reference sched_clock_cpu(). Thus it must be exported with the EXPORT_SYMBOL_GPL macro. Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1460385514-14700-2-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/clock.c42
1 file changed, 1 insertion, 41 deletions
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 30c4b202f0ba..e85a725e5c34 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -318,6 +318,7 @@ u64 sched_clock_cpu(int cpu)
318 318
319 return clock; 319 return clock;
320} 320}
321EXPORT_SYMBOL_GPL(sched_clock_cpu);
321 322
322void sched_clock_tick(void) 323void sched_clock_tick(void)
323{ 324{
@@ -363,33 +364,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
363} 364}
364EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 365EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
365 366
366/*
367 * As outlined at the top, provides a fast, high resolution, nanosecond
368 * time source that is monotonic per cpu argument and has bounded drift
369 * between cpus.
370 *
371 * ######################### BIG FAT WARNING ##########################
372 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
373 * # go backwards !! #
374 * ####################################################################
375 */
376u64 cpu_clock(int cpu)
377{
378 return sched_clock_cpu(cpu);
379}
380
381/*
382 * Similar to cpu_clock() for the current cpu. Time will only be observed
383 * to be monotonic if care is taken to only compare timestamps taken on the
384 * same CPU.
385 *
386 * See cpu_clock().
387 */
388u64 local_clock(void)
389{
390 return sched_clock_cpu(raw_smp_processor_id());
391}
392
393#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 367#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
394 368
395void sched_clock_init(void) 369void sched_clock_init(void)
@@ -404,22 +378,8 @@ u64 sched_clock_cpu(int cpu)
404 378
405 return sched_clock(); 379 return sched_clock();
406} 380}
407
408u64 cpu_clock(int cpu)
409{
410 return sched_clock();
411}
412
413u64 local_clock(void)
414{
415 return sched_clock();
416}
417
418#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 381#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
419 382
420EXPORT_SYMBOL_GPL(cpu_clock);
421EXPORT_SYMBOL_GPL(local_clock);
422
423/* 383/*
424 * Running clock - returns the time that has elapsed while a guest has been 384 * Running clock - returns the time that has elapsed while a guest has been
425 * running. 385 * running.