author		Pavel Tatashin <pasha.tatashin@oracle.com>	2018-07-19 16:55:42 -0400
committer	Thomas Gleixner <tglx@linutronix.de>		2018-07-19 18:02:43 -0400
commit		857baa87b6422bcfb84ed3631d6839920cb5b09d (patch)
tree		b63b887069a177a5897821c39f91177173696434
parent		5d2a4e91a541cb04d20d11602f0f9340291322ac (diff)
sched/clock: Enable sched clock early
Allow sched_clock() to be used before sched_clock_init() is called. This
provides a way to get early boot timestamps on machines with unstable
clocks.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: douly.fnst@cn.fujitsu.com
Cc: peterz@infradead.org
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-24-pasha.tatashin@oracle.com
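For illustration only (not part of this patch): a minimal sketch of what the
early-boot fallback buys a caller. Because sched_clock_cpu() now falls back to
sched_clock() while sched_clock_running is still 0, code that runs before
sched_clock_init() can read a usable timestamp; the helper name
early_boot_timestamp_ns() below is hypothetical.

	/*
	 * Hypothetical example, not in the patch: with this change a caller
	 * that runs before sched_clock_init() gets the raw sched_clock()
	 * value from sched_clock_cpu() instead of 0, so early boot events
	 * can be timestamped even on machines with unstable clocks.
	 */
	static u64 early_boot_timestamp_ns(void)
	{
		/* Falls back to sched_clock() while sched_clock_running == 0. */
		return sched_clock_cpu(raw_smp_processor_id());
	}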
-rw-r--r--	init/main.c	2
-rw-r--r--	kernel/sched/clock.c	20
2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/init/main.c b/init/main.c
index 162d931c9511..ff0a24170b95 100644
--- a/init/main.c
+++ b/init/main.c
@@ -642,7 +642,6 @@ asmlinkage __visible void __init start_kernel(void)
 	softirq_init();
 	timekeeping_init();
 	time_init();
-	sched_clock_init();
 	printk_safe_init();
 	perf_event_init();
 	profile_init();
@@ -697,6 +696,7 @@ asmlinkage __visible void __init start_kernel(void)
 	acpi_early_init();
 	if (late_time_init)
 		late_time_init();
+	sched_clock_init();
 	calibrate_delay();
 	pid_idr_init();
 	anon_vma_init();
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 0e9dbb2d9aea..422cd63f8f17 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -202,7 +202,25 @@ static void __sched_clock_gtod_offset(void)
 
 void __init sched_clock_init(void)
 {
+	unsigned long flags;
+
+	/*
+	 * Set __gtod_offset such that once we mark sched_clock_running,
+	 * sched_clock_tick() continues where sched_clock() left off.
+	 *
+	 * Even if TSC is buggered, we're still UP at this point so it
+	 * can't really be out of sync.
+	 */
+	local_irq_save(flags);
+	__sched_clock_gtod_offset();
+	local_irq_restore(flags);
+
 	sched_clock_running = 1;
+
+	/* Now that sched_clock_running is set adjust scd */
+	local_irq_save(flags);
+	sched_clock_tick();
+	local_irq_restore(flags);
 }
 /*
  * We run this as late_initcall() such that it runs after all built-in drivers,
@@ -356,7 +374,7 @@ u64 sched_clock_cpu(int cpu)
 		return sched_clock() + __sched_clock_offset;
 
 	if (unlikely(!sched_clock_running))
-		return 0ull;
+		return sched_clock();
 
 	preempt_disable_notrace();
 	scd = cpu_sdc(cpu);