summaryrefslogtreecommitdiffstats
path: root/drivers/clocksource
diff options
context:
space:
mode:
authorTianyu Lan <Tianyu.Lan@microsoft.com>2019-08-14 08:32:16 -0400
committerThomas Gleixner <tglx@linutronix.de>2019-08-23 10:59:54 -0400
commitbd00cd52d5be655a2f217e2ed74b91a71cb2b14f (patch)
tree90b8fe8d71a849f6066d1c2ba7500dba0fca9819 /drivers/clocksource
parentadb87ff4f96c9700718e09c97a804124d5cd61ff (diff)
clocksource/drivers/hyperv: Add Hyper-V specific sched clock function
Hyper-V guests use the default native_sched_clock() in pv_ops.time.sched_clock on x86. But native_sched_clock() directly uses the raw TSC value, which can be discontinuous in a Hyper-V VM.

Add the generic hv_setup_sched_clock() to set the sched clock function appropriately. On x86, this sets pv_ops.time.sched_clock to read the Hyper-V reference TSC value that is scaled and adjusted to be continuous.

Also move the Hyper-V reference TSC initialization much earlier in the boot process so no discontinuity is observed when pv_ops.time.sched_clock calculates its offset.

[ tglx: Folded build fix ]

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lkml.kernel.org/r/20190814123216.32245-3-Tianyu.Lan@microsoft.com
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 432aa331df04..c322ab4d3689 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
 #include <asm/mshyperv.h>
 
 static struct clock_event_device __percpu *hv_clock_event;
+static u64 hv_sched_clock_offset __ro_after_init;
 
 /*
  * If false, we're using the old mechanism for stimer0 interrupts
@@ -222,7 +223,7 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
 {
 	u64 current_tick = hv_read_tsc_page(&tsc_pg);
 
@@ -232,9 +233,9 @@ static u64 notrace read_hv_sched_clock_tsc(void)
 	return current_tick;
 }
 
-static u64 read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_sched_clock_tsc(void)
 {
-	return read_hv_sched_clock_tsc();
+	return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
 }
 
 static struct clocksource hyperv_cs_tsc = {
@@ -246,7 +247,7 @@ static struct clocksource hyperv_cs_tsc = {
 };
 #endif
 
-static u64 notrace read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_clock_msr(struct clocksource *arg)
 {
 	u64 current_tick;
 	/*
@@ -258,9 +259,9 @@ static u64 notrace read_hv_sched_clock_msr(void)
 	return current_tick;
 }
 
-static u64 read_hv_clock_msr(struct clocksource *arg)
+static u64 read_hv_sched_clock_msr(void)
 {
-	return read_hv_sched_clock_msr();
+	return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
 }
 
 static struct clocksource hyperv_cs_msr = {
@@ -298,8 +299,9 @@ static bool __init hv_init_tsc_clocksource(void)
 	hv_set_clocksource_vdso(hyperv_cs_tsc);
 	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
-	/* sched_clock_register is needed on ARM64 but is a no-op on x86 */
-	sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);
+	hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+	hv_setup_sched_clock(read_hv_sched_clock_tsc);
+
 	return true;
 }
 #else
@@ -329,7 +331,7 @@ void __init hv_init_clocksource(void)
 	hyperv_cs = &hyperv_cs_msr;
 	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
 
-	/* sched_clock_register is needed on ARM64 but is a no-op on x86 */
-	sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
+	hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+	hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
 EXPORT_SYMBOL_GPL(hv_init_clocksource);