author    Peter Zijlstra <peterz@infradead.org>    2009-06-16 15:34:17 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-06-17 10:03:54 -0400
commit    84599f8a59e77699f18f06948cea171a349a3f0f
tree      9918eccc9bcc2a91eb0f5609f9ad69c59ac66bd0 /arch/x86/kernel/tsc.c
parent    03347e2592078a90df818670fddf97a33eec70fb
sched, x86: Fix cpufreq + sched_clock() TSC scaling
For frequency dependent TSCs we only scale the cycles, we do not account
for the discrepancy in absolute value.

Our current formula is:

	time = cycles * mult

(where mult is a function of the cpu-speed on variable tsc machines)

Suppose our current cycle count is 10 and we have a multiplier of 5;
our time value would then be 50. Now cpufreq comes along and changes
the multiplier to, say, 3 or 7, which would result in our time being
30 or 70 respectively.

That means we can observe random jumps in the time value due to
frequency changes, in both the forward and backward direction.

So what this patch does is change the formula to:

	time = cycles * mult + offset

and we calculate offset so that time_before == time_after, thereby
ridding us of these jumps in time.

[ Impact: fix/reduce sched_clock() jumps across frequency changing events ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Chucked-on-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
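The continuity argument can be checked with a few lines of ordinary C.
The following is a minimal userspace sketch, not kernel code; the names
(cycles_to_time, change_mult) are illustrative only. It reproduces the
worked example from the changelog: 10 cycles at a multiplier of 5, then
a cpufreq-style switch to 3.

#include <stdio.h>

static unsigned long long mult = 5;	/* cycles -> time multiplier */
static unsigned long long offset;	/* picked so time stays continuous */

static unsigned long long cycles_to_time(unsigned long long cycles)
{
	return cycles * mult + offset;
}

/* On a frequency change, choose offset so time_before == time_after. */
static void change_mult(unsigned long long cycles_now,
			unsigned long long new_mult)
{
	unsigned long long time_before = cycles_to_time(cycles_now);

	mult = new_mult;
	/* Unsigned wraparound keeps this correct even when the
	 * subtraction "goes negative". */
	offset = time_before - cycles_now * mult;
}

int main(void)
{
	printf("%llu\n", cycles_to_time(10));	/* 50, as in the changelog */
	change_mult(10, 3);			/* cpufreq lowers the multiplier */
	printf("%llu\n", cycles_to_time(10));	/* still 50: no jump at the switch */
	printf("%llu\n", cycles_to_time(20));	/* 80: advances at the new rate */
	return 0;
}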
Diffstat (limited to 'arch/x86/kernel/tsc.c')

 arch/x86/kernel/tsc.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3e1c057e98fe..ef4dac50143f 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -589,22 +589,26 @@ EXPORT_SYMBOL(recalibrate_cpu_khz);
  */
 
 DEFINE_PER_CPU(unsigned long, cyc2ns);
+DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);
 
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-	unsigned long long tsc_now, ns_now;
+	unsigned long long tsc_now, ns_now, *offset;
 	unsigned long flags, *scale;
 
 	local_irq_save(flags);
 	sched_clock_idle_sleep_event();
 
 	scale = &per_cpu(cyc2ns, cpu);
+	offset = &per_cpu(cyc2ns_offset, cpu);
 
 	rdtscll(tsc_now);
 	ns_now = __cycles_2_ns(tsc_now);
 
-	if (cpu_khz)
+	if (cpu_khz) {
 		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+		*offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
+	}
 
 	sched_clock_idle_wakeup_event(0);
 	local_irq_restore(flags);
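For completeness: the hunk above only stores the per-CPU offset; the
reader that consumes it lives outside arch/x86/kernel/tsc.c and is
therefore not part of the diff shown (the diffstat is limited to this
file). A sketch of what the updated __cycles_2_ns() plausibly looks
like under the changelog's formula, assuming the usual per_cpu() and
smp_processor_id() kernel helpers:

/* Sketch only: the companion reader implied by the offset
 * calculation above, not part of the diff shown here. */
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	int cpu = smp_processor_id();

	/* time = cycles * mult + offset, per the changelog */
	return (cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR)
		+ per_cpu(cyc2ns_offset, cpu);
}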