Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r--  arch/x86/kernel/tsc.c | 38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..d632934cb638 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with an invariant TSC, the TSC gets reset in some of the
+ * ACPI system sleep states. And on some systems the BIOS seems to reinit the
+ * TSC to an arbitrary value (still sync'd across cpus) during resume from such
+ * sleep states. To cope with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	__get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
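
For reference, a minimal user-space sketch of the fixed-point arithmetic this patch relies on, assuming the kernel's 2^10 cyc2ns scale (CYC2NS_SCALE_FACTOR). The single-CPU globals, the sched_clock_ns()/save_state()/restore_state() helpers and the numbers in main() are illustrative stand-ins, not kernel code.

/* Simplified, single-CPU model of the cyc2ns_offset recomputation. */
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10			/* kernel uses a 2^10 fixed-point scale */

static unsigned long long cyc2ns_scale;		/* per-cpu scale in the kernel */
static unsigned long long cyc2ns_offset;	/* per-cpu offset in the kernel */
static unsigned long long tsc;			/* stand-in for rdtsc() */
static unsigned long long cyc2ns_suspend;

/* ns = offset + cycles * scale / 2^CYC2NS_SCALE_FACTOR */
static unsigned long long sched_clock_ns(void)
{
	return cyc2ns_offset + ((tsc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
}

static void save_state(void)
{
	cyc2ns_suspend = sched_clock_ns();	/* remember the last pre-suspend value */
}

static void restore_state(void)
{
	unsigned long long offset;

	cyc2ns_offset = 0;			/* read the raw post-resume clock */
	offset = cyc2ns_suspend - sched_clock_ns();
	cyc2ns_offset = offset;			/* sched_clock resumes where it left off */
}

int main(void)
{
	cyc2ns_scale = (1000ULL << CYC2NS_SCALE_FACTOR) / 2400;	/* ~2.4 GHz TSC */

	tsc = 10000000000ULL;			/* some time after boot */
	save_state();
	printf("before suspend: %llu ns\n", cyc2ns_suspend);

	tsc = 5000ULL;				/* BIOS reset the TSC during sleep */
	restore_state();
	printf("after resume:   %llu ns\n", sched_clock_ns());
	return 0;
}

After restore_state(), offset + raw == (cyc2ns_suspend - raw) + raw == cyc2ns_suspend, so the clock continues from its pre-suspend value even though the TSC itself was reset; the real patch applies the same offset to every cpu since the TSC stays sync'd across them.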