Diffstat (limited to 'arch/x86/kernel/tsc.c')
 arch/x86/kernel/tsc.c | 34 ----------------------------------
 1 file changed, 0 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 24249a5360b6..6cc6922262af 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -763,40 +763,6 @@ static cycle_t read_tsc(struct clocksource *cs)
 		ret : clocksource_tsc.cycle_last;
 }
 
-#ifdef CONFIG_X86_64
-static cycle_t __vsyscall_fn vread_tsc(void)
-{
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads.  The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
-
-	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
-
-	if (likely(ret >= last))
-		return ret;
-
-	/*
-	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a function of time and the likely is
-	 * very likely) and there's a data dependence, so force GCC
-	 * to generate a branch instead.  I don't barrier() because
-	 * we don't actually need a barrier, and if this function
-	 * ever gets inlined it will generate worse code.
-	 */
-	asm volatile ("");
-	return last;
-}
-#endif
-
 static void resume_tsc(struct clocksource *cs)
 {
 	clocksource_tsc.cycle_last = 0;
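
For readers outside the kernel tree, here is a minimal user-space sketch of the pattern the deleted vread_tsc() implements. Everything below is illustrative, not kernel code: read_tsc_monotonic() and the last variable are hypothetical stand-ins (the real code reads cycle_last from the vsyscall_gtod_data vvar), and the unconditional lfence substitutes for rdtsc_barrier(), which the kernel patches to lfence or mfence depending on the CPU.

/*
 * Hypothetical user-space sketch of the vread_tsc() pattern, assuming
 * x86-64 and a GNU C compiler.  Not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>	/* _mm_lfence(), __rdtsc() */

/* Stand-in for VVAR(vsyscall_gtod_data).clock.cycle_last. */
static uint64_t last;

static uint64_t read_tsc_monotonic(void)
{
	uint64_t ret;

	/* Order rdtsc with respect to earlier loads, as rdtsc_barrier()
	 * does in the kernel; plain lfence here for simplicity. */
	_mm_lfence();
	ret = __rdtsc();

	/* Fast path: the counter has not gone backwards. */
	if (__builtin_expect(ret >= last, 1))
		return ret;

	/*
	 * Same trick as the deleted code: an empty asm statement on the
	 * slow path keeps the compiler from collapsing this predictable
	 * branch into a cmov.
	 */
	asm volatile ("");
	return last;
}

int main(void)
{
	printf("tsc: %llu\n", (unsigned long long)read_tsc_monotonic());
	return 0;
}

The empty asm serves the purpose the original comment describes: a cmov would put the comparison's result on the data-dependency chain of every read, while a branch that almost always predicts "ret >= last" keeps the common fast path free of that dependence.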