path: root/arch/x86/kernel/tsc_64.c
Diffstat (limited to 'arch/x86/kernel/tsc_64.c')
-rw-r--r--	arch/x86/kernel/tsc_64.c	106
1 file changed, 0 insertions(+), 106 deletions(-)
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
deleted file mode 100644
index 80a274b018c2..000000000000
--- a/arch/x86/kernel/tsc_64.c
+++ /dev/null
@@ -1,106 +0,0 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/acpi_pmtmr.h>

#include <asm/hpet.h>
#include <asm/timex.h>
#include <asm/timer.h>
#include <asm/vgtod.h>

extern int tsc_unstable;
extern int tsc_disabled;

/*
 * Make an educated guess whether the TSC is trustworthy and
 * synchronized over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}
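
/*
 * Editorial note, not part of the original file: a minimal sketch of
 * how this predicate is typically consumed at init time, using the
 * mark_tsc_unstable() helper defined further down:
 *
 *	if (unsynchronized_tsc())
 *		mark_tsc_unstable("TSCs unsynchronized");
 */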

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}
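
/*
 * Editorial sketch, not part of the original file: what goes wrong
 * without the clamp above. The timekeeping core computes the elapsed
 * interval as an unsigned difference, so a reading even one cycle
 * behind cycle_last yields a delta of nearly 2^64 cycles instead of
 * zero; after the mult/shift conversion that shows up as the forward
 * jump "in the range of hours" the comment describes.
 */
static inline u64 demo_unclamped_delta(u64 now, u64 cycle_last)
{
	/* for now == cycle_last - 1 this returns 0xffffffffffffffff */
	return now - cycle_last;
}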

/* Same clamp as read_tsc(), but reading via the vsyscall-exported data */
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)vget_cycles();

	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
		ret : __vsyscall_gtod_data.clock.cycle_last;
}

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
	.vread		= vread_tsc,
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
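
/*
 * Editorial note, not part of the original file: callers pass a short
 * human-readable cause string. A hypothetical cpufreq notifier, for
 * instance, would report a frequency change as:
 *
 *	mark_tsc_unstable("cpufreq changes");
 */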

void __init init_tsc_clocksource(void)
{
	if (tsc_disabled > 0)
		return;

	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
			clocksource_tsc.shift);
	if (check_tsc_unstable())
		clocksource_tsc.rating = 0;

	clocksource_register(&clocksource_tsc);
}
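
/*
 * Editorial sketch, not part of the original file: the arithmetic
 * behind the mult computation above. The core converts cycles to
 * nanoseconds as ns = (cycles * mult) >> shift, so
 * clocksource_khz2mult() picks roughly mult = (10^6 << shift) / khz.
 * For a hypothetical 2 GHz TSC (tsc_khz = 2000000) with shift = 22:
 *
 *	mult = (1000000 << 22) / 2000000 = 2097152
 *
 * i.e. each cycle contributes 2097152 / 2^22 = 0.5 ns, as expected
 * for a 2 GHz clock.
 */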