Diffstat (limited to 'arch/x86/kernel/tsc_32.c')
-rw-r--r--	arch/x86/kernel/tsc_32.c	188
1 file changed, 0 insertions, 188 deletions
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
deleted file mode 100644
index bbc153d36f84..000000000000
--- a/arch/x86/kernel/tsc_32.c
+++ /dev/null
@@ -1,188 +0,0 @@
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

extern int tsc_unstable;
extern int tsc_disabled;

/* clock source code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU has updated cycle_last under
 * the xtime lock while another CPU reads a TSC value which is slightly
 * behind, i.e. smaller than the cycle_last reference value. This delta
 * is nowhere else observable, but in that case it results in a forward
 * time jump in the range of hours, due to the unsigned delta
 * calculation of the timekeeping core code, which is necessary to
 * support wrapping clocksources like the pm timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

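	/*
	 * Never return a value below cycle_last; a TSC slightly behind
	 * would otherwise show up as a huge unsigned delta in the
	 * timekeeping core (see the comment above).
	 */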
	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be set */
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

	/* Anything with constant TSC should be synchronized */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi-socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

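	/*
	 * If the RTSC_SUSP bit says the TSC keeps counting across
	 * suspend, the clocksource watchdog cross-check can be skipped.
	 */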
	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif


void __init tsc_init(void)
{
	int cpu;
	u64 lpj;

	if (!cpu_has_tsc || tsc_disabled > 0)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

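	/* loops per jiffy: TSC ticks per second (tsc_khz * 1000) / HZ */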
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

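	/* switch udelay() over to a TSC-based delay loop */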
	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
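	/* convert tsc_khz into a fixed-point multiplier for the 2^22 shift */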
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}