Diffstat (limited to 'arch/x86/kernel/tsc_32.c')
-rw-r--r--   arch/x86/kernel/tsc_32.c   453
1 file changed, 0 insertions(+), 453 deletions(-)

diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
deleted file mode 100644
index 068759db63dd..000000000000
--- a/arch/x86/kernel/tsc_32.c
+++ /dev/null
@@ -1,453 +0,0 @@
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_disabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
	       "cannot disable TSC completely.\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", tsc_setup);
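
/*
 * Usage example: booting with "notsc" on the kernel command line runs
 * tsc_setup() via the __setup() hook above. With CONFIG_X86_TSC the
 * kernel can only warn; otherwise the TSC feature bit is cleared early.
 */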

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use khz divisor instead of mhz to keep a better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *	-johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
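
/*
 * Worked example with illustrative numbers: for a 2 GHz CPU
 * (cpu_khz = 2000000) and SC = 2^10 as above, set_cyc2ns_scale()
 * computes
 *
 *	cyc2ns = (10^6 << 10) / 2000000 = 512
 *
 * so the conversion is ns = (cycles * 512) >> 10 = cycles / 2,
 * i.e. 0.5 ns per cycle, as expected at 2 GHz.
 */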

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

	/*
	 * Start smoothly with the new frequency:
	 */
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
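
/*
 * cycles_2_ns() and __cycles_2_ns() live in asm/timer.h, not here.
 * A minimal sketch of the conversion they perform with the per-cpu
 * scale set up above (the cycles_2_ns() wrapper additionally disables
 * interrupts around the per-cpu access):
 *
 *	static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 *	{
 *		return cyc * per_cpu(cyc2ns, smp_processor_id())
 *				>> CYC2NS_SCALE_FACTOR;
 *	}
 */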

/*
 * We need to define a real function for sched_clock, to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm and to get an accurate reading */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);

		/*
		 * Error: ECTCNEVERSET
		 * The CTC wasn't reliable: we got a hit on the very first read,
		 * or the CPU was so fast/slow that the quotient wouldn't fit in
		 * 32 bits.
		 */
		if (count <= 1)
			continue;

		/* cpu freq too slow: */
		if ((end - start) <= CALIBRATE_TIME_MSEC)
			continue;

		/*
		 * We want the minimum time of all runs in case one of them
		 * is inaccurate due to SMI or other delay
		 */
		delta64 = min(delta64, (end - start));
	}

	/* cpu freq too fast (or every run was bad): */
	if (delta64 > (1ULL<<32))
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
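
/*
 * Worked example of the calibration math, with illustrative numbers:
 * mach_countup() spins for CALIBRATE_TIME_MSEC (30 ms, per
 * mach_timer.h) while the TSC runs. On a 2 GHz CPU that gives
 * delta64 = end - start of roughly 60,000,000 cycles, and after
 * rounding, do_div(delta64, 30) yields 2,000,000 -- the CPU frequency
 * in kHz, which is exactly the unit the caller stores in cpu_khz.
 */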

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

		if (cpu_khz) {

			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz, freq->cpu);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}
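
/*
 * cpufreq_scale(val, old_freq, new_freq) from linux/cpufreq.h simply
 * rescales: val * new_freq / old_freq. Illustrative numbers: going
 * from ref_freq = 800000 (800 MHz) to freq->new = 1600000 doubles
 * both loops_per_jiffy and cpu_khz.
 */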

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz;
static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}
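
/*
 * Example of the clamp above, with illustrative values: if another CPU
 * just advanced cycle_last to 1000 and this CPU's slightly-behind TSC
 * reads 998, read_tsc() returns 1000, so the timekeeping core never
 * computes a huge bogus unsigned delta.
 */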

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be set */
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
};
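
/*
 * tsc_init() below fills in .mult via clocksource_khz2mult(), which
 * computes roughly (NSEC_PER_MSEC << shift) / khz, rounded. With
 * illustrative numbers, tsc_khz = 2000000 (2 GHz) and shift = 22:
 *
 *	mult = (10^6 << 22) / 2000000 = 2^21 = 2097152
 *
 * and the timekeeping core converts as ns = (cycles * mult) >> shift,
 * i.e. cycles / 2 for this example.
 */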

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

	/* Anything with constant TSC should be synchronized */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif


void __init tsc_init(void)
{
	int cpu;

	if (!cpu_has_tsc || tsc_disabled) {
		/* Disable the TSC in case of !cpu_has_tsc */
		tsc_disabled = 1;
		return;
	}

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		/*
		 * We need to disable the TSC completely in this case
		 * to prevent sched_clock() from using it.
		 */
		tsc_disabled = 1;
		return;
	}

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}