Diffstat (limited to 'arch/x86/kernel/tsc_32.c')
 -rw-r--r--  arch/x86/kernel/tsc_32.c | 451
 1 file changed, 0 insertions, 451 deletions
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
deleted file mode 100644
index 65b70637ad97..000000000000
--- a/arch/x86/kernel/tsc_32.c
+++ /dev/null
@@ -1,451 +0,0 @@
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int tsc_disabled = -1;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
               "cannot disable TSC completely.\n");
        tsc_disabled = 1;
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *      into a shift.
 *
 *      We can use khz divisor instead of mhz to keep a better precision, since
 *      cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *      (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long long tsc_now, ns_now;
        unsigned long flags, *scale;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        scale = &per_cpu(cyc2ns, cpu);

        rdtscll(tsc_now);
        ns_now = __cycles_2_ns(tsc_now);

        if (cpu_khz)
                *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

        /*
         * Start smoothly with the new frequency:
         */
        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}

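A minimal standalone sketch of the cycles-to-nanoseconds scaling above, assuming CYC2NS_SCALE_FACTOR is 10 (the 2^10 the comment mentions) and a hypothetical 2 GHz CPU; illustrative only, not part of the removed file:

/* Sketch: the cyc2ns fixed-point math as used by set_cyc2ns_scale() and
 * cycles_2_ns(), with an assumed CYC2NS_SCALE_FACTOR of 10. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC        1000000UL
#define CYC2NS_SCALE_FACTOR  10            /* assumed value */

int main(void)
{
        unsigned long cpu_khz = 2000000;   /* hypothetical 2.0 GHz CPU */
        unsigned long scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
        uint64_t cycles = 4000000000ULL;   /* two seconds' worth of cycles at 2 GHz */
        uint64_t ns = (cycles * scale) >> CYC2NS_SCALE_FACTOR;

        /* prints: scale=512 ns=2000000000, i.e. 0.5 ns per cycle */
        printf("scale=%lu ns=%llu\n", scale, (unsigned long long)ns);
        return 0;
}

With scale = 512 and a 10-bit shift, every cycle contributes exactly 0.5 ns, the period expected of a 2 GHz clock.
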
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(tsc_disabled))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
        __attribute__((alias("native_sched_clock")));
#endif

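When tsc_disabled is set, native_sched_clock() above degrades to jiffies resolution; a small sketch of what that granularity means, assuming HZ = 250 as an example configuration, not part of the removed file:

/* Sketch: resolution of the jiffies fallback in native_sched_clock(). */
#include <stdio.h>

#define HZ 250                      /* assumed config value */

int main(void)
{
        unsigned long long jiffies_elapsed = 3;
        unsigned long long ns = jiffies_elapsed * (1000000000 / HZ);

        /* each jiffy is 4,000,000 ns, so the clock advances in 4 ms steps */
        printf("%llu ns\n", ns);    /* prints 12000000 */
        return 0;
}
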
unsigned long native_calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);

                /*
                 * Error: ECTCNEVERSET
                 * The CTC wasn't reliable: we got a hit on the very first read,
                 * or the CPU was so fast/slow that the quotient wouldn't fit in
                 * 32 bits..
                 */
                if (count <= 1)
                        continue;

                /* cpu freq too slow: */
                if ((end - start) <= CALIBRATE_TIME_MSEC)
                        continue;

                /*
                 * We want the minimum time of all runs in case one of them
                 * is inaccurate due to SMI or other delay
                 */
                delta64 = min(delta64, (end - start));
        }

        /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL<<32))
                goto err;

        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64,CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;
err:
        local_irq_restore(flags);
        return 0;
}

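A standalone sketch of the division native_calculate_cpu_khz() ends with: TSC cycles counted over the PIT calibration window, rounded, then divided by the window length in milliseconds to get kHz. CALIBRATE_TIME_MSEC = 30 is an assumption here; the real constant comes from mach_timer.h, which is not shown. Not part of the removed file:

/* Sketch of the kHz calculation: cycles elapsed over a fixed PIT window,
 * rounded and divided by the window length in ms. */
#include <stdio.h>
#include <stdint.h>

#define CALIBRATE_TIME_MSEC 30   /* assumed calibration window, in ms */

int main(void)
{
        /* pretend the TSC advanced by this many cycles during the window
         * (roughly what a 2 GHz CPU does in 30 ms) */
        uint64_t delta64 = 60000123ULL;

        delta64 += CALIBRATE_TIME_MSEC / 2;             /* round, as before do_div */
        uint64_t khz = delta64 / CALIBRATE_TIME_MSEC;

        printf("%llu kHz\n", (unsigned long long)khz);  /* ~2000004 kHz, i.e. ~2.0 GHz */
        return 0;
}
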
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (!ref_freq) {
                if (!freq->old) {
                        ref_freq = freq->new;
                        return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                                ref_freq, freq->new);

                if (cpu_khz) {

                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                set_cyc2ns_scale(cpu_khz, freq->cpu);
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable("cpufreq changes");
                        }
                }
        }

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        return cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

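The notifier above leans on cpufreq_scale(), which amounts to proportional rescaling of a reference value (old * new_freq / ref_freq); a standalone sketch with made-up numbers, not part of the removed file:

/* Sketch: rescaling loops_per_jiffy when the CPU frequency changes. */
#include <stdio.h>
#include <stdint.h>

static unsigned long scale(unsigned long old, unsigned int ref_freq,
                           unsigned int new_freq)
{
        return (unsigned long)((uint64_t)old * new_freq / ref_freq);
}

int main(void)
{
        unsigned long loops_per_jiffy_ref = 4000000;  /* calibrated at ref_freq */
        unsigned int ref_freq = 2000000;              /* 2.0 GHz, in kHz */
        unsigned int new_freq = 1000000;              /* throttled to 1.0 GHz */

        /* halving the clock halves loops_per_jiffy: prints 2000000 */
        printf("%lu\n", scale(loops_per_jiffy_ref, ref_freq, new_freq));
        return 0;
}
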
/* clock source code */

static unsigned long current_tsc_khz;
static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret >= clocksource_tsc.cycle_last ?
                ret : clocksource_tsc.cycle_last;
}

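A standalone sketch of the time warp the comment above describes: the timekeeping core computes deltas with unsigned arithmetic, so a TSC read even one cycle behind cycle_last would otherwise turn into an enormous positive delta. Not part of the removed file:

/* Sketch: why read_tsc() clamps to cycle_last. An unsigned delta of a
 * slightly-behind counter wraps around to a huge value. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cycle_last = 1000000;
        uint64_t tsc_read   = 999999;        /* one cycle behind */

        uint64_t bad_delta  = tsc_read - cycle_last;   /* wraps to 2^64 - 1 */
        uint64_t clamped    = tsc_read >= cycle_last ? tsc_read : cycle_last;

        printf("unclamped delta: %llu\n", (unsigned long long)bad_delta);
        printf("clamped delta:   %llu\n", (unsigned long long)(clamped - cycle_last));
        return 0;
}
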
static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .mult                   = 0, /* to be set */
        .shift                  = 22,
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
};

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
               d->ident);
        tsc_unstable = 1;
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
                .callback = dmi_mark_tsc_unstable,
                .ident = "IBM Thinkpad 380XD",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                },
        },
        {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;

        /* Anything with constant TSC should be synchronized */
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;

        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        tsc_unstable = 1;
        }
        return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP               0x100

static void __init check_geode_tsc_reliable(void)
{
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        if (res_low & RTSC_SUSP)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif


void __init tsc_init(void)
{
        int cpu;

        if (!cpu_has_tsc || tsc_disabled > 0)
                return;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
                return;
        }

        /* now allow native_sched_clock() to use rdtsc */
        tsc_disabled = 0;

        printk("Detected %lu.%03lu MHz processor.\n",
               (unsigned long)cpu_khz / 1000,
               (unsigned long)cpu_khz % 1000);

        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
         * speed as the bootup CPU. (cpufreq notifiers will fix this
         * up if their speed diverges)
         */
        for_each_possible_cpu(cpu)
                set_cyc2ns_scale(cpu_khz, cpu);

        use_tsc_delay();

        /* Check and install the TSC clocksource */
        dmi_check_system(bad_tsc_dmi_table);

        unsynchronized_tsc();
        check_geode_tsc_reliable();
        current_tsc_khz = tsc_khz;
        clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                    clocksource_tsc.shift);
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        }
        clocksource_register(&clocksource_tsc);
}
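
tsc_init() derives the clocksource mult from the measured frequency via clocksource_khz2mult(); a standalone sketch of that conversion, assuming its usual definition mult = (10^6 << shift) / khz with rounding, so that ns = (cycles * mult) >> shift. Not part of the removed file:

/* Sketch of clocksource_khz2mult() and how the TSC clocksource scales
 * cycles to nanoseconds. Frequency value below is made up. */
#include <stdio.h>
#include <stdint.h>

static uint32_t khz2mult(uint32_t khz, uint32_t shift)
{
        uint64_t tmp = (uint64_t)1000000 << shift;   /* ns per millisecond, scaled */
        tmp += khz / 2;                              /* round the division */
        return (uint32_t)(tmp / khz);
}

int main(void)
{
        uint32_t khz = 2000000;                      /* hypothetical 2.0 GHz TSC */
        uint32_t shift = 22;                         /* clocksource_tsc.shift above */
        uint32_t mult = khz2mult(khz, shift);

        uint64_t cycles = 4000000000ULL;             /* two seconds of cycles */
        uint64_t ns = (cycles * mult) >> shift;

        /* prints mult=2097152 ns=2000000000, i.e. 0.5 ns per cycle */
        printf("mult=%u ns=%llu\n", mult, (unsigned long long)ns);
        return 0;
}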