Diffstat (limited to 'arch/x86/kernel/tsc_32.c')
-rw-r--r--  arch/x86/kernel/tsc_32.c | 29
1 file changed, 15 insertions(+), 14 deletions(-)
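
The patch below replaces the old tsc_enabled flag with a tri-state tsc_disabled: it starts at -1 so that native_sched_clock() keeps using the jiffies fallback until tsc_init() has calibrated the TSC, the "notsc" boot parameter handler pins it to 1, and tsc_init() clears it to 0 only once the TSC is actually usable. The following user-space sketch models that state machine; it is an illustration only, and sched_clock(), notsc_param(), tsc_init_sketch(), jiffies_clock() and read_tsc() are stand-ins for the kernel's native_sched_clock(), tsc_setup(), tsc_init(), the jiffies path and rdtsc, not real kernel interfaces.

/* Minimal sketch of the tri-state flag introduced by this patch. */
#include <stdio.h>

static int tsc_disabled = -1;	/* -1: not yet calibrated, 1: "notsc", 0: TSC usable */
static int cpu_has_tsc = 1;	/* stand-in for the CPU feature bit */

static unsigned long long jiffies_clock(void) { return 0; }	/* stand-in fallback */
static unsigned long long read_tsc(void) { return 42; }	/* stand-in rdtsc */

/* mirrors the patched native_sched_clock(): fall back while the TSC is off */
static unsigned long long sched_clock(void)
{
	if (tsc_disabled)
		return jiffies_clock();
	return read_tsc();
}

/* mirrors the patched tsc_setup(): "notsc" pins the flag to 1 */
static void notsc_param(void)
{
	tsc_disabled = 1;
}

/* mirrors the patched tsc_init(): enable rdtsc only after calibration */
static void tsc_init_sketch(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0)
		return;			/* no TSC, or user-disabled */
	/* ... calibration would happen here ... */
	tsc_disabled = 0;		/* now sched_clock() may use the TSC */
}

int main(void)
{
	printf("before init: %llu\n", sched_clock());	/* jiffies fallback */
	tsc_init_sketch();
	printf("after init:  %llu\n", sched_clock());	/* TSC path */

	/* booting with "notsc" instead: the flag is pinned and init bails out */
	notsc_param();
	tsc_init_sketch();
	printf("with notsc:  %llu\n", sched_clock());	/* still the fallback */
	return 0;
}
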
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e4790728b224..774a5a83c296 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,10 @@
 
 #include "mach_timer.h"
 
-static int tsc_enabled;
+/* native_sched_clock() is called before tsc_init(), so
+   we must start with the TSC soft disabled to prevent
+   erroneous rdtsc usage on !cpu_has_tsc processors */
+static int tsc_disabled = -1;
 
 /*
  * On some systems the TSC frequency does not
@@ -28,8 +31,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
 	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-				"cannot disable TSC completely.\n");
-	mark_tsc_unstable("user disabled TSC");
+			"cannot disable TSC completely.\n");
+	tsc_disabled = 1;
 	return 1;
 }
 #else
@@ -120,7 +123,7 @@ unsigned long long native_sched_clock(void)
 	 * very important for it to be as fast as the platform
 	 * can achive it. )
 	 */
-	if (unlikely(!tsc_enabled && !tsc_unstable))
+	if (unlikely(tsc_disabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -283,7 +286,6 @@ core_initcall(cpufreq_tsc);
 
 /* clock source code */
 
-static unsigned long current_tsc_khz;
 static struct clocksource clocksource_tsc;
 
 /*
@@ -322,7 +324,6 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		tsc_enabled = 0;
 		printk("Marking TSC unstable due to: %s.\n", reason);
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
@@ -336,7 +337,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-		       d->ident);
+	       d->ident);
 	tsc_unstable = 1;
 	return 0;
 }
@@ -403,7 +404,7 @@ void __init tsc_init(void)
 {
 	int cpu;
 
-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc || tsc_disabled > 0)
 		return;
 
 	cpu_khz = calculate_cpu_khz();
@@ -414,6 +415,9 @@ void __init tsc_init(void)
 		return;
 	}
 
+	/* now allow native_sched_clock() to use rdtsc */
+	tsc_disabled = 0;
+
 	printk("Detected %lu.%03lu MHz processor.\n",
 	       (unsigned long)cpu_khz / 1000,
 	       (unsigned long)cpu_khz % 1000);
@@ -434,15 +438,12 @@ void __init tsc_init(void)
 
 	unsynchronized_tsc();
 	check_geode_tsc_reliable();
-	current_tsc_khz = tsc_khz;
-	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-						clocksource_tsc.shift);
+	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
+						    clocksource_tsc.shift);
 	/* lower the rating if we already know its unstable: */
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	} else
-		tsc_enabled = 1;
-
+	}
 	clocksource_register(&clocksource_tsc);
 }
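
For reference, the clocksource setup that the last hunk simplifies converts TSC cycles to nanoseconds with a mult/shift pair computed by clocksource_khz2mult(), while the tsc_disabled fallback in native_sched_clock() simply scales jiffies by (1000000000 / HZ). The small program below works through both conversions; the 2 GHz frequency, the shift value of 22 and HZ of 250 are assumptions picked for the example, not values taken from this file.

/* Rough illustration of the two clock conversions visible in the patch. */
#include <stdio.h>
#include <stdint.h>

/* follows the kernel's clocksource_khz2mult() formula: mult = (10^6 << shift) / khz */
static uint32_t khz2mult(uint32_t khz, uint32_t shift)
{
	uint64_t tmp = (uint64_t)1000000 << shift;

	tmp += khz / 2;				/* round to nearest */
	return (uint32_t)(tmp / khz);
}

int main(void)
{
	uint32_t tsc_khz = 2000000;		/* assume a 2 GHz TSC */
	uint32_t shift = 22;			/* assumed clocksource shift */
	uint32_t mult = khz2mult(tsc_khz, shift);
	uint64_t cycles = 2000000;		/* 1 ms worth of cycles at 2 GHz */
	unsigned int hz = 250;			/* assumed HZ */

	/* cycles -> ns, as the TSC clocksource does it: (cycles * mult) >> shift */
	printf("mult = %u, 2000000 cycles = %llu ns\n",
	       mult, (unsigned long long)((cycles * mult) >> shift));

	/* the jiffies fallback used while tsc_disabled is set advances per tick by: */
	printf("jiffies fallback: %u ns per tick\n", 1000000000 / hz);
	return 0;
}
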