author    Mikael Pettersson <mikpe@it.uu.se>    2008-06-14 20:19:56 -0400
committer Ingo Molnar <mingo@elte.hu>           2008-06-19 04:08:47 -0400
commit    df17b1d990fc214f033c5588e58216ec941591e0 (patch)
tree      37dff99a5f2a2630931bb0d2d18131f33c2bb66d
parent    75118a82e21cafb4a82b53bb85d1c7689787e046 (diff)
x86, 32-bit: fix boot failure on TSC-less processors
Booting 2.6.26-rc6 on my 486 DX/4 fails with a "BUG: Int 6" (invalid
opcode) and a kernel halt immediately after the kernel has been
uncompressed. The BUG shows EIP pointing to an rdtsc instruction in
native_read_tsc(), invoked from native_sched_clock().

(This error occurs so early that not even the serial console can
capture it.)

A bisection showed that this bug first occurs in 2.6.26-rc3-git7,
via commit 9ccc906c97e34fd91dc6aaf5b69b52d824386910:

> x86: distangle user disabled TSC from unstable
>
> tsc_enabled is set to 0 from the command line switch "notsc" and from
> the mark_tsc_unstable code. Seperate those functionalities and replace
> tsc_enable with tsc_disable. This makes also the native_sched_clock()
> decision when to use TSC understandable.
>
> Preparatory patch to solve the sched_clock() issue on 32 bit.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

The core reason for this bug is that native_sched_clock() gets called
before tsc_init().

Before the commit above, tsc_32.c used a "tsc_enabled" variable which
defaulted to 0 == disabled, and which only got enabled late in
tsc_init(). Thus early calls to native_sched_clock() would skip the
TSC and use jiffies instead.

After the commit above, tsc_32.c uses a "tsc_disabled" variable which
defaults to 0, meaning that the TSC is Ok to use. Early calls to
native_sched_clock() now erroneously try to use the TSC on
!cpu_has_tsc processors, leading to invalid opcode exceptions.

My proposed fix is to initialise tsc_disabled to a "soft disabled"
state distinct from the hard disabled state set up by the "notsc"
kernel option. This fixes the native_sched_clock() problem. It also
allows tsc_init() to be simplified: instead of setting tsc_disabled = 1
on every error return, we just set tsc_disabled = 0 once when all
checks have succeeded.

I've verified that this lets my 486 boot again. I've also verified
that a Core2 machine still uses the TSC as clocksource after the
patch.

Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
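For illustration only (not part of the patch), below is a minimal
user-space C sketch of the tri-state convention the fix introduces:
-1 = soft disabled until tsc_init() has validated the TSC, 0 = enabled,
1 = hard disabled via the "notsc" option. The function names, the
cpu_has_tsc variable, and the printed strings are simplified stand-ins,
not the actual tsc_32.c code.

#include <stdio.h>

/* -1: soft disabled (default, before tsc_init() has run)
 *  0: enabled (set once tsc_init() has validated the TSC)
 *  1: hard disabled (would be set by the "notsc" option) */
static int tsc_disabled = -1;
static int cpu_has_tsc;            /* stand-in for the CPU feature flag */

/* stand-in for native_sched_clock(): any nonzero tsc_disabled
 * means "do not issue rdtsc", so early callers stay on jiffies */
static const char *sched_clock_source(void)
{
	return tsc_disabled ? "jiffies fallback" : "rdtsc";
}

/* stand-in for tsc_init(): enable the TSC once, after all checks
 * succeed, instead of disabling it on every error return */
static void tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0)
		return;            /* keep the soft/hard disable in place */
	tsc_disabled = 0;          /* from now on rdtsc may be used */
}

int main(void)
{
	printf("before tsc_init(): %s\n", sched_clock_source());
	cpu_has_tsc = 1;           /* pretend the CPU has a TSC */
	tsc_init();
	printf("after  tsc_init(): %s\n", sched_clock_source());
	return 0;
}

With the pre-patch default of 0, the first printf would already report
"rdtsc", which is exactly the early-boot misuse the patch closes.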
-rw-r--r--    arch/x86/kernel/tsc_32.c | 18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 068759db63dd..65b70637ad97 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,10 @@
 
 #include "mach_timer.h"
 
-static int tsc_disabled;
+/* native_sched_clock() is called before tsc_init(), so
+   we must start with the TSC soft disabled to prevent
+   erroneous rdtsc usage on !cpu_has_tsc processors */
+static int tsc_disabled = -1;
 
 /*
  * On some systems the TSC frequency does not
@@ -402,25 +405,20 @@ void __init tsc_init(void)
 {
 	int cpu;
 
-	if (!cpu_has_tsc || tsc_disabled) {
-		/* Disable the TSC in case of !cpu_has_tsc */
-		tsc_disabled = 1;
+	if (!cpu_has_tsc || tsc_disabled > 0)
 		return;
-	}
 
 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;
 
 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
-		/*
-		 * We need to disable the TSC completely in this case
-		 * to prevent sched_clock() from using it.
-		 */
-		tsc_disabled = 1;
 		return;
 	}
 
+	/* now allow native_sched_clock() to use rdtsc */
+	tsc_disabled = 0;
+
 	printk("Detected %lu.%03lu MHz processor.\n",
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);