author     H. Peter Anvin <hpa@zytor.com>   2008-09-16 12:29:40 -0400
committer  H. Peter Anvin <hpa@zytor.com>   2008-09-16 12:33:57 -0400
commit     ba0593bf553c450a03dbc5f8c1f0ff58b778a0c8 (patch)
tree       027517cfd97e0ddc3513273a1dcb92d00db38234 /arch
parent     5132895f14a57607152f7865dc862fb076ce2585 (diff)
x86: completely disable NOPL on 32 bits
Completely disable NOPL on 32 bits.  It turns out that Microsoft
Virtual PC is so broken it can't even reliably *fail* in the presence
of NOPL.  This leaves the infrastructure in place but disables it
unconditionally.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/common.c  24
1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8aab8517642e..4e456bd955bb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -344,31 +344,15 @@ static void __init early_cpu_detect(void)
 
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6, unfortunately, that's not true in practice because
+ * family >= 6; unfortunately, that's not true in practice because
  * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect. Hence, probe for it based on first
- * principles.
+ * are not easy to detect. In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work. Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
  */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
-	const u32 nopl_signature = 0x888c53b1; /* Random number */
-	u32 has_nopl = nopl_signature;
-
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
-	if (c->x86 >= 6) {
-		asm volatile("\n"
-			     "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
-			     "2:\n"
-			     "	.section .fixup,\"ax\"\n"
-			     "3:	xor %0,%0\n"
-			     "	jmp 2b\n"
-			     "	.previous\n"
-			     _ASM_EXTABLE(1b,3b)
-			     : "+a" (has_nopl));
-
-		if (has_nopl == nopl_signature)
-			set_cpu_cap(c, X86_FEATURE_NOPL);
-	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
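For context, the probe removed above detected NOPL "from first principles" by executing the 0f 1f c0 opcode and recovering from the resulting #UD via an exception-table fixup. The following is a minimal user-space sketch of the same idea, purely illustrative and not part of this patch: it substitutes a SIGILL handler for the kernel's fixup section. This is exactly the style of probe the commit message says Microsoft Virtual PC cannot be trusted to fail reliably, which is why the kernel-side probe is being disabled.

/*
 * Hypothetical user-space analog of the removed in-kernel probe:
 * execute nopl %eax and treat SIGILL as "NOPL not supported".
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_env;

static void sigill_handler(int sig)
{
	(void)sig;
	siglongjmp(nopl_env, 1);	/* NOPL faulted: not supported */
}

int main(void)
{
	int has_nopl = 0;

	signal(SIGILL, sigill_handler);

	if (sigsetjmp(nopl_env, 1) == 0) {
		/* 0f 1f c0 = nopl %eax; raises #UD (SIGILL) if unsupported */
		asm volatile(".byte 0x0f, 0x1f, 0xc0");
		has_nopl = 1;
	}

	printf("NOPL %s\n", has_nopl ? "supported" : "not supported");
	return 0;
}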