Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	24
1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8aab8517642e..4e456bd955bb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -344,31 +344,15 @@ static void __init early_cpu_detect(void)
 
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6, unfortunately, that's not true in practice because
+ * family >= 6; unfortunately, that's not true in practice because
  * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect.  Hence, probe for it based on first
- * principles.
+ * are not easy to detect.  In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work.  Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
  */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
-	const u32 nopl_signature = 0x888c53b1; /* Random number */
-	u32 has_nopl = nopl_signature;
-
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
-	if (c->x86 >= 6) {
-		asm volatile("\n"
-			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
-			     "2:\n"
-			     "	.section .fixup,\"ax\"\n"
-			     "3:	xor %0,%0\n"
-			     "	jmp 2b\n"
-			     "	.previous\n"
-			     _ASM_EXTABLE(1b,3b)
-			     : "+a" (has_nopl));
-
-		if (has_nopl == nopl_signature)
-			set_cpu_cap(c, X86_FEATURE_NOPL);
-	}
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
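
For reference, the probe this patch removes worked by executing the 3-byte NOPL opcode (0x0f 0x1f 0xc0) and letting the kernel's exception table (.fixup / _ASM_EXTABLE) catch the #UD fault if the instruction is unsupported. The following is a minimal, hypothetical userspace sketch of that first-principles probe, not part of this commit; it substitutes a SIGILL handler for the in-kernel fixup machinery. It inherits exactly the weakness the new comment describes: a broken virtualizer may neither execute the instruction correctly nor fault reliably.

/*
 * Hypothetical userspace sketch of a NOPL probe (not from this commit).
 * Executes "nopl %eax" and catches the resulting SIGILL if the CPU
 * (or virtualizer) does not implement the instruction.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_jmp;

static void sigill_handler(int sig)
{
	/* Reached only if the NOPL opcode raised #UD. */
	siglongjmp(nopl_jmp, 1);
}

int main(void)
{
	struct sigaction sa = { .sa_handler = sigill_handler };
	volatile int has_nopl = 0;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);

	if (sigsetjmp(nopl_jmp, 1) == 0) {
		/* 0x0f 0x1f 0xc0 is "nopl %eax", the 3-byte NOPL form. */
		asm volatile(".byte 0x0f, 0x1f, 0xc0");
		has_nopl = 1;
	}

	printf("NOPL %s\n", has_nopl ? "executed" : "raised SIGILL");
	return 0;
}

With the probe gone, the kernel simply clears X86_FEATURE_NOPL unconditionally here, so code paths that would use NOPL-based padding fall back to the classic NOP forms.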