Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 7437fa133c02..3260ab044996 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	 */
 	if (c->x86 == 6 && c->x86_model < 15)
 		clear_cpu_cap(c, X86_FEATURE_PAT);
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * P4s have a "fast strings" feature which causes single-
+	 * stepping REP instructions to only generate a #DB on
+	 * cache-line boundaries.
+	 *
+	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
+	 * (model 2) with the same problem.
+	 */
+	if (c->x86 == 15) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
+
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+		}
+	}
+#endif
 }
 
 #ifdef CONFIG_X86_32
@@ -229,12 +252,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 }
 #endif
 
-static void __cpuinit srat_detect_node(void)
+static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 	unsigned node;
 	int cpu = smp_processor_id();
-	int apicid = hard_smp_processor_id();
+	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
 
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
@@ -400,7 +423,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	}
 
 	/* Work around errata */
-	srat_detect_node();
+	srat_detect_node(c);
 
 	if (cpu_has(c, X86_FEATURE_VMX))
 		detect_vmx_virtcap(c);
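
For reference, a minimal user-space sketch (not part of the patch; it assumes root privileges and a loaded msr driver exposing /dev/cpu/0/msr) that reads IA32_MISC_ENABLE (MSR 0x1a0) and reports whether the Fast-Strings Enable bit, the bit the kmemcheck hunk above clears with wrmsrl, is currently set:

/*
 * Hypothetical helper, not part of the patch: read IA32_MISC_ENABLE
 * (MSR 0x1a0) via the msr character device and report the state of
 * the Fast-Strings Enable bit (bit 0).  Assumes root and
 * "modprobe msr" so that /dev/cpu/0/msr exists.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MISC_ENABLE	0x1a0
#define MISC_ENABLE_FAST_STRING	(1ULL << 0)

int main(void)
{
	uint64_t misc_enable;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	/* The msr driver maps the file offset to the MSR number. */
	if (pread(fd, &misc_enable, sizeof(misc_enable),
		  MSR_IA32_MISC_ENABLE) != (ssize_t)sizeof(misc_enable)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);

	printf("fast string operations: %s\n",
	       (misc_enable & MISC_ENABLE_FAST_STRING) ? "enabled" : "disabled");
	return 0;
}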