Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  34
-rw-r--r--  arch/x86/kernel/cpu/common.c                74
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 14
3 files changed, 71 insertions, 51 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..4e581fdc0a5a 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -143,37 +143,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 *   may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
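
The vendor switch deleted above goes away because PAT is now handled like any other CPU feature bit: the generic code added to common.c below clears feature bits whose required CPUID level is missing, and the Intel erratum is handled in intel.c by clearing X86_FEATURE_PAT directly. As a rough standalone sketch of the capability-bitmap model this moves to (not the kernel's implementation — the struct and helpers here are simplified stand-ins for cpu_has()/clear_cpu_cap() over c->x86_capability):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCAPINTS	10		/* number of 32-bit capability words */
#define FEATURE_PAT	(0*32 + 16)	/* CPUID leaf 1, EDX bit 16 */

struct cpuinfo {
	uint32_t x86_capability[NCAPINTS];
};

static bool cpu_has(const struct cpuinfo *c, unsigned int bit)
{
	return c->x86_capability[bit / 32] & (1u << (bit % 32));
}

static void clear_cpu_cap(struct cpuinfo *c, unsigned int bit)
{
	c->x86_capability[bit / 32] &= ~(1u << (bit % 32));
}

int main(void)
{
	struct cpuinfo c = { .x86_capability = { 1u << 16 } };	/* PAT set */

	printf("PAT before: %d\n", cpu_has(&c, FEATURE_PAT));	/* 1 */
	clear_cpu_cap(&c, FEATURE_PAT);		/* e.g. erratum workaround */
	printf("PAT after:  %d\n", cpu_has(&c, FEATURE_PAT));	/* 0 */
	return 0;
}

Once the bit is cleared early in boot, every later feature test sees PAT as absent, so no dedicated pat_disable() plumbing is needed.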
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 652fdc9a757a..275e2cb43b91 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -224,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_cpuid_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
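
The mixed signed/unsigned comparison in filter_cpuid_features() is the subtle part, as its comment notes: basic CPUID leaves (0x0000xxxx) are small positive numbers and cpuid_level is -1 when CPUID is unavailable, so a signed compare does the right thing; extended leaves (0x8000xxxx) are negative as s32, and extended_cpuid_level is 0 when unavailable, so those must be compared unsigned. A minimal standalone sketch of just that predicate (the function name and the test values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero if the CPU's reported levels do not reach 'required'. */
static int level_missing(uint32_t required, int32_t cpuid_level,
			 uint32_t extended_cpuid_level)
{
	return (int32_t)required < 0 ?
		required > extended_cpuid_level :	/* extended leaf */
		(int32_t)required > cpuid_level;	/* basic leaf */
}

int main(void)
{
	/* XSAVE needs leaf 0x0000000d; a CPU capped at leaf 0xa lacks it. */
	printf("%d\n", level_missing(0x0000000d, 0x0a, 0x80000008));	/* 1 */

	/* An extended leaf within range on the same hypothetical CPU. */
	printf("%d\n", level_missing(0x80000008, 0x0a, 0x80000008));	/* 0 */

	/* No CPUID at all: cpuid_level == -1, extended level == 0. */
	printf("%d\n", level_missing(0x00000001, -1, 0));		/* 1 */
	return 0;
}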
@@ -586,11 +629,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -724,6 +766,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
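
filter_cpuid_features() thus ends up with two call sites: early_identify_cpu() runs it silently (warn=false), and identify_cpu() repeats it with warn=true. Since the boot CPU passes through both paths, warning only on the second pass presumably avoids reporting the same dropped feature twice. A toy model of that silent-then-verbose pattern (all names here are invented):

#include <stdbool.h>
#include <stdio.h>

static void filter_features(bool warn)
{
	/* Pretend one feature failed its CPUID-level check. */
	if (warn)
		puts("CPU: CPU feature xsave disabled "
		     "due to lack of CPUID level 0xd");
}

int main(void)
{
	filter_features(false);	/* early pass: stay quiet */
	filter_features(true);	/* full identify: warn exactly once */
	return 0;
}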
@@ -1053,22 +1098,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
-	}
-#endif
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
+	}
 
 	fpu_init();
 
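
The cpu_init() change above is purely structural: the opening brace of the fallback block moves outside the #ifdef CONFIG_KGDB region, so the braces balance whether or not KGDB is built in, and the duplicated #ifdef around the closing brace can be dropped. The shape of that pattern, in a self-contained sketch (the macro and messages are made up):

#include <stdio.h>

#define KGDB_BUILT_IN 1	/* flip to 0: the block below still compiles */

static int kgdb_connected = 0;

int main(void)
{
#if KGDB_BUILT_IN
	if (kgdb_connected)
		puts("kgdb owns the debug registers; leave them alone");
	else
#endif
	{
		/* Reached when kgdb is absent or not connected. */
		puts("clearing debug registers");
	}
	return 0;
}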
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 549f2ada55f5..5deefae9064d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -30,7 +30,7 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Unmask CPUID levels if masked: */
-	if (c->x86 == 6 && c->x86_model >= 15) {
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 		u64 misc_enable;
 
 		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
 
+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 *   may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
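
This intel.c hunk takes over the erratum handling deleted from addon_cpuid_features.c: on family 6 models below 15 (i.e. before Core 2), X86_FEATURE_PAT is simply cleared, so every later cpu_has() test sees PAT as unavailable. A standalone illustration of the check (the helper function is invented; the family/model cutoffs come from the diff):

#include <stdbool.h>
#include <stdio.h>

/* Family 6 parts older than model 15 (pre-Core 2) hit the PAT erratum. */
static bool pat_usable(unsigned int family, unsigned int model)
{
	return !(family == 6 && model < 15);
}

int main(void)
{
	printf("Core Duo (6, 14): %s\n",
	       pat_usable(6, 14) ? "PAT kept" : "PAT cleared");
	printf("Core 2   (6, 15): %s\n",
	       pat_usable(6, 15) ? "PAT kept" : "PAT cleared");
	printf("P4      (15,  2): %s\n",
	       pat_usable(15, 2) ? "PAT kept" : "PAT cleared");
	return 0;
}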