Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c   99
1 file changed, 65 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 260fe4cb2c82..826d5c876278 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -23,11 +23,9 @@
 #include <asm/smp.h>
 #include <asm/cpu.h>
 #include <asm/cpumask.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
@@ -226,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_extended_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@ -407,11 +448,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		}
 
 		index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-		c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+		c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -419,13 +456,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 		core_bits = get_count_order(c->x86_max_cores);
 
-#ifdef CONFIG_X86_64
-		c->cpu_core_id = phys_pkg_id(index_msb) &
+		c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
-#else
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-#endif
 	}
 
 out:
@@ -594,11 +626,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -661,7 +692,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
 	c->apicid = c->initial_apicid;
 # endif
@@ -708,7 +739,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = phys_pkg_id(0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
 	/*
@@ -732,6 +763,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -1015,7 +1049,7 @@ void __cpuinit cpu_init(void)
 	barrier();
 
 	check_efer();
-	if (cpu != 0 && x2apic)
+	if (cpu != 0)
 		enable_x2apic();
 
 	/*
@@ -1062,22 +1096,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
 	}
-#endif
 
 	fpu_init();
 
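
For reference, the CPUID-level check that filter_cpuid_features() introduces above can be exercised outside the kernel. The sketch below is illustrative only: the helper name, the <stdint.h> types, and the sample level values are assumptions, not kernel code. It reproduces just the signed/unsigned comparison from the patch that decides whether a feature's required CPUID leaf is missing.

/*
 * Standalone sketch (not kernel code) of the level comparison used by
 * filter_cpuid_features().  Basic leaves (0x0000xxxx, positive as s32)
 * are compared signed against cpuid_level (-1 if CPUID is unavailable);
 * extended leaves (0x8000xxxx, negative as s32) are compared unsigned
 * against extended_cpuid_level (0 if unavailable).
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool level_missing(uint32_t required, int32_t cpuid_level,
			  uint32_t extended_cpuid_level)
{
	if ((int32_t)required < 0)
		return required > extended_cpuid_level;
	else
		return (int32_t)required > cpuid_level;
}

int main(void)
{
	/* XSAVE needs leaf 0x0000000d; a hypervisor capping at 0xa hides it. */
	printf("XSAVE filtered: %d\n",
	       level_missing(0x0000000d, 0x0a, 0x80000008));
	/* MWAIT needs leaf 0x00000005; cpuid_level == -1 means no CPUID at all. */
	printf("MWAIT filtered: %d\n",
	       level_missing(0x00000005, -1, 0));
	/* MWAIT with cpuid_level 0x0b is fine, so it is left enabled. */
	printf("MWAIT filtered: %d\n",
	       level_missing(0x00000005, 0x0b, 0x80000008));
	return 0;
}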