about summary refs log tree commit diff stats
path: root/arch/x86/kernel/cpu
diff options
context:
space:
mode:
author: Linus Torvalds <torvalds@linux-foundation.org> 2013-04-30 11:34:38 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org> 2013-04-30 11:34:38 -0400
commit: df8edfa9af5b2160549ed1a79b72e3ed13b6c7e2 (patch)
tree: c61b836ee22594de97cca58f018585dbe05f61a1 /arch/x86/kernel/cpu
parent: 874f6d1be7699b5d1873283b4737712cbabd7754 (diff)
parent: 1077c932db63ecc571c31df1c24d4a44e30928e5 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpuid changes from Ingo Molnar:
 "The biggest change is x86 CPU bug handling refactoring and cleanups, by Borislav Petkov"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, CPU, AMD: Drop useless label
  x86, AMD: Correct {rd,wr}msr_amd_safe warnings
  x86: Fold-in trivial check_config function
  x86, cpu: Convert AMD Erratum 400
  x86, cpu: Convert AMD Erratum 383
  x86, cpu: Convert Cyrix coma bug detection
  x86, cpu: Convert FDIV bug detection
  x86, cpu: Convert F00F bug detection
  x86, cpu: Expand cpufeature facility to include cpu bugs
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/amd.c     48
-rw-r--r--  arch/x86/kernel/cpu/bugs.c    34
-rw-r--r--  arch/x86/kernel/cpu/common.c   4
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c    5
-rw-r--r--  arch/x86/kernel/cpu/intel.c    4
-rw-r--r--  arch/x86/kernel/cpu/proc.c     6
6 files changed, 56 insertions, 45 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fa96eb0d02fb..5013a48d1aff 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -20,11 +20,11 @@
20 20
21static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) 21static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
22{ 22{
23 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
24 u32 gprs[8] = { 0 }; 23 u32 gprs[8] = { 0 };
25 int err; 24 int err;
26 25
27 WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); 26 WARN_ONCE((boot_cpu_data.x86 != 0xf),
27 "%s should only be used on K8!\n", __func__);
28 28
29 gprs[1] = msr; 29 gprs[1] = msr;
30 gprs[7] = 0x9c5a203a; 30 gprs[7] = 0x9c5a203a;
@@ -38,10 +38,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
38 38
39static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) 39static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
40{ 40{
41 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
42 u32 gprs[8] = { 0 }; 41 u32 gprs[8] = { 0 };
43 42
44 WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); 43 WARN_ONCE((boot_cpu_data.x86 != 0xf),
44 "%s should only be used on K8!\n", __func__);
45 45
46 gprs[0] = (u32)val; 46 gprs[0] = (u32)val;
47 gprs[1] = msr; 47 gprs[1] = msr;
@@ -192,11 +192,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
192 /* Athlon 660/661 is valid. */ 192 /* Athlon 660/661 is valid. */
193 if ((c->x86_model == 6) && ((c->x86_mask == 0) || 193 if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
194 (c->x86_mask == 1))) 194 (c->x86_mask == 1)))
195 goto valid_k7; 195 return;
196 196
197 /* Duron 670 is valid */ 197 /* Duron 670 is valid */
198 if ((c->x86_model == 7) && (c->x86_mask == 0)) 198 if ((c->x86_model == 7) && (c->x86_mask == 0))
199 goto valid_k7; 199 return;
200 200
201 /* 201 /*
202 * Athlon 662, Duron 671, and Athlon >model 7 have capability 202 * Athlon 662, Duron 671, and Athlon >model 7 have capability
@@ -209,7 +209,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
209 ((c->x86_model == 7) && (c->x86_mask >= 1)) || 209 ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
210 (c->x86_model > 7)) 210 (c->x86_model > 7))
211 if (cpu_has_mp) 211 if (cpu_has_mp)
212 goto valid_k7; 212 return;
213 213
214 /* If we get here, not a certified SMP capable AMD system. */ 214 /* If we get here, not a certified SMP capable AMD system. */
215 215
@@ -220,9 +220,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
220 WARN_ONCE(1, "WARNING: This combination of AMD" 220 WARN_ONCE(1, "WARNING: This combination of AMD"
221 " processors is not suitable for SMP.\n"); 221 " processors is not suitable for SMP.\n");
222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); 222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
223
224valid_k7:
225 ;
226} 223}
227 224
228static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) 225static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -513,6 +510,10 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
513#endif 510#endif
514} 511}
515 512
513static const int amd_erratum_383[];
514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum);
516
516static void __cpuinit init_amd(struct cpuinfo_x86 *c) 517static void __cpuinit init_amd(struct cpuinfo_x86 *c)
517{ 518{
518 u32 dummy; 519 u32 dummy;
@@ -727,8 +728,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
727 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value); 728 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
728 value &= ~(1ULL << 24); 729 value &= ~(1ULL << 24);
729 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
731
732 if (cpu_has_amd_erratum(amd_erratum_383))
733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
730 } 734 }
731 735
736 if (cpu_has_amd_erratum(amd_erratum_400))
737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
738
732 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
733} 740}
734 741
@@ -847,8 +854,7 @@ cpu_dev_register(amd_cpu_dev);
847 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that 854 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
848 * have an OSVW id assigned, which it takes as first argument. Both take a 855 * have an OSVW id assigned, which it takes as first argument. Both take a
849 * variable number of family-specific model-stepping ranges created by 856 * variable number of family-specific model-stepping ranges created by
850 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const 857 * AMD_MODEL_RANGE().
851 * int[] in arch/x86/include/asm/processor.h.
852 * 858 *
853 * Example: 859 * Example:
854 * 860 *
@@ -858,16 +864,22 @@ cpu_dev_register(amd_cpu_dev);
858 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); 864 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
859 */ 865 */
860 866
861const int amd_erratum_400[] = 867#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
868#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
869#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
870 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
871#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
872#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
873#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
874
875static const int amd_erratum_400[] =
862 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), 876 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
863 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 877 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
864EXPORT_SYMBOL_GPL(amd_erratum_400);
865 878
866const int amd_erratum_383[] = 879static const int amd_erratum_383[] =
867 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 880 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
868EXPORT_SYMBOL_GPL(amd_erratum_383);
869 881
870bool cpu_has_amd_erratum(const int *erratum) 882static bool cpu_has_amd_erratum(const int *erratum)
871{ 883{
872 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); 884 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
873 int osvw_id = *erratum++; 885 int osvw_id = *erratum++;
@@ -908,5 +920,3 @@ bool cpu_has_amd_erratum(const int *erratum)
908 920
909 return false; 921 return false;
910} 922}
911
912EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index af6455e3fcc9..4112be9a4659 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,7 @@ static void __init check_fpu(void)
59 * trap_init() enabled FXSR and company _before_ testing for FP 59 * trap_init() enabled FXSR and company _before_ testing for FP
60 * problems here. 60 * problems here.
61 * 61 *
62 * Test for the divl bug.. 62 * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
63 */ 63 */
64 __asm__("fninit\n\t" 64 __asm__("fninit\n\t"
65 "fldl %1\n\t" 65 "fldl %1\n\t"
@@ -75,26 +75,12 @@ static void __init check_fpu(void)
75 75
76 kernel_fpu_end(); 76 kernel_fpu_end();
77 77
78 boot_cpu_data.fdiv_bug = fdiv_bug; 78 if (fdiv_bug) {
79 if (boot_cpu_data.fdiv_bug) 79 set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
80 pr_warn("Hmm, FPU with FDIV bug\n"); 80 pr_warn("Hmm, FPU with FDIV bug\n");
81 }
81} 82}
82 83
83/*
84 * Check whether we are able to run this kernel safely on SMP.
85 *
86 * - i386 is no longer supported.
87 * - In order to run on anything without a TSC, we need to be
88 * compiled for a i486.
89 */
90
91static void __init check_config(void)
92{
93 if (boot_cpu_data.x86 < 4)
94 panic("Kernel requires i486+ for 'invlpg' and other features");
95}
96
97
98void __init check_bugs(void) 84void __init check_bugs(void)
99{ 85{
100 identify_boot_cpu(); 86 identify_boot_cpu();
@@ -102,7 +88,17 @@ void __init check_bugs(void)
102 pr_info("CPU: "); 88 pr_info("CPU: ");
103 print_cpu_info(&boot_cpu_data); 89 print_cpu_info(&boot_cpu_data);
104#endif 90#endif
105 check_config(); 91
92 /*
93 * Check whether we are able to run this kernel safely on SMP.
94 *
95 * - i386 is no longer supported.
96 * - In order to run on anything without a TSC, we need to be
97 * compiled for a i486.
98 */
99 if (boot_cpu_data.x86 < 4)
100 panic("Kernel requires i486+ for 'invlpg' and other features");
101
106 init_utsname()->machine[1] = 102 init_utsname()->machine[1] =
107 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 103 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
108 alternative_instructions(); 104 alternative_instructions();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d814772c5bed..22018f70a671 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -920,6 +920,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
920 /* AND the already accumulated flags with these */ 920 /* AND the already accumulated flags with these */
921 for (i = 0; i < NCAPINTS; i++) 921 for (i = 0; i < NCAPINTS; i++)
922 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 922 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
923
924 /* OR, i.e. replicate the bug flags */
925 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
926 c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
923 } 927 }
924 928
925 /* Init Machine Check Exception if available. */ 929 /* Init Machine Check Exception if available. */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 4fbd384fb645..d048d5ca43c1 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -249,7 +249,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
249 /* Emulate MTRRs using Cyrix's ARRs. */ 249 /* Emulate MTRRs using Cyrix's ARRs. */
250 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); 250 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
251 /* 6x86's contain this bug */ 251 /* 6x86's contain this bug */
252 c->coma_bug = 1; 252 set_cpu_bug(c, X86_BUG_COMA);
253 break; 253 break;
254 254
255 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 255 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
@@ -317,7 +317,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
317 /* Enable MMX extensions (App note 108) */ 317 /* Enable MMX extensions (App note 108) */
318 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); 318 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
319 } else { 319 } else {
320 c->coma_bug = 1; /* 6x86MX, it has the bug. */ 320 /* A 6x86MX - it has the bug. */
321 set_cpu_bug(c, X86_BUG_COMA);
321 } 322 }
322 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; 323 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
323 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; 324 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index e7ae0d89e7e0..a942b7c2ccee 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -221,11 +221,11 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
221 * system. 221 * system.
222 * Note that the workaround only should be initialized once... 222 * Note that the workaround only should be initialized once...
223 */ 223 */
224 c->f00f_bug = 0; 224 clear_cpu_bug(c, X86_BUG_F00F);
225 if (!paravirt_enabled() && c->x86 == 5) { 225 if (!paravirt_enabled() && c->x86 == 5) {
226 static int f00f_workaround_enabled; 226 static int f00f_workaround_enabled;
227 227
228 c->f00f_bug = 1; 228 set_cpu_bug(c, X86_BUG_F00F);
229 if (!f00f_workaround_enabled) { 229 if (!f00f_workaround_enabled) {
230 trap_init_f00f_bug(); 230 trap_init_f00f_bug();
231 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); 231 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e280253f6f94..37a198bd48c8 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -34,9 +34,9 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
34 "fpu_exception\t: %s\n" 34 "fpu_exception\t: %s\n"
35 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
36 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
37 c->fdiv_bug ? "yes" : "no", 37 static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
38 c->f00f_bug ? "yes" : "no", 38 static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
39 c->coma_bug ? "yes" : "no", 39 static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
40 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
41 c->hard_math ? "yes" : "no", 41 c->hard_math ? "yes" : "no",
42 c->cpuid_level, 42 c->cpuid_level,