author    Linus Torvalds <torvalds@linux-foundation.org>    2013-04-30 11:34:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-04-30 11:34:38 -0400
commit    df8edfa9af5b2160549ed1a79b72e3ed13b6c7e2 (patch)
tree      c61b836ee22594de97cca58f018585dbe05f61a1 /arch
parent    874f6d1be7699b5d1873283b4737712cbabd7754 (diff)
parent    1077c932db63ecc571c31df1c24d4a44e30928e5 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpuid changes from Ingo Molnar:
 "The biggest change is x86 CPU bug handling refactoring and cleanups,
  by Borislav Petkov"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, CPU, AMD: Drop useless label
  x86, AMD: Correct {rd,wr}msr_amd_safe warnings
  x86: Fold-in trivial check_config function
  x86, cpu: Convert AMD Erratum 400
  x86, cpu: Convert AMD Erratum 383
  x86, cpu: Convert Cyrix coma bug detection
  x86, cpu: Convert FDIV bug detection
  x86, cpu: Convert F00F bug detection
  x86, cpu: Expand cpufeature facility to include cpu bugs
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 19
-rw-r--r--  arch/x86/include/asm/processor.h  | 25
-rw-r--r--  arch/x86/kernel/alternative.c     |  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c         | 48
-rw-r--r--  arch/x86/kernel/cpu/bugs.c        | 34
-rw-r--r--  arch/x86/kernel/cpu/common.c      |  4
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c       |  5
-rw-r--r--  arch/x86/kernel/cpu/intel.c       |  4
-rw-r--r--  arch/x86/kernel/cpu/proc.c        |  6
-rw-r--r--  arch/x86/kernel/process.c         |  2
-rw-r--r--  arch/x86/kernel/setup.c           |  2
-rw-r--r--  arch/x86/kvm/svm.c                |  2
-rw-r--r--  arch/x86/mm/fault.c               |  2
13 files changed, 80 insertions, 75 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 6ce479800258..398f7cb1353d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -9,6 +9,7 @@
 #endif
 
 #define NCAPINTS        10      /* N 32-bit words worth of info */
+#define NBUGINTS        1       /* N 32-bit bug flags */
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
@@ -218,6 +219,17 @@
 #define X86_FEATURE_ADX         (9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP        (9*32+20) /* Supervisor Mode Access Prevention */
 
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x)              (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F            X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV            X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA            X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH  X86_BUG(3) /* AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E    X86_BUG(4) /* AMD Erratum 400 */
+
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
 #include <asm/asm.h>
@@ -404,6 +416,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 #define static_cpu_has(bit)     boot_cpu_has(bit)
 #endif
 
+#define cpu_has_bug(c, bit)     cpu_has(c, (bit))
+#define set_cpu_bug(c, bit)     set_cpu_cap(c, (bit))
+#define clear_cpu_bug(c, bit)   clear_cpu_cap(c, (bit));
+
+#define static_cpu_has_bug(bit) static_cpu_has((bit))
+#define boot_cpu_has_bug(bit)   cpu_has_bug(&boot_cpu_data, (bit))
+
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
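To make the new facility above concrete, here is a minimal, self-contained userspace sketch of how the bug bits ride in the extra words appended to x86_capability[]. The NCAPINTS/NBUGINTS values and the X86_BUG() numbering come from the hunk above; the struct and the two helpers are simplified stand-ins for the kernel's cpuinfo_x86, set_cpu_cap() and cpu_has(), written here only for illustration.

#include <stdint.h>
#include <stdio.h>

#define NCAPINTS 10 /* N 32-bit words worth of info */
#define NBUGINTS 1  /* N 32-bit bug flags */

#define X86_BUG(x)   (NCAPINTS*32 + (x))
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */

struct cpuinfo_x86 {
        uint32_t x86_capability[NCAPINTS + NBUGINTS];
};

/* Simplified stand-ins for set_cpu_cap()/cpu_has(): the real helpers operate
 * on the same array, so the bug flags need no new storage of their own. */
static void set_cpu_bug(struct cpuinfo_x86 *c, int bit)
{
        c->x86_capability[bit / 32] |= 1u << (bit % 32);
}

static int cpu_has_bug(const struct cpuinfo_x86 *c, int bit)
{
        return (c->x86_capability[bit / 32] >> (bit % 32)) & 1;
}

int main(void)
{
        struct cpuinfo_x86 c = { { 0 } };

        set_cpu_bug(&c, X86_BUG_F00F);
        /* X86_BUG_F00F == 320: bit 0 of word 10, the first bug word. */
        printf("word %d, bit %d, has_bug %d\n",
               X86_BUG_F00F / 32, X86_BUG_F00F % 32,
               cpu_has_bug(&c, X86_BUG_F00F));
        return 0;
}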
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3270116b1488..22224b3b43bb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,9 +91,6 @@ struct cpuinfo_x86 {
        /* Problems on some 486Dx4's and old 386's: */
        char hard_math;
        char rfu;
-       char fdiv_bug;
-       char f00f_bug;
-       char coma_bug;
        char pad0;
 #else
        /* Number of 4K pages in DTLB/ITLB combined(in pages): */
@@ -107,7 +104,7 @@ struct cpuinfo_x86 {
        __u32 extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int cpuid_level;
-       __u32 x86_capability[NCAPINTS];
+       __u32 x86_capability[NCAPINTS + NBUGINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        /* in KB - valid for CPUS which support this call: */
@@ -973,26 +970,6 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
        return ratio;
 }
 
-/*
- * AMD errata checking
- */
-#ifdef CONFIG_CPU_SUP_AMD
-extern const int amd_erratum_383[];
-extern const int amd_erratum_400[];
-extern bool cpu_has_amd_erratum(const int *);
-
-#define AMD_LEGACY_ERRATUM(...)        { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
-       ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range)  (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range)   (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range)     ((range) & 0xfff)
-
-#else
-#define cpu_has_amd_erratum(x) (false)
-#endif /* CONFIG_CPU_SUP_AMD */
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ef5ccca79a6c..c15cf9a25e27 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -271,7 +271,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
                replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
-               BUG_ON(a->cpuid >= NCAPINTS*32);
+               BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
                if (!boot_cpu_has(a->cpuid))
                        continue;
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fa96eb0d02fb..5013a48d1aff 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -20,11 +20,11 @@
 
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
        u32 gprs[8] = { 0 };
        int err;
 
-       WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+       WARN_ONCE((boot_cpu_data.x86 != 0xf),
+                 "%s should only be used on K8!\n", __func__);
 
        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;
@@ -38,10 +38,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 
 static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
 {
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
        u32 gprs[8] = { 0 };
 
-       WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+       WARN_ONCE((boot_cpu_data.x86 != 0xf),
+                 "%s should only be used on K8!\n", __func__);
 
        gprs[0] = (u32)val;
        gprs[1] = msr;
@@ -192,11 +192,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
            (c->x86_mask == 1)))
-               goto valid_k7;
+               return;
 
        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_mask == 0))
-               goto valid_k7;
+               return;
 
        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have capability
@@ -209,7 +209,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
            ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has_mp)
-                       goto valid_k7;
+                       return;
 
        /* If we get here, not a certified SMP capable AMD system. */
 
@@ -220,9 +220,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
        add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
-
-valid_k7:
-       ;
 }
 
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -513,6 +510,10 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 #endif
 }
 
+static const int amd_erratum_383[];
+static const int amd_erratum_400[];
+static bool cpu_has_amd_erratum(const int *erratum);
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
        u32 dummy;
@@ -727,8 +728,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
                value &= ~(1ULL << 24);
                wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
+
+               if (cpu_has_amd_erratum(amd_erratum_383))
+                       set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
        }
 
+       if (cpu_has_amd_erratum(amd_erratum_400))
+               set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
+
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
@@ -847,8 +854,7 @@ cpu_dev_register(amd_cpu_dev);
  * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
  * have an OSVW id assigned, which it takes as first argument. Both take a
  * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
- * int[] in arch/x86/include/asm/processor.h.
+ * AMD_MODEL_RANGE().
  *
  * Example:
  *
@@ -858,16 +864,22 @@ cpu_dev_register(amd_cpu_dev);
  *                 AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
  */
 
-const int amd_erratum_400[] =
+#define AMD_LEGACY_ERRATUM(...)        { -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+       ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range)  (((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range)   (((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range)     ((range) & 0xfff)
+
+static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_400);
 
-const int amd_erratum_383[] =
+static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_383);
 
-bool cpu_has_amd_erratum(const int *erratum)
+static bool cpu_has_amd_erratum(const int *erratum)
 {
        struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
@@ -908,5 +920,3 @@ bool cpu_has_amd_erratum(const int *erratum)
 
        return false;
 }
-
-EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
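As a quick illustration of the macros this patch moves into amd.c, the sketch below packs and unpacks an erratum range exactly as AMD_MODEL_RANGE() does. The macro definitions and the amd_erratum_400 table are copied from the hunk above; the main() harness and the printout are only for demonstration and are not kernel code.

#include <stdio.h>

#define AMD_LEGACY_ERRATUM(...)        { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
       ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)  (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)   (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)     ((range) & 0xfff)

static const int amd_erratum_400[] =
       AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                           AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

int main(void)
{
       int i;

       /* Slot 0 holds the OSVW id; the range list after it is 0-terminated. */
       printf("OSVW id: %d\n", amd_erratum_400[0]);
       for (i = 1; amd_erratum_400[i]; i++) {
               int range = amd_erratum_400[i];

               printf("family 0x%x: model/stepping 0x%03x .. 0x%03x\n",
                      AMD_MODEL_RANGE_FAMILY(range),
                      AMD_MODEL_RANGE_START(range),
                      AMD_MODEL_RANGE_END(range));
       }
       return 0;
}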
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index af6455e3fcc9..4112be9a4659 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,7 @@ static void __init check_fpu(void)
         * trap_init() enabled FXSR and company _before_ testing for FP
         * problems here.
         *
-        * Test for the divl bug..
+        * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
         */
        __asm__("fninit\n\t"
                "fldl %1\n\t"
@@ -75,26 +75,12 @@ static void __init check_fpu(void)
 
        kernel_fpu_end();
 
-       boot_cpu_data.fdiv_bug = fdiv_bug;
-       if (boot_cpu_data.fdiv_bug)
+       if (fdiv_bug) {
+               set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
                pr_warn("Hmm, FPU with FDIV bug\n");
+       }
 }
 
-/*
- * Check whether we are able to run this kernel safely on SMP.
- *
- * - i386 is no longer supported.
- * - In order to run on anything without a TSC, we need to be
- *   compiled for a i486.
- */
-
-static void __init check_config(void)
-{
-       if (boot_cpu_data.x86 < 4)
-               panic("Kernel requires i486+ for 'invlpg' and other features");
-}
-
-
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -102,7 +88,17 @@ void __init check_bugs(void)
        pr_info("CPU: ");
        print_cpu_info(&boot_cpu_data);
 #endif
-       check_config();
+
+       /*
+        * Check whether we are able to run this kernel safely on SMP.
+        *
+        * - i386 is no longer supported.
+        * - In order to run on anything without a TSC, we need to be
+        *   compiled for a i486.
+        */
+       if (boot_cpu_data.x86 < 4)
+               panic("Kernel requires i486+ for 'invlpg' and other features");
+
        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();
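For context on what check_fpu() is recording here: the detection itself is the classic Pentium FDIV probe, which this patch leaves untouched and merely rewires to set_cpu_bug(). Below is a hedged userspace sketch of that probe using the well-known 4195835/3145727 operand pair; the function name is hypothetical and only the arithmetic mirrors what the kernel test computes.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's FDIV probe: on a flawed Pentium FPU,
 * x - (x / y) * y comes out non-zero (historically 256) instead of 0. */
static int fdiv_bug_present(void)
{
       volatile double x = 4195835.0, y = 3145727.0;

       return (x - (x / y) * y) != 0.0;
}

int main(void)
{
       printf("FDIV bug detected: %s\n", fdiv_bug_present() ? "yes" : "no");
       return 0;
}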
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d814772c5bed..22018f70a671 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -920,6 +920,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+
+               /* OR, i.e. replicate the bug flags */
+               for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
+                       c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
        }
 
        /* Init Machine Check Exception if available. */
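The identify_cpu() hunk above merges capability data in two directions: feature words are ANDed into boot_cpu_data (keep only what every CPU supports), while the new bug words are ORed from boot_cpu_data into the secondary CPU, so a bug detected on the boot CPU is assumed to apply everywhere. A small self-contained sketch of that asymmetry, with made-up bit values purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define NCAPINTS 10
#define NBUGINTS 1

int main(void)
{
       /* Hypothetical feature/bug words for the boot CPU and one secondary. */
       uint32_t boot[NCAPINTS + NBUGINTS] = { 0 };
       uint32_t cpu1[NCAPINTS + NBUGINTS] = { 0 };
       int i;

       boot[0] = 0x3;        /* boot CPU advertises features 0 and 1 */
       cpu1[0] = 0x1;        /* the secondary CPU only has feature 0 */
       boot[NCAPINTS] = 0x4; /* boot CPU detected bug bit 2 */

       /* Features: keep only what every CPU supports (AND). */
       for (i = 0; i < NCAPINTS; i++)
               boot[i] &= cpu1[i];

       /* Bugs: replicate the boot CPU's bug flags to the secondary (OR). */
       for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
               cpu1[i] |= boot[i];

       printf("common features: %#x, cpu1 bug word: %#x\n",
              boot[0], cpu1[NCAPINTS]);
       return 0;
}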
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 4fbd384fb645..d048d5ca43c1 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -249,7 +249,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
                /* Emulate MTRRs using Cyrix's ARRs. */
                set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
                /* 6x86's contain this bug */
-               c->coma_bug = 1;
+               set_cpu_bug(c, X86_BUG_COMA);
                break;
 
        case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
@@ -317,7 +317,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
                        /* Enable MMX extensions (App note 108) */
                        setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
                } else {
-                       c->coma_bug = 1; /* 6x86MX, it has the bug. */
+                       /* A 6x86MX - it has the bug. */
+                       set_cpu_bug(c, X86_BUG_COMA);
                }
                tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
                Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index e7ae0d89e7e0..a942b7c2ccee 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -221,11 +221,11 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
         * system.
         * Note that the workaround only should be initialized once...
         */
-       c->f00f_bug = 0;
+       clear_cpu_bug(c, X86_BUG_F00F);
        if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled;
 
-               c->f00f_bug = 1;
+               set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        trap_init_f00f_bug();
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e280253f6f94..37a198bd48c8 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -34,9 +34,9 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
                   "fpu_exception\t: %s\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: %s\n",
-                  c->fdiv_bug ? "yes" : "no",
-                  c->f00f_bug ? "yes" : "no",
-                  c->coma_bug ? "yes" : "no",
+                  static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
+                  static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
+                  static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
                   c->hard_math ? "yes" : "no",
                   c->hard_math ? "yes" : "no",
                   c->cpuid_level,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6833bffaadb7..14fcf55a5c5b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -444,7 +444,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
        if (x86_idle || boot_option_idle_override == IDLE_POLL)
                return;
 
-       if (cpu_has_amd_erratum(amd_erratum_400)) {
+       if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                pr_info("using AMD E400 aware idle routine\n");
                x86_idle = amd_e400_idle;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index fae9134a2de9..a5d550f2fa6e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -173,12 +173,10 @@ static struct resource bss_resource = {
 /* cpu data as detected by the assembly code in head.S */
 struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
        .wp_works_ok = -1,
-       .fdiv_bug = -1,
 };
 /* common cpu data for all cpus */
 struct cpuinfo_x86 boot_cpu_data __read_mostly = {
        .wp_works_ok = -1,
-       .fdiv_bug = -1,
 };
 EXPORT_SYMBOL(boot_cpu_data);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e1b1ce21bc00..7d39d70647e3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -555,7 +555,7 @@ static void svm_init_erratum_383(void)
        int err;
        u64 val;
 
-       if (!cpu_has_amd_erratum(amd_erratum_383))
+       if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;
 
        /* Use _safe variants to not break nested virtualization */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 022a9a0a3c63..654be4ae3047 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -557,7 +557,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
        /*
         * Pentium F0 0F C7 C8 bug workaround:
         */
-       if (boot_cpu_data.f00f_bug) {
+       if (boot_cpu_has_bug(X86_BUG_F00F)) {
                nr = (address - idt_descr.address) >> 3;
 
                if (nr == 6) {