about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-03-31 15:00:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-03-31 15:00:45 -0400
commit918d80a136430aeb23659aa75f8b415090500667 (patch)
treed11d394f63ed9ea0d1830b87cae6d5200501a7cd /arch/x86/kernel
parent26a5c0dfbc9c4b1c455821c0a0ea6d6116082397 (diff)
parent69f2366c9456d0ce784cf5aba87ee77eeadc1d5e (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu handling changes from Ingo Molnar:
 "Bigger changes:

   - Intel CPU hardware-enablement: new vector instructions support
     (AVX-512), by Fenghua Yu.

   - Support the clflushopt instruction and use it in appropriate
     places. clflushopt is similar to clflush but with more relaxed
     ordering, by Ross Zwisler.

   - MSR accessor cleanups, by Borislav Petkov.

   - 'forcepae' boot flag for those who have way too much time to spend
     on way too old Pentium-M systems and want to live way too
     dangerously, by Chris Bainbridge"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, cpu: Add forcepae parameter for booting PAE kernels on PAE-disabled Pentium M
  Rename TAINT_UNSAFE_SMP to TAINT_CPU_OUT_OF_SPEC
  x86, intel: Make MSR_IA32_MISC_ENABLE bit constants systematic
  x86, Intel: Convert to the new bit access MSR accessors
  x86, AMD: Convert to the new bit access MSR accessors
  x86: Add another set of MSR accessor functions
  x86: Use clflushopt in drm_clflush_virt_range
  x86: Use clflushopt in drm_clflush_page
  x86: Use clflushopt in clflush_cache_range
  x86: Add support for the clflushopt instruction
  x86, AVX-512: Enable AVX-512 States Context Switch
  x86, AVX-512: AVX-512 Feature Detection
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/amd.c    50
-rw-r--r--  arch/x86/kernel/cpu/intel.c  53
2 files changed, 43 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c67ffa686064..ce8b8ff0e0ef 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -218,7 +218,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
218 */ 218 */
219 WARN_ONCE(1, "WARNING: This combination of AMD" 219 WARN_ONCE(1, "WARNING: This combination of AMD"
220 " processors is not suitable for SMP.\n"); 220 " processors is not suitable for SMP.\n");
221 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); 221 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
222} 222}
223 223
224static void init_amd_k7(struct cpuinfo_x86 *c) 224static void init_amd_k7(struct cpuinfo_x86 *c)
@@ -233,9 +233,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
233 if (c->x86_model >= 6 && c->x86_model <= 10) { 233 if (c->x86_model >= 6 && c->x86_model <= 10) {
234 if (!cpu_has(c, X86_FEATURE_XMM)) { 234 if (!cpu_has(c, X86_FEATURE_XMM)) {
235 printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); 235 printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
236 rdmsr(MSR_K7_HWCR, l, h); 236 msr_clear_bit(MSR_K7_HWCR, 15);
237 l &= ~0x00008000;
238 wrmsr(MSR_K7_HWCR, l, h);
239 set_cpu_cap(c, X86_FEATURE_XMM); 237 set_cpu_cap(c, X86_FEATURE_XMM);
240 } 238 }
241 } 239 }
@@ -509,14 +507,8 @@ static void early_init_amd(struct cpuinfo_x86 *c)
509#endif 507#endif
510 508
511 /* F16h erratum 793, CVE-2013-6885 */ 509 /* F16h erratum 793, CVE-2013-6885 */
512 if (c->x86 == 0x16 && c->x86_model <= 0xf) { 510 if (c->x86 == 0x16 && c->x86_model <= 0xf)
513 u64 val; 511 msr_set_bit(MSR_AMD64_LS_CFG, 15);
514
515 rdmsrl(MSR_AMD64_LS_CFG, val);
516 if (!(val & BIT(15)))
517 wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
518 }
519
520} 512}
521 513
522static const int amd_erratum_383[]; 514static const int amd_erratum_383[];
@@ -536,11 +528,8 @@ static void init_amd(struct cpuinfo_x86 *c)
536 * Errata 63 for SH-B3 steppings 528 * Errata 63 for SH-B3 steppings
537 * Errata 122 for all steppings (F+ have it disabled by default) 529 * Errata 122 for all steppings (F+ have it disabled by default)
538 */ 530 */
539 if (c->x86 == 0xf) { 531 if (c->x86 == 0xf)
540 rdmsrl(MSR_K7_HWCR, value); 532 msr_set_bit(MSR_K7_HWCR, 6);
541 value |= 1 << 6;
542 wrmsrl(MSR_K7_HWCR, value);
543 }
544#endif 533#endif
545 534
546 early_init_amd(c); 535 early_init_amd(c);
@@ -623,14 +612,11 @@ static void init_amd(struct cpuinfo_x86 *c)
623 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && 612 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
624 !cpu_has(c, X86_FEATURE_TOPOEXT)) { 613 !cpu_has(c, X86_FEATURE_TOPOEXT)) {
625 614
626 if (!rdmsrl_safe(0xc0011005, &value)) { 615 if (msr_set_bit(0xc0011005, 54) > 0) {
627 value |= 1ULL << 54;
628 wrmsrl_safe(0xc0011005, value);
629 rdmsrl(0xc0011005, value); 616 rdmsrl(0xc0011005, value);
630 if (value & (1ULL << 54)) { 617 if (value & BIT_64(54)) {
631 set_cpu_cap(c, X86_FEATURE_TOPOEXT); 618 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
632 printk(KERN_INFO FW_INFO "CPU: Re-enabling " 619 pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
633 "disabled Topology Extensions Support\n");
634 } 620 }
635 } 621 }
636 } 622 }
@@ -709,19 +695,12 @@ static void init_amd(struct cpuinfo_x86 *c)
709 * Disable GART TLB Walk Errors on Fam10h. We do this here 695 * Disable GART TLB Walk Errors on Fam10h. We do this here
710 * because this is always needed when GART is enabled, even in a 696 * because this is always needed when GART is enabled, even in a
711 * kernel which has no MCE support built in. 697 * kernel which has no MCE support built in.
712 * BIOS should disable GartTlbWlk Errors themself. If 698 * BIOS should disable GartTlbWlk Errors already. If
713 * it doesn't do it here as suggested by the BKDG. 699 * it doesn't, do it here as suggested by the BKDG.
714 * 700 *
715 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 701 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
716 */ 702 */
717 u64 mask; 703 msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
718 int err;
719
720 err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
721 if (err == 0) {
722 mask |= (1 << 10);
723 wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
724 }
725 704
726 /* 705 /*
727 * On family 10h BIOS may not have properly enabled WC+ support, 706 * On family 10h BIOS may not have properly enabled WC+ support,
@@ -733,10 +712,7 @@ static void init_amd(struct cpuinfo_x86 *c)
733 * NOTE: we want to use the _safe accessors so as not to #GP kvm 712 * NOTE: we want to use the _safe accessors so as not to #GP kvm
734 * guests on older kvm hosts. 713 * guests on older kvm hosts.
735 */ 714 */
736 715 msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
737 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
738 value &= ~(1ULL << 24);
739 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
740 716
741 if (cpu_has_amd_erratum(c, amd_erratum_383)) 717 if (cpu_has_amd_erratum(c, amd_erratum_383))
742 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 718 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5cd9bfabd645..897d6201ef10 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -31,11 +31,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
31 31
32 /* Unmask CPUID levels if masked: */ 32 /* Unmask CPUID levels if masked: */
33 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { 33 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
34 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 34 if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
35 35 MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
36 if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
37 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
38 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
39 c->cpuid_level = cpuid_eax(0); 36 c->cpuid_level = cpuid_eax(0);
40 get_cpu_cap(c); 37 get_cpu_cap(c);
41 } 38 }
@@ -129,16 +126,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
129 * Ingo Molnar reported a Pentium D (model 6) and a Xeon 126 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
130 * (model 2) with the same problem. 127 * (model 2) with the same problem.
131 */ 128 */
132 if (c->x86 == 15) { 129 if (c->x86 == 15)
133 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 130 if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
134 131 MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
135 if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { 132 pr_info("kmemcheck: Disabling fast string operations\n");
136 printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
137
138 misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
139 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
140 }
141 }
142#endif 133#endif
143 134
144 /* 135 /*
@@ -195,10 +186,16 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
195 } 186 }
196} 187}
197 188
198static void intel_workarounds(struct cpuinfo_x86 *c) 189static int forcepae;
190static int __init forcepae_setup(char *__unused)
199{ 191{
200 unsigned long lo, hi; 192 forcepae = 1;
193 return 1;
194}
195__setup("forcepae", forcepae_setup);
201 196
197static void intel_workarounds(struct cpuinfo_x86 *c)
198{
202#ifdef CONFIG_X86_F00F_BUG 199#ifdef CONFIG_X86_F00F_BUG
203 /* 200 /*
204 * All current models of Pentium and Pentium with MMX technology CPUs 201 * All current models of Pentium and Pentium with MMX technology CPUs
@@ -225,16 +222,26 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
225 clear_cpu_cap(c, X86_FEATURE_SEP); 222 clear_cpu_cap(c, X86_FEATURE_SEP);
226 223
227 /* 224 /*
225 * PAE CPUID issue: many Pentium M report no PAE but may have a
226 * functionally usable PAE implementation.
227 * Forcefully enable PAE if kernel parameter "forcepae" is present.
228 */
229 if (forcepae) {
230 printk(KERN_WARNING "PAE forced!\n");
231 set_cpu_cap(c, X86_FEATURE_PAE);
232 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
233 }
234
235 /*
228 * P4 Xeon errata 037 workaround. 236 * P4 Xeon errata 037 workaround.
229 * Hardware prefetcher may cause stale data to be loaded into the cache. 237 * Hardware prefetcher may cause stale data to be loaded into the cache.
230 */ 238 */
231 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { 239 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
232 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); 240 if (msr_set_bit(MSR_IA32_MISC_ENABLE,
233 if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { 241 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
234 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); 242 > 0) {
235 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); 243 pr_info("CPU: C0 stepping P4 Xeon detected.\n");
236 lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; 244 pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
237 wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
238 } 245 }
239 } 246 }
240 247