Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
 arch/x86/kernel/cpu/bugs.c | 46 +++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 41 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4c2313d0b9ca..40bdaea97fe7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	override_cache_bits(&boot_cpu_data);
+
 	switch (l1tf_mitigation) {
 	case L1TF_MITIGATION_OFF:
 	case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
694 return; 735 return;
695#endif 736#endif
696 737
697 /*
698 * This is extremely unlikely to happen because almost all
699 * systems have far more MAX_PA/2 than RAM can be fit into
700 * DIMM slots.
701 */
702 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 738 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
703 if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 739 if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
704 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 740 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
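For reference, the arithmetic behind the new comment can be checked with a small standalone C sketch. This is not part of the patch and does not use the kernel's helpers; the half_pa() helper and the 36/44 bit widths below are illustrative stand-ins taken from the comment text.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative stand-in, not kernel code: shows why a CPU reporting
 * 36 physical address bits with 32G of RAM (plus memory holes) trips
 * the MAX_PA/2 range check, while the 44-bit internal cache width
 * leaves ample headroom.
 */
static uint64_t half_pa(unsigned int addr_bits)
{
	/* Half of the addressable physical space, in bytes. */
	return 1ULL << (addr_bits - 1);
}

int main(void)
{
	/* 36: CPUID-reported physical bits; 44: internal cache width. */
	printf("MAX_PA/2 with 36 bits: %llu GiB\n",
	       (unsigned long long)(half_pa(36) >> 30));	/* 32 GiB   */
	printf("MAX_PA/2 with 44 bits: %llu GiB\n",
	       (unsigned long long)(half_pa(44) >> 30));	/* 8192 GiB */
	return 0;
}

With 36 reported bits the limit is 32 GiB, so a 32G machine whose e820 map has holes above that mark fails the check; the 44-bit cache-derived limit is 8 TiB, which is the false positive override_cache_bits() avoids.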