Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--  arch/x86/kernel/cpu/common.c  70
1 file changed, 65 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ef29ad001991..d63f4b5706e4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
 #include <asm/pat.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -748,6 +750,26 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
 	}
 }
 
+static void init_speculation_control(struct cpuinfo_x86 *c)
+{
+	/*
+	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+	 * and they also have a different bit for STIBP support. Also,
+	 * a hypervisor might have set the individual AMD bits even on
+	 * Intel CPUs, for finer-grained selection of what's available.
+	 *
+	 * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
+	 * features, which are visible in /proc/cpuinfo and used by the
+	 * kernel. So set those accordingly from the Intel bits.
+	 */
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_IBPB);
+	}
+	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+		set_cpu_cap(c, X86_FEATURE_STIBP);
+}
+
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
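The block comment in init_speculation_control() above carries the reasoning of the patch: Intel advertises IBRS/IBPB and STIBP through CPUID.(EAX=7,ECX=0):EDX, and the helper folds those into the AMD-defined feature flags the kernel already exposes. As a standalone illustration only (not part of the patch), here is a minimal userspace C sketch that reads the same leaf with GCC/Clang's <cpuid.h>, assuming the Intel-documented bit positions 26 (SPEC_CTRL), 27 (STIBP) and 29 (ARCH_CAPABILITIES):

/*
 * Illustration only, not patch code: query the CPUID.(EAX=7,ECX=0):EDX
 * bits that init_speculation_control() consumes in the kernel.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 7, subleaf 0: structured extended feature flags. */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                return 1;

        printf("SPEC_CTRL (IBRS/IBPB): %s\n", (edx >> 26) & 1 ? "yes" : "no");
        printf("STIBP:                 %s\n", (edx >> 27) & 1 ? "yes" : "no");
        printf("ARCH_CAPABILITIES MSR: %s\n", (edx >> 29) & 1 ? "yes" : "no");
        return 0;
}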
@@ -769,6 +791,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 		c->x86_capability[CPUID_7_0_EBX] = ebx;
 		c->x86_capability[CPUID_7_ECX] = ecx;
+		c->x86_capability[CPUID_7_EDX] = edx;
 	}
 
 	/* Extended state features: level 0x0000000d */
@@ -841,6 +864,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
 	init_scattered_cpuid_features(c);
+	init_speculation_control(c);
 
 	/*
 	 * Clear/Set all flags overridden by options, after probe.
@@ -876,6 +900,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
+static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+	{ X86_VENDOR_CENTAUR, 5 },
+	{ X86_VENDOR_INTEL, 5 },
+	{ X86_VENDOR_NSC, 5 },
+	{ X86_VENDOR_ANY, 4 },
+	{}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+	{ X86_VENDOR_AMD },
+	{}
+};
+
+static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+{
+	u64 ia32_cap = 0;
+
+	if (x86_match_cpu(cpu_no_meltdown))
+		return false;
+
+	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	/* Rogue Data Cache Load? No! */
+	if (ia32_cap & ARCH_CAP_RDCL_NO)
+		return false;
+
+	return true;
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
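cpu_vulnerable_to_meltdown() above combines an x86_match_cpu() vendor check against cpu_no_meltdown with a read of the IA32_ARCH_CAPABILITIES MSR (0x10a), whose RDCL_NO bit (bit 0) lets hardware declare itself immune to Rogue Data Cache Load. Note that the MSR is only read after CPUID advertises it, because reading an unimplemented MSR raises #GP in the kernel. Purely as an illustration (not patch code), a userspace sketch of the same MSR check via the msr driver, assuming the module is loaded and the tool runs as root:

/*
 * Illustration only: read IA32_ARCH_CAPABILITIES (MSR 0x10a) through
 * /dev/cpu/0/msr and test the RDCL_NO bit (bit 0), mirroring the
 * rdmsrl() check in cpu_vulnerable_to_meltdown(). Only meaningful if
 * CPUID reported the ARCH_CAPABILITIES feature first.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* The msr driver uses the file offset as the MSR index. */
        if (pread(fd, &val, sizeof(val), 0x10a) != sizeof(val)) {
                perror("rdmsr 0x10a");
                close(fd);
                return 1;
        }
        close(fd);
        printf("RDCL_NO (bit 0): %s\n",
               (val & 1) ? "set, CPU reports Meltdown immunity" : "clear");
        return 0;
}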
@@ -923,11 +982,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-	if (c->x86_vendor != X86_VENDOR_AMD)
-		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+	if (!x86_match_cpu(cpu_no_speculation)) {
+		if (cpu_vulnerable_to_meltdown(c))
+			setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+		setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+	}
 
 	fpu__init_system(c);
 
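The final hunk is where the new checks take effect: X86_BUG_CPU_MELTDOWN is forced only when cpu_vulnerable_to_meltdown() says the part is affected, and none of the three bug bits are set on the old in-order CPUs listed in cpu_no_speculation. These X86_BUG_* bits are what show up in the "bugs" line of /proc/cpuinfo and, on kernels that expose the directory, under /sys/devices/system/cpu/vulnerabilities/. A small sketch (illustration only, not from this patch) that prints those files:

/*
 * Illustration only: report what the kernel decided for this CPU by
 * reading the sysfs vulnerability files, where present.
 */
#include <stdio.h>

int main(void)
{
        static const char *const names[] = {
                "meltdown", "spectre_v1", "spectre_v2"
        };
        char path[128], line[256];

        for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/vulnerabilities/%s",
                         names[i]);
                FILE *f = fopen(path, "r");
                if (!f)
                        continue;       /* file absent on older kernels */
                if (fgets(line, sizeof(line), f))
                        printf("%s: %s", names[i], line);
                fclose(f);
        }
        return 0;
}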