about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJeremy Linton <jeremy.linton@arm.com>2019-04-15 17:21:27 -0400
committerWill Deacon <will.deacon@arm.com>2019-04-26 11:32:45 -0400
commitd42281b6e49510f078ace15a8ea10f71e6262581 (patch)
tree232ae720c361bc84f51245f188a3fb4a90389bc3
parentd2532e27b5638bb2e2dd52b80b7ea2ec65135377 (diff)
arm64: Always enable ssb vulnerability detection
Ensure we are always able to detect whether or not the CPU is affected by SSB, so that we can later advertise this to userspace.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
[will: Use IS_ENABLED instead of #ifdef]
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/include/asm/cpufeature.h4
-rw-r--r--arch/arm64/kernel/cpu_errata.c9
2 files changed, 5 insertions, 8 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e505e1fbd2b9..6ccdc97e5d6a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -638,11 +638,7 @@ static inline int arm64_get_ssbd_state(void)
638#endif 638#endif
639} 639}
640 640
641#ifdef CONFIG_ARM64_SSBD
642void arm64_set_ssbd_mitigation(bool state); 641void arm64_set_ssbd_mitigation(bool state);
643#else
644static inline void arm64_set_ssbd_mitigation(bool state) {}
645#endif
646 642
647extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); 643extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
648 644
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index d2bbafa04b3c..b6132783e8cf 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -275,7 +275,6 @@ static int detect_harden_bp_fw(void)
275 return 1; 275 return 1;
276} 276}
277 277
278#ifdef CONFIG_ARM64_SSBD
279DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); 278DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
280 279
281int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; 280int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
@@ -348,6 +347,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
348 347
349void arm64_set_ssbd_mitigation(bool state) 348void arm64_set_ssbd_mitigation(bool state)
350{ 349{
350 if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
351 pr_info_once("SSBD disabled by kernel configuration\n");
352 return;
353 }
354
351 if (this_cpu_has_cap(ARM64_SSBS)) { 355 if (this_cpu_has_cap(ARM64_SSBS)) {
352 if (state) 356 if (state)
353 asm volatile(SET_PSTATE_SSBS(0)); 357 asm volatile(SET_PSTATE_SSBS(0));
@@ -467,7 +471,6 @@ out_printmsg:
467 471
468 return required; 472 return required;
469} 473}
470#endif /* CONFIG_ARM64_SSBD */
471 474
472static void __maybe_unused 475static void __maybe_unused
473cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) 476cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
@@ -759,14 +762,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
759 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), 762 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
760 }, 763 },
761#endif 764#endif
762#ifdef CONFIG_ARM64_SSBD
763 { 765 {
764 .desc = "Speculative Store Bypass Disable", 766 .desc = "Speculative Store Bypass Disable",
765 .capability = ARM64_SSBD, 767 .capability = ARM64_SSBD,
766 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 768 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
767 .matches = has_ssbd_mitigation, 769 .matches = has_ssbd_mitigation,
768 }, 770 },
769#endif
770#ifdef CONFIG_ARM64_ERRATUM_1188873 771#ifdef CONFIG_ARM64_ERRATUM_1188873
771 { 772 {
772 /* Cortex-A76 r0p0 to r2p0 */ 773 /* Cortex-A76 r0p0 to r2p0 */