author	Jeremy Linton <jeremy.linton@arm.com>	2019-04-15 17:21:28 -0400
committer	Will Deacon <will.deacon@arm.com>	2019-05-01 09:47:55 -0400
commit	526e065dbca6df0b5a130b84b836b8b3c9f54e21 (patch)
tree	300e37207d1293a08de24348aabaf40e5ee405e6
parent	d42281b6e49510f078ace15a8ea10f71e6262581 (diff)
arm64: add sysfs vulnerability show for speculative store bypass
Return status based on ssbd_state and __ssb_safe. If the mitigation is
disabled, or the firmware isn't responding, then return the expected
machine state based on a whitelist of known good cores.

Given a heterogeneous machine, the overall machine vulnerability
defaults to safe but is reset to unsafe when we miss the whitelist and
the firmware doesn't explicitly tell us the core is safe. In order to
make that work, we delay transitioning to vulnerable until we know the
firmware isn't responding, to avoid a case where we miss the whitelist
but the firmware goes ahead and reports the core is not vulnerable. If
all the cores in the machine have SSBS, then __ssb_safe will remain
true.

Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
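As a rough illustration of the interplay described above, here is a minimal,
self-contained userspace sketch of the delayed safe-to-unsafe transition. It
is not kernel code; the names (machine_ssb_safe, account_cpu, fw_reply) are
hypothetical stand-ins for __ssb_safe and the SMCCC return handling in the
patch below:

/* Hypothetical model of the whitelist/firmware logic; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool machine_ssb_safe = true;	/* plays the role of __ssb_safe */

enum fw_reply { FW_NO_RESPONSE, FW_NOT_REQUIRED, FW_SUCCESS };

static void account_cpu(bool on_whitelist, enum fw_reply reply)
{
	if (reply == FW_SUCCESS) {
		/* firmware says the mitigation is needed: definitely unsafe */
		machine_ssb_safe = false;
		return;
	}
	/*
	 * Delayed transition: only the combination of a whitelist miss
	 * and a silent firmware marks the machine unsafe.
	 */
	if (reply == FW_NO_RESPONSE && !on_whitelist)
		machine_ssb_safe = false;
}

int main(void)
{
	account_cpu(true, FW_NO_RESPONSE);	/* whitelisted core: still safe */
	account_cpu(false, FW_NOT_REQUIRED);	/* firmware vouches: still safe */
	account_cpu(false, FW_NO_RESPONSE);	/* miss + silence: now unsafe */
	printf("machine safe: %s\n", machine_ssb_safe ? "yes" : "no");
	return 0;
}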
-rw-r--r--	arch/arm64/kernel/cpu_errata.c | 42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b6132783e8cf..4bb0f7cad418 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -278,6 +278,7 @@ static int detect_harden_bp_fw(void)
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
 
 static const struct ssbd_options {
 	const char *str;
@@ -381,6 +382,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	struct arm_smccc_res res;
 	bool required = true;
 	s32 val;
+	bool this_cpu_safe = false;
 
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
@@ -389,8 +391,14 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 		goto out_printmsg;
 	}
 
+	/* delay setting __ssb_safe until we get a firmware response */
+	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+		this_cpu_safe = true;
+
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -407,6 +415,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -415,14 +425,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 	switch (val) {
 	case SMCCC_RET_NOT_SUPPORTED:
 		ssbd_state = ARM64_SSBD_UNKNOWN;
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 
+	/* machines with mixed mitigation requirements must not return this */
 	case SMCCC_RET_NOT_REQUIRED:
 		pr_info_once("%s mitigation not required\n", entry->desc);
 		ssbd_state = ARM64_SSBD_MITIGATED;
 		return false;
 
 	case SMCCC_RET_SUCCESS:
+		__ssb_safe = false;
 		required = true;
 		break;
 
@@ -432,6 +446,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	default:
 		WARN_ON(1);
+		if (!this_cpu_safe)
+			__ssb_safe = false;
 		return false;
 	}
 
@@ -472,6 +488,14 @@ out_printmsg:
 	return required;
 }
 
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+	{},
+};
+
 static void __maybe_unused
 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 {
@@ -767,6 +791,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
+		.midr_range_list = arm64_ssb_cpus,
 	},
 #ifdef CONFIG_ARM64_ERRATUM_1188873
 	{
@@ -805,3 +830,20 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "Vulnerable\n");
 }
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (__ssb_safe)
+		return sprintf(buf, "Not affected\n");
+
+	switch (ssbd_state) {
+	case ARM64_SSBD_KERNEL:
+	case ARM64_SSBD_FORCE_ENABLE:
+		if (IS_ENABLED(CONFIG_ARM64_SSBD))
+			return sprintf(buf,
+			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
+	}
+
+	return sprintf(buf, "Vulnerable\n");
+}
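With the patch applied, the new state is exported through the standard sysfs
vulnerabilities directory. A minimal userspace reader, as a sketch (the path
is the usual sysfs location for these files; the program itself is not part
of the patch):

/* Tiny sysfs reader exercising the cpu_show_spec_store_bypass() handler. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Not affected" or "Vulnerable" */
	fclose(f);
	return 0;
}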