aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@armlinux.org.uk>2018-05-14 04:40:24 -0400
committerRussell King <rmk+kernel@armlinux.org.uk>2018-08-02 12:41:38 -0400
commita3c0f84765bb429ba0fd23de1c57b5e1591c9389 (patch)
tree993cc81b042f839d871122c5ed4bf3b1c5770d2f
parentb1cd0a14806321721aae45f5446ed83a3647c914 (diff)
ARM: spectre-v1: mitigate user accesses
Spectre variant 1 attacks are about this sequence of pseudo-code: index = load(user-manipulated pointer); access(base + index * stride); In order for the cache side-channel to work, the access() must be made to memory which userspace can detect whether cache lines have been loaded. On 32-bit ARM, this must be either user accessible memory, or a kernel mapping of that same user accessible memory. The problem occurs when the load() speculatively loads privileged data, and the subsequent access() is made to user accessible memory. Any load() which makes use of a user-manipulated pointer is a potential problem if the data it has loaded is used in a subsequent access. This also applies for the access() if the data loaded by that access is used by a subsequent access. Harden the get_user() accessors against Spectre attacks by forcing out of bounds addresses to a NULL pointer. This prevents get_user() being used as the load() step above. As a side effect, put_user() will also be affected even though it isn't implicated. Also harden copy_from_user() by redoing the bounds check within the arm_copy_from_user() code, and NULLing the pointer if out of bounds. Acked-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/lib/copy_from_user.S9
2 files changed, 13 insertions, 0 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index ef1386b1af9b..f0515f60cff5 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -460,6 +460,10 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
460 adds \tmp, \addr, #\size - 1 460 adds \tmp, \addr, #\size - 1
461 sbcccs \tmp, \tmp, \limit 461 sbcccs \tmp, \tmp, \limit
462 bcs \bad 462 bcs \bad
463#ifdef CONFIG_CPU_SPECTRE
464 movcs \addr, #0
465 csdb
466#endif
463#endif 467#endif
464 .endm 468 .endm
465 469
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a4b06049001..a826df3d3814 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -90,6 +90,15 @@
90 .text 90 .text
91 91
92ENTRY(arm_copy_from_user) 92ENTRY(arm_copy_from_user)
93#ifdef CONFIG_CPU_SPECTRE
94 get_thread_info r3
95 ldr r3, [r3, #TI_ADDR_LIMIT]
96 adds ip, r1, r2 @ ip=addr+size
97 sub r3, r3, #1 @ addr_limit - 1
98 cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
99 movcs r1, #0 @ addr = NULL
100 csdb
101#endif
93 102
94#include "copy_template.S" 103#include "copy_template.S"
95 104