author		Robin Murphy <robin.murphy@arm.com>	2018-02-05 10:34:18 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2018-02-06 17:53:32 -0500
commit		51369e398d0d33e8f524314e672b07e8cf870e79 (patch)
tree		9a30fc16dfef70bdc8547ef696ba6be7fa273c87
parent		022620eed3d0bc4bf2027326f599f5ad71c2ea3f (diff)
arm64: Make USER_DS an inclusive limit
Currently, USER_DS represents an exclusive limit while KERNEL_DS is
inclusive. In order to do some clever trickery for speculation-safe
masking, we need them both to behave equivalently - there aren't enough
bits to make KERNEL_DS exclusive, so we have precisely one option. This
also happens to correct a longstanding false negative for a range
ending on the very top byte of kernel memory.

Mark Rutland points out that we've actually got the semantics of
addresses vs. segments muddled up in most of the places we need to
amend, so shuffle the {USER,KERNEL}_DS definitions around such that we
can correct those properly instead of just pasting "-1"s everywhere.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
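For illustration only (not part of the patch): a minimal user-space sketch of the old exclusive-limit test versus the new inclusive one, using GCC/Clang's unsigned __int128 to stand in for the 65-bit arithmetic. The helper names and the VA_BITS = 48 value are assumptions made for the example.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the patched definitions; VA_BITS = 48 is assumed here */
#define TASK_SIZE_64	(1ULL << 48)
#define USER_DS		(TASK_SIZE_64 - 1)	/* inclusive limit after the patch */
#define KERNEL_DS	(~0ULL)			/* inclusive, as it always was meant to be */

/* Old test: (u65)addr + (u65)size <= limit. This treats the limit as
 * exclusive, which matched the old USER_DS (== TASK_SIZE_64) but not
 * KERNEL_DS (== -1UL), which is meant to be inclusive. */
static int old_range_ok(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= limit;
}

/* New test: (u65)addr + (u65)size <= (u65)limit + 1, with both USER_DS
 * and KERNEL_DS now defined as inclusive limits. */
static int new_range_ok(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}

int main(void)
{
	/* One byte at the very top of kernel memory: the old test rejects it
	 * (the longstanding false negative), the new test accepts it. */
	printf("top byte, old: %d\n", old_range_ok(~0ULL, 1, KERNEL_DS));
	printf("top byte, new: %d\n", new_range_ok(~0ULL, 1, KERNEL_DS));

	/* Ordinary user accesses are judged identically under both schemes:
	 * old limit TASK_SIZE_64 (exclusive) vs. new USER_DS (inclusive). */
	printf("user, old: %d\n", old_range_ok(0x1000, 0x100, TASK_SIZE_64));
	printf("user, new: %d\n", new_range_ok(0x1000, 0x100, USER_DS));
	return 0;
}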
-rw-r--r--	arch/arm64/include/asm/processor.h	3
-rw-r--r--	arch/arm64/include/asm/uaccess.h	45
-rw-r--r--	arch/arm64/kernel/entry.S		4
-rw-r--r--	arch/arm64/mm/fault.c			4
4 files changed, 33 insertions, 23 deletions
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index cee4ae25a5d1..dfefbf3fd9e3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@
 
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
 
+#define KERNEL_DS	UL(-1)
+#define USER_DS		(TASK_SIZE_64 - 1)
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 59fda5292936..f2fc026cffb4 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -35,10 +35,7 @@
 #include <asm/compiler.h>
 #include <asm/extable.h>
 
-#define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
-
-#define USER_DS		TASK_SIZE_64
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
@@ -66,22 +63,32 @@ static inline void set_fs(mm_segment_t fs)
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-#define __range_ok(addr, size)						\
-({									\
-	unsigned long __addr = (unsigned long)(addr);			\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
-		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (__addr), "Ir" (size),				\
-		  "r" (current_thread_info()->addr_limit)		\
-		: "cc");						\
-	flag;								\
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+	unsigned long limit = current_thread_info()->addr_limit;
+
+	__chk_user_ptr(addr);
+	asm volatile(
+	// A + B <= C + 1 for all A,B,C, in four easy steps:
+	// 1: X = A + B; X' = X % 2^64
+	"	adds	%0, %0, %2\n"
+	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+	"	csel	%1, xzr, %1, hi\n"
+	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+	//    to compensate for the carry flag being set in step 4. For
+	//    X > 2^64, X' merely has to remain nonzero, which it does.
+	"	csinv	%0, %0, xzr, cc\n"
+	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+	//    comes from the carry in being clear. Otherwise, we are
+	//    testing X' - C == 0, subject to the previous adjustments.
+	"	sbcs	xzr, %0, %1\n"
+	"	cset	%0, ls\n"
+	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+	return addr;
+}
 
 /*
  * When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -90,7 +97,7 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
-#define access_ok(type, addr, size)	__range_ok(addr, size)
+#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
 #define user_addr_max			get_fs
 
 #define _ASM_EXTABLE(from, to)						\
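Outside the patch proper, a rough user-space model of the four-step flag sequence above may help readers follow the inline asm: it mirrors the adds/csel/csinv/sbcs/cset steps in C and compares the outcome with the straightforward 128-bit form of the test. The function names and test values are invented for the illustration.

#include <stdio.h>
#include <stdint.h>

/* Reference form: (u65)addr + (u65)size <= (u65)limit + 1 */
static int range_ok_ref(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}

/* Step-by-step model of the adds/csel/csinv/sbcs/cset sequence */
static int range_ok_flags(uint64_t addr, uint64_t size, uint64_t limit)
{
	uint64_t x = addr + size;	/* adds: X' = X % 2^64 */
	int carry = x < addr;		/* C flag: set iff X >= 2^64 */
	int zero = (x == 0);		/* Z flag */

	if (carry && !zero)		/* csel ..., hi: X > 2^64, so force C to 0 */
		limit = 0;
	if (carry)			/* csinv ..., cc: X >= 2^64, so saturate X' */
		x = ~0ULL;

	/* sbcs xzr, x, limit with carry-in taken from the adds:
	 * computed as x + ~limit + carry, setting C and Z from the result */
	unsigned __int128 sub = (unsigned __int128)x + (uint64_t)~limit + carry;
	int c_out = (int)(sub >> 64);
	int z_out = ((uint64_t)sub == 0);

	return !c_out || z_out;		/* cset ..., ls */
}

int main(void)
{
	const uint64_t cases[][3] = {
		{ 0x1000, 0x100, (1ULL << 48) - 1 },	   /* ordinary user access */
		{ ~0ULL, 1, ~0ULL },			   /* top byte of kernel memory */
		{ ~0ULL, 2, ~0ULL },			   /* wraps past the top: invalid */
		{ (1ULL << 48) - 1, 2, (1ULL << 48) - 1 }, /* one byte too far */
	};

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		uint64_t a = cases[i][0], s = cases[i][1], l = cases[i][2];
		printf("ref=%d flags=%d\n", range_ok_ref(a, s, l),
		       range_ok_flags(a, s, l));
	}
	return 0;
}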
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9b8635d9d7ae..17c15f741780 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -167,10 +167,10 @@ alternative_else_nop_endif
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
-	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+	/* Save the task's original addr_limit and set USER_DS */
 	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	mov	x20, #TASK_SIZE_64
+	mov	x20, #USER_DS
 	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0e671ddf4855..af530eb9f2ed 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
 	if (fsc_type == ESR_ELx_FSC_PERM)
 		return true;
 
-	if (addr < USER_DS && system_uses_ttbr0_pan())
+	if (addr < TASK_SIZE && system_uses_ttbr0_pan())
 		return fsc_type == ESR_ELx_FSC_FAULT &&
 			(regs->pstate & PSR_PAN_BIT);
 
@@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
+	if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
 		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);