aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristophe Leroy <christophe.leroy@c-s.fr>2019-04-18 02:51:20 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2019-04-21 09:05:57 -0400
commitde78a9c42a790011f179bc94a7da3f5d8721f4cc (patch)
tree462ebea30f53f5f1e61513cbae38e40145ec030a
parent0fb1c25ab523614b056ace11be67aac8f8ccabb1 (diff)
powerpc: Add a framework for Kernel Userspace Access Protection
This patch implements a framework for Kernel Userspace Access Protection.

Then subarches will have the possibility to provide their own implementation by providing setup_kuap() and allow/prevent_user_access().

Some platforms will need to know the area accessed and whether it is accessed from read, write or both. Therefore source, destination and size are handed over to the two functions.

mpe: Rename to allow/prevent rather than unlock/lock, and add read/write wrappers. Drop the 32-bit code for now until we have an implementation for it. Add kuap to pt_regs for 64-bit as well as 32-bit. Don't split strings, use pr_crit_ratelimited().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--arch/powerpc/include/asm/futex.h4
-rw-r--r--arch/powerpc/include/asm/kup.h32
-rw-r--r--arch/powerpc/include/asm/ptrace.h11
-rw-r--r--arch/powerpc/include/asm/uaccess.h38
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c4
-rw-r--r--arch/powerpc/mm/fault.c19
-rw-r--r--arch/powerpc/mm/init-common.c10
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype12
10 files changed, 121 insertions, 15 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a53df74589e5..c45a19d654f3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2839,7 +2839,7 @@
2839 noexec=on: enable non-executable mappings (default) 2839 noexec=on: enable non-executable mappings (default)
2840 noexec=off: disable non-executable mappings 2840 noexec=off: disable non-executable mappings
2841 2841
2842 nosmap [X86] 2842 nosmap [X86,PPC]
2843 Disable SMAP (Supervisor Mode Access Prevention) 2843 Disable SMAP (Supervisor Mode Access Prevention)
2844 even if it is supported by processor. 2844 even if it is supported by processor.
2845 2845
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 88b38b37c21b..3a6aa57b9d90 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
35{ 35{
36 int oldval = 0, ret; 36 int oldval = 0, ret;
37 37
38 allow_write_to_user(uaddr, sizeof(*uaddr));
38 pagefault_disable(); 39 pagefault_disable();
39 40
40 switch (op) { 41 switch (op) {
@@ -62,6 +63,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
62 if (!ret) 63 if (!ret)
63 *oval = oldval; 64 *oval = oldval;
64 65
66 prevent_write_to_user(uaddr, sizeof(*uaddr));
65 return ret; 67 return ret;
66} 68}
67 69
@@ -75,6 +77,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
75 if (!access_ok(uaddr, sizeof(u32))) 77 if (!access_ok(uaddr, sizeof(u32)))
76 return -EFAULT; 78 return -EFAULT;
77 79
80 allow_write_to_user(uaddr, sizeof(*uaddr));
78 __asm__ __volatile__ ( 81 __asm__ __volatile__ (
79 PPC_ATOMIC_ENTRY_BARRIER 82 PPC_ATOMIC_ENTRY_BARRIER
80"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ 83"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
@@ -95,6 +98,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
95 : "cc", "memory"); 98 : "cc", "memory");
96 99
97 *uval = prev; 100 *uval = prev;
101 prevent_write_to_user(uaddr, sizeof(*uaddr));
98 return ret; 102 return ret;
99} 103}
100 104
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index a2a959cb4e36..4d78b9d8c99c 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -4,6 +4,8 @@
4 4
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
6 6
7#include <asm/pgtable.h>
8
7void setup_kup(void); 9void setup_kup(void);
8 10
9#ifdef CONFIG_PPC_KUEP 11#ifdef CONFIG_PPC_KUEP
@@ -12,6 +14,36 @@ void setup_kuep(bool disabled);
12static inline void setup_kuep(bool disabled) { } 14static inline void setup_kuep(bool disabled) { }
13#endif /* CONFIG_PPC_KUEP */ 15#endif /* CONFIG_PPC_KUEP */
14 16
17#ifdef CONFIG_PPC_KUAP
18void setup_kuap(bool disabled);
19#else
20static inline void setup_kuap(bool disabled) { }
21static inline void allow_user_access(void __user *to, const void __user *from,
22 unsigned long size) { }
23static inline void prevent_user_access(void __user *to, const void __user *from,
24 unsigned long size) { }
25#endif /* CONFIG_PPC_KUAP */
26
27static inline void allow_read_from_user(const void __user *from, unsigned long size)
28{
29 allow_user_access(NULL, from, size);
30}
31
32static inline void allow_write_to_user(void __user *to, unsigned long size)
33{
34 allow_user_access(to, NULL, size);
35}
36
37static inline void prevent_read_from_user(const void __user *from, unsigned long size)
38{
39 prevent_user_access(NULL, from, size);
40}
41
42static inline void prevent_write_to_user(void __user *to, unsigned long size)
43{
44 prevent_user_access(to, NULL, size);
45}
46
15#endif /* !__ASSEMBLY__ */ 47#endif /* !__ASSEMBLY__ */
16 48
17#endif /* _ASM_POWERPC_KUP_H_ */ 49#endif /* _ASM_POWERPC_KUP_H_ */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 64271e562fed..6f047730e642 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -52,10 +52,17 @@ struct pt_regs
52 }; 52 };
53 }; 53 };
54 54
55 union {
56 struct {
55#ifdef CONFIG_PPC64 57#ifdef CONFIG_PPC64
56 unsigned long ppr; 58 unsigned long ppr;
57 unsigned long __pad; /* Maintain 16 byte interrupt stack alignment */ 59#endif
60#ifdef CONFIG_PPC_KUAP
61 unsigned long kuap;
58#endif 62#endif
63 };
64 unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */
65 };
59}; 66};
60#endif 67#endif
61 68
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4d6d905e9138..76f34346b642 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -6,6 +6,7 @@
6#include <asm/processor.h> 6#include <asm/processor.h>
7#include <asm/page.h> 7#include <asm/page.h>
8#include <asm/extable.h> 8#include <asm/extable.h>
9#include <asm/kup.h>
9 10
10/* 11/*
11 * The fs value determines whether argument validity checking should be 12 * The fs value determines whether argument validity checking should be
@@ -140,6 +141,7 @@ extern long __put_user_bad(void);
140#define __put_user_size(x, ptr, size, retval) \ 141#define __put_user_size(x, ptr, size, retval) \
141do { \ 142do { \
142 retval = 0; \ 143 retval = 0; \
144 allow_write_to_user(ptr, size); \
143 switch (size) { \ 145 switch (size) { \
144 case 1: __put_user_asm(x, ptr, retval, "stb"); break; \ 146 case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
145 case 2: __put_user_asm(x, ptr, retval, "sth"); break; \ 147 case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
@@ -147,6 +149,7 @@ do { \
147 case 8: __put_user_asm2(x, ptr, retval); break; \ 149 case 8: __put_user_asm2(x, ptr, retval); break; \
148 default: __put_user_bad(); \ 150 default: __put_user_bad(); \
149 } \ 151 } \
152 prevent_write_to_user(ptr, size); \
150} while (0) 153} while (0)
151 154
152#define __put_user_nocheck(x, ptr, size) \ 155#define __put_user_nocheck(x, ptr, size) \
@@ -239,6 +242,7 @@ do { \
239 __chk_user_ptr(ptr); \ 242 __chk_user_ptr(ptr); \
240 if (size > sizeof(x)) \ 243 if (size > sizeof(x)) \
241 (x) = __get_user_bad(); \ 244 (x) = __get_user_bad(); \
245 allow_read_from_user(ptr, size); \
242 switch (size) { \ 246 switch (size) { \
243 case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \ 247 case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
244 case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \ 248 case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
@@ -246,6 +250,7 @@ do { \
246 case 8: __get_user_asm2(x, ptr, retval); break; \ 250 case 8: __get_user_asm2(x, ptr, retval); break; \
247 default: (x) = __get_user_bad(); \ 251 default: (x) = __get_user_bad(); \
248 } \ 252 } \
253 prevent_read_from_user(ptr, size); \
249} while (0) 254} while (0)
250 255
251/* 256/*
@@ -305,15 +310,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
305static inline unsigned long 310static inline unsigned long
306raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 311raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
307{ 312{
308 return __copy_tofrom_user(to, from, n); 313 unsigned long ret;
314
315 allow_user_access(to, from, n);
316 ret = __copy_tofrom_user(to, from, n);
317 prevent_user_access(to, from, n);
318 return ret;
309} 319}
310#endif /* __powerpc64__ */ 320#endif /* __powerpc64__ */
311 321
312static inline unsigned long raw_copy_from_user(void *to, 322static inline unsigned long raw_copy_from_user(void *to,
313 const void __user *from, unsigned long n) 323 const void __user *from, unsigned long n)
314{ 324{
325 unsigned long ret;
315 if (__builtin_constant_p(n) && (n <= 8)) { 326 if (__builtin_constant_p(n) && (n <= 8)) {
316 unsigned long ret = 1; 327 ret = 1;
317 328
318 switch (n) { 329 switch (n) {
319 case 1: 330 case 1:
@@ -338,14 +349,18 @@ static inline unsigned long raw_copy_from_user(void *to,
338 } 349 }
339 350
340 barrier_nospec(); 351 barrier_nospec();
341 return __copy_tofrom_user((__force void __user *)to, from, n); 352 allow_read_from_user(from, n);
353 ret = __copy_tofrom_user((__force void __user *)to, from, n);
354 prevent_read_from_user(from, n);
355 return ret;
342} 356}
343 357
344static inline unsigned long raw_copy_to_user(void __user *to, 358static inline unsigned long raw_copy_to_user(void __user *to,
345 const void *from, unsigned long n) 359 const void *from, unsigned long n)
346{ 360{
361 unsigned long ret;
347 if (__builtin_constant_p(n) && (n <= 8)) { 362 if (__builtin_constant_p(n) && (n <= 8)) {
348 unsigned long ret = 1; 363 ret = 1;
349 364
350 switch (n) { 365 switch (n) {
351 case 1: 366 case 1:
@@ -365,17 +380,24 @@ static inline unsigned long raw_copy_to_user(void __user *to,
365 return 0; 380 return 0;
366 } 381 }
367 382
368 return __copy_tofrom_user(to, (__force const void __user *)from, n); 383 allow_write_to_user(to, n);
384 ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
385 prevent_write_to_user(to, n);
386 return ret;
369} 387}
370 388
371extern unsigned long __clear_user(void __user *addr, unsigned long size); 389extern unsigned long __clear_user(void __user *addr, unsigned long size);
372 390
373static inline unsigned long clear_user(void __user *addr, unsigned long size) 391static inline unsigned long clear_user(void __user *addr, unsigned long size)
374{ 392{
393 unsigned long ret = size;
375 might_fault(); 394 might_fault();
376 if (likely(access_ok(addr, size))) 395 if (likely(access_ok(addr, size))) {
377 return __clear_user(addr, size); 396 allow_write_to_user(addr, size);
378 return size; 397 ret = __clear_user(addr, size);
398 prevent_write_to_user(addr, size);
399 }
400 return ret;
379} 401}
380 402
381extern long strncpy_from_user(char *dst, const char __user *src, long count); 403extern long strncpy_from_user(char *dst, const char __user *src, long count);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 86a61e5f8285..66202e02fee2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -332,6 +332,10 @@ int main(void)
332 STACK_PT_REGS_OFFSET(_PPR, ppr); 332 STACK_PT_REGS_OFFSET(_PPR, ppr);
333#endif /* CONFIG_PPC64 */ 333#endif /* CONFIG_PPC64 */
334 334
335#ifdef CONFIG_PPC_KUAP
336 STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
337#endif
338
335#if defined(CONFIG_PPC32) 339#if defined(CONFIG_PPC32)
336#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 340#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
337 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); 341 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index 890d4ddd91d6..bb9307ce2440 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
29 unsigned int csum; 29 unsigned int csum;
30 30
31 might_sleep(); 31 might_sleep();
32 allow_read_from_user(src, len);
32 33
33 *err_ptr = 0; 34 *err_ptr = 0;
34 35
@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
60 } 61 }
61 62
62out: 63out:
64 prevent_read_from_user(src, len);
63 return (__force __wsum)csum; 65 return (__force __wsum)csum;
64} 66}
65EXPORT_SYMBOL(csum_and_copy_from_user); 67EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
70 unsigned int csum; 72 unsigned int csum;
71 73
72 might_sleep(); 74 might_sleep();
75 allow_write_to_user(dst, len);
73 76
74 *err_ptr = 0; 77 *err_ptr = 0;
75 78
@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
97 } 100 }
98 101
99out: 102out:
103 prevent_write_to_user(dst, len);
100 return (__force __wsum)csum; 104 return (__force __wsum)csum;
101} 105}
102EXPORT_SYMBOL(csum_and_copy_to_user); 106EXPORT_SYMBOL(csum_and_copy_to_user);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 3384354abc1d..463d1e9d026e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -223,9 +223,11 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
223} 223}
224 224
225/* Is this a bad kernel fault ? */ 225/* Is this a bad kernel fault ? */
226static bool bad_kernel_fault(bool is_exec, unsigned long error_code, 226static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
227 unsigned long address) 227 unsigned long address)
228{ 228{
229 int is_exec = TRAP(regs) == 0x400;
230
229 /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */ 231 /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
230 if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT | 232 if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
231 DSISR_PROTFAULT))) { 233 DSISR_PROTFAULT))) {
@@ -234,7 +236,15 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
234 address, 236 address,
235 from_kuid(&init_user_ns, current_uid())); 237 from_kuid(&init_user_ns, current_uid()));
236 } 238 }
237 return is_exec || (address >= TASK_SIZE); 239
240 if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
241 !search_exception_tables(regs->nip)) {
242 pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
243 address,
244 from_kuid(&init_user_ns, current_uid()));
245 }
246
247 return is_exec || (address >= TASK_SIZE) || !search_exception_tables(regs->nip);
238} 248}
239 249
240static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, 250static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
@@ -454,9 +464,10 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
454 464
455 /* 465 /*
456 * The kernel should never take an execute fault nor should it 466 * The kernel should never take an execute fault nor should it
457 * take a page fault to a kernel address. 467 * take a page fault to a kernel address or a page fault to a user
468 * address outside of dedicated places
458 */ 469 */
459 if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address))) 470 if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address)))
460 return SIGSEGV; 471 return SIGSEGV;
461 472
462 /* 473 /*
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 83f95a5565d6..ecaedfff9992 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -27,6 +27,7 @@
27#include <asm/kup.h> 27#include <asm/kup.h>
28 28
29static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP); 29static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
30static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
30 31
31static int __init parse_nosmep(char *p) 32static int __init parse_nosmep(char *p)
32{ 33{
@@ -36,9 +37,18 @@ static int __init parse_nosmep(char *p)
36} 37}
37early_param("nosmep", parse_nosmep); 38early_param("nosmep", parse_nosmep);
38 39
40static int __init parse_nosmap(char *p)
41{
42 disable_kuap = true;
43 pr_warn("Disabling Kernel Userspace Access Protection\n");
44 return 0;
45}
46early_param("nosmap", parse_nosmap);
47
39void __init setup_kup(void) 48void __init setup_kup(void)
40{ 49{
41 setup_kuep(disable_kuep); 50 setup_kuep(disable_kuep);
51 setup_kuap(disable_kuap);
42} 52}
43 53
44#define CTOR(shift) static void ctor_##shift(void *addr) \ 54#define CTOR(shift) static void ctor_##shift(void *addr) \
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7d30bbbaa3c1..457fc3a5ed93 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -357,6 +357,18 @@ config PPC_KUEP
357 357
358 If you're unsure, say Y. 358 If you're unsure, say Y.
359 359
360config PPC_HAVE_KUAP
361 bool
362
363config PPC_KUAP
364 bool "Kernel Userspace Access Protection"
365 depends on PPC_HAVE_KUAP
366 default y
367 help
368 Enable support for Kernel Userspace Access Protection (KUAP)
369
370 If you're unsure, say Y.
371
360config ARCH_ENABLE_HUGEPAGE_MIGRATION 372config ARCH_ENABLE_HUGEPAGE_MIGRATION
361 def_bool y 373 def_bool y
362 depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION 374 depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION