 arch/Kconfig                        |   9
 arch/arm/Kconfig                    |   1
 arch/arm/include/asm/uaccess.h      |  11
 arch/arm64/Kconfig                  |   1
 arch/arm64/include/asm/uaccess.h    |  15
 arch/ia64/Kconfig                   |   1
 arch/ia64/include/asm/uaccess.h     |  18
 arch/powerpc/Kconfig                |   1
 arch/powerpc/include/asm/uaccess.h  |  21
 arch/s390/Kconfig                   |   1
 arch/s390/lib/uaccess.c             |   2
 arch/sparc/Kconfig                  |   1
 arch/sparc/include/asm/uaccess_32.h |  14
 arch/sparc/include/asm/uaccess_64.h |  11
 arch/x86/Kconfig                    |   2
 arch/x86/include/asm/thread_info.h  |  44
 arch/x86/include/asm/uaccess.h      |  10
 arch/x86/include/asm/uaccess_32.h   |   2
 arch/x86/include/asm/uaccess_64.h   |   2
 include/linux/mmzone.h              |   2
 include/linux/slab.h                |  12
 include/linux/thread_info.h         |  24
 init/Kconfig                        |   2
 mm/Makefile                         |   4
 mm/slab.c                           |  30
 mm/slub.c                           |  40
 mm/usercopy.c                       | 268
 security/Kconfig                    |  28
 28 files changed, 555 insertions(+), 22 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index bd8056b5b246..e9c9334507dd 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -461,6 +461,15 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+	bool
+	help
+	  An architecture should select this if it can walk the kernel stack
+	  frames to determine if an object is part of either the arguments
+	  or local variables (i.e. that it excludes saved return addresses,
+	  and similar) by implementing an inline arch_within_stack_frames(),
+	  which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
 	bool
 	help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2d601d769a1c..a9c4e48bb7ec 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 62a6f65029e6..a93c0f99acf7 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	unsigned int __ua_flags = uaccess_save_and_enable();
+	unsigned int __ua_flags;
+
+	check_object_size(to, n, false);
+	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_from_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-	unsigned int __ua_flags = uaccess_save_and_enable();
+	unsigned int __ua_flags;
+
+	check_object_size(from, n, true);
+	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
 #else
+	check_object_size(from, n, true);
 	return arm_copy_to_user(to, from, n);
 #endif
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 69c8787bec7d..bc3f00f586f1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -54,6 +54,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5e834d10b291..c47257c91b77 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
-	return __arch_copy_from_user(to, from, n);
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
-	return __arch_copy_to_user(to, from, n);
+	check_object_size(from, n, true);
+	return __arch_copy_to_user(to, from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
 
-	if (access_ok(VERIFY_READ, from, n))
+	if (access_ok(VERIFY_READ, from, n)) {
+		check_object_size(to, n, false);
 		n = __arch_copy_from_user(to, from, n);
-	else /* security hole - plug it */
+	} else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
 }
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 {
 	kasan_check_read(from, n);
 
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		check_object_size(from, n, true);
 		n = __arch_copy_to_user(to, from, n);
+	}
 	return n;
 }
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6a15083cc366..18ca6a9ce566 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -52,6 +52,7 @@ config IA64
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 2189d5ddc1ee..465c70982f40 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+	if (!__builtin_constant_p(count))
+		check_object_size(from, count, true);
+
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+	if (!__builtin_constant_p(count))
+		check_object_size(to, count, false);
+
 	return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	const void *__cu_from = (from);	\
 	long __cu_len = (n);	\
 	\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))	\
-		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	if (__access_ok(__cu_to, __cu_len, get_fs())) {	\
+		if (!__builtin_constant_p(n))	\
+			check_object_size(__cu_from, __cu_len, true);	\
+		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	}	\
 	__cu_len;	\
 })
 
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	long __cu_len = (n);	\
 	\
 	__chk_user_ptr(__cu_from);	\
-	if (__access_ok(__cu_from, __cu_len, get_fs()))	\
+	if (__access_ok(__cu_from, __cu_len, get_fs())) {	\
+		if (!__builtin_constant_p(n))	\
+			check_object_size(__cu_to, __cu_len, false);	\
 	__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
+	}	\
 	__cu_len;	\
 })
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ec4047e170a0..927d2ab2ce08 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -166,6 +166,7 @@ config PPC
 	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select GENERIC_CPU_AUTOPROBE
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index b7c20f0b8fbe..c1dc6c14deb8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
 {
 	unsigned long over;
 
-	if (access_ok(VERIFY_READ, from, n))
+	if (access_ok(VERIFY_READ, from, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(to, n, false);
 		return __copy_tofrom_user((__force void __user *)to, from, n);
+	}
 	if ((unsigned long)from < TASK_SIZE) {
 		over = (unsigned long)from + n - TASK_SIZE;
+		if (!__builtin_constant_p(n - over))
+			check_object_size(to, n - over, false);
 		return __copy_tofrom_user((__force void __user *)to, from,
 				n - over) + over;
 	}
@@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
 {
 	unsigned long over;
 
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
 		return __copy_tofrom_user(to, (__force void __user *)from, n);
+	}
 	if ((unsigned long)to < TASK_SIZE) {
 		over = (unsigned long)to + n - TASK_SIZE;
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n - over, true);
 		return __copy_tofrom_user(to, (__force void __user *)from,
 				n - over) + over;
 	}
@@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 		if (ret == 0)
 			return 0;
 	}
+
+	if (!__builtin_constant_p(n))
+		check_object_size(to, n, false);
+
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 		if (ret == 0)
 			return 0;
 	}
+	if (!__builtin_constant_p(n))
+		check_object_size(from, n, true);
+
 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9e607bf2d640..0e348781327b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -123,6 +123,7 @@ config S390
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_EARLY_PFN_TO_NID
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_JUMP_LABEL
 	select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index d96596128e9f..f481fcde067b 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	check_object_size(to, n, false);
 	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 546293d9e6c5..59b09600dd32 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
 	select OLD_SIGSUSPEND
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
+	select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
 	def_bool !64BIT
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 57aca2792d29..341a5a133f48 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -248,22 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) to, n))
+	if (n && __access_ok((unsigned long) to, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
 		return __copy_user(to, (__force void __user *) from, n);
-	else
+	} else
 		return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if (!__builtin_constant_p(n))
+		check_object_size(from, n, true);
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) from, n))
+	if (n && __access_ok((unsigned long) from, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
-	else
+	} else
 		return n;
 }
 
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e9a51d64974d..8bda94fab8e8 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -210,8 +210,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
+	unsigned long ret;
 
+	if (!__builtin_constant_p(size))
+		check_object_size(to, size, false);
+
+	ret = ___copy_from_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 
@@ -227,8 +231,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-	unsigned long ret = ___copy_to_user(to, from, size);
+	unsigned long ret;
 
+	if (!__builtin_constant_p(size))
+		check_object_size(from, size, true);
+	ret = ___copy_to_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5c6e7471b732..c580d8c33562 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -80,6 +80,7 @@ config X86
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_AOUT if X86_32
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +92,7 @@ config X86
 	select HAVE_ARCH_SOFT_DIRTY if X86_64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_EBPF_JIT if X86_64
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 84b59846154a..8b7c8d8e0852 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
 	return sp;
 }
 
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ *	 1 if within a frame
+ *	-1 if placed across a frame boundary (or outside stack)
+ *	 0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+					   const void * const stackend,
+					   const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+	const void *frame = NULL;
+	const void *oldframe;
+
+	oldframe = __builtin_frame_address(1);
+	if (oldframe)
+		frame = __builtin_frame_address(2);
+	/*
+	 * low ----------------------------------------------> high
+	 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+	 *                     ^----------------^
+	 *               allow copies only within here
+	 */
+	while (stack <= frame && frame < stackend) {
+		/*
+		 * If obj + len extends past the last frame, this
+		 * check won't pass and the next frame will be 0,
+		 * causing us to bail out and correctly report
+		 * the copy as invalid.
+		 */
+		if (obj + len <= frame)
+			return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+		oldframe = frame;
+		frame = *(const void * const *)frame;
+	}
+	return -1;
+#else
+	return 0;
+#endif
+}
+
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
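As a quick, hedged illustration of what the frame walk above buys (the function and sizes below are invented, not part of the patch): with CONFIG_FRAME_POINTER, a copy sourced from a local buffer that stays inside its own frame is classified as within-frame, while a length that runs past the frame makes the walk return -1 and the copy is refused rather than leaking adjacent stack contents.

/* Hypothetical caller -- illustrative sketch only. */
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/errno.h>

static long example_read(char __user *ubuf, size_t len)
{
	char tmp[64];	/* lives entirely in this function's stack frame */

	memset(tmp, 0, sizeof(tmp));
	/*
	 * len <= sizeof(tmp): arch_within_stack_frames() finds the object
	 * bounded by a single frame and returns 1, so the copy proceeds.
	 * len > sizeof(tmp): the object crosses the frame boundary, the
	 * walk returns -1, and __check_object_size() reports
	 * "<process stack>" via BUG() instead of exposing stack memory.
	 */
	return copy_to_user(ubuf, tmp, len) ? -EFAULT : 0;
}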
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 52f230094c51..a0ae610b9280 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -761,9 +761,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	 * case, and do only runtime checking for non-constant sizes.
 	 */
 
-	if (likely(sz < 0 || sz >= n))
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(to, n, false);
 		n = _copy_from_user(to, from, n);
-	else if(__builtin_constant_p(n))
+	} else if (__builtin_constant_p(n))
 		copy_from_user_overflow();
 	else
 		__copy_from_user_overflow(sz, n);
@@ -781,9 +782,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	might_fault();
 
 	/* See the comment in copy_from_user() above. */
-	if (likely(sz < 0 || sz >= n))
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(from, n, true);
 		n = _copy_to_user(to, from, n);
-	else if(__builtin_constant_p(n))
+	} else if (__builtin_constant_p(n))
 		copy_to_user_overflow();
 	else
 		__copy_to_user_overflow(sz, n);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 4b32da24faaf..7d3bdd1ed697 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	return __copy_to_user_ll(to, from, n);
 }
 
@@ -95,6 +96,7 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_fault();
+	check_object_size(to, n, false);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 2eac2aa3e37f..673059a109fe 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
+	check_object_size(dst, size, false);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
+	check_object_size(src, size, true);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f2e4e90621ec..d572b78b65e1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
 # define is_migrate_cma(migratetype) false
+# define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1a4ea551aae5..4293808d8cfb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+					      unsigned long n,
+					      struct page *page)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 352b1542f5cc..cbd8990e2e77 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -105,6 +105,30 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 
 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+					   const void * const stackend,
+					   const void *obj, unsigned long len)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+				bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{
+	__check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
diff --git a/init/Kconfig b/init/Kconfig
index 69886493ff1e..cac3f096050d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1761,6 +1761,7 @@ choice
 
 config SLAB
 	bool "SLAB"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
@@ -1768,6 +1769,7 @@ config SLAB
 
 config SLUB
 	bool "SLUB (Unqueued Allocator)"
+	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  SLUB is a slab allocator that minimizes cache line usage
 	  instead of managing queues of cached objects (SLAB approach).
diff --git a/mm/Makefile b/mm/Makefile
index fc059666c760..2ca1faf3fa09 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
diff --git a/mm/slab.c b/mm/slab.c
index 261147ba156f..b67271024135 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4441,6 +4441,36 @@ static int __init slab_proc_init(void)
 module_init(slab_proc_init);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page)
+{
+	struct kmem_cache *cachep;
+	unsigned int objnr;
+	unsigned long offset;
+
+	/* Find and validate object. */
+	cachep = page->slab_cache;
+	objnr = obj_to_index(cachep, page, (void *)ptr);
+	BUG_ON(objnr >= cachep->num);
+
+	/* Find offset within object. */
+	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+	/* Allow address range falling entirely within object size. */
+	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+		return NULL;
+
+	return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
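The final bounds test above is deliberately phrased to avoid integer overflow; a brief worked example (values invented) of how it classifies a copy:

/*
 * Worked example (values invented): cachep->object_size == 192, and the
 * pointer lands 64 bytes into the object, so offset == 64.
 *
 *   n == 128:  offset <= 192 && n <= 192 - 64  ->  NULL  (copy allowed)
 *   n == 129:  n > 192 - 64                    ->  cachep->name (rejected)
 *
 * Writing the test as "n <= object_size - offset" rather than
 * "offset + n <= object_size" keeps it correct even when offset + n
 * would wrap around an unsigned long.
 */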
diff --git a/mm/slub.c b/mm/slub.c
index 850737bdfbd8..cead06394e9e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3764,6 +3764,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page)
+{
+	struct kmem_cache *s;
+	unsigned long offset;
+	size_t object_size;
+
+	/* Find object and usable object size. */
+	s = page->slab_cache;
+	object_size = slab_ksize(s);
+
+	/* Reject impossible pointers. */
+	if (ptr < page_address(page))
+		return s->name;
+
+	/* Find offset within object. */
+	offset = (ptr - page_address(page)) % s->size;
+
+	/* Adjust for redzone and reject if within the redzone. */
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+		if (offset < s->red_left_pad)
+			return s->name;
+		offset -= s->red_left_pad;
+	}
+
+	/* Allow address range falling entirely within object size. */
+	if (offset <= object_size && n <= object_size - offset)
+		return NULL;
+
+	return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
 	struct page *page;
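SLUB derives the object offset with modular arithmetic over the slab stride and then discounts the left redzone when debugging is enabled; a hedged illustration (all sizes invented):

/*
 * Illustration only (sizes invented): s->size == 256 (object stride,
 * including metadata), s->red_left_pad == 16, slab_ksize(s) == 200.
 *
 *   ptr - page_address(page) == 528  ->  offset = 528 % 256 = 16
 *   With SLAB_RED_ZONE: offset < 16 would mean the pointer sits inside
 *   the left redzone (rejected); here offset -= 16 leaves 0, the real
 *   offset into the object, and the usual n <= 200 - 0 bound applies.
 */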
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644
index 000000000000..8ebae91a6b55
--- /dev/null
+++ b/mm/usercopy.c
@@ -0,0 +1,268 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+	BAD_STACK = -1,
+	NOT_STACK = 0,
+	GOOD_FRAME,
+	GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ *	NOT_STACK: not at all on the stack
+ *	GOOD_FRAME: fully within a valid stack frame
+ *	GOOD_STACK: fully on the stack (when can't do frame-checking)
+ *	BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+	const void * const stack = task_stack_page(current);
+	const void * const stackend = stack + THREAD_SIZE;
+	int ret;
+
+	/* Object is not on the stack at all. */
+	if (obj + len <= stack || stackend <= obj)
+		return NOT_STACK;
+
+	/*
+	 * Reject: object partially overlaps the stack (passing the
+	 * check above means at least one end is within the stack,
+	 * so if this check fails, the other end is outside the stack).
+	 */
+	if (obj < stack || stackend < obj + len)
+		return BAD_STACK;
+
+	/* Check if object is safely within a valid frame. */
+	ret = arch_within_stack_frames(stack, stackend, obj, len);
+	if (ret)
+		return ret;
+
+	return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+			    bool to_user, const char *type)
+{
+	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+		 to_user ? "exposure" : "overwrite",
+		 to_user ? "from" : "to", ptr, type ? : "unknown", len);
+	/*
+	 * For greater effect, it would be nice to do do_group_exit(),
+	 * but BUG() actually hooks all the lock-breaking and per-arch
+	 * Oops code, so that is used here instead.
+	 */
+	BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+		     unsigned long high)
+{
+	unsigned long check_low = (uintptr_t)ptr;
+	unsigned long check_high = check_low + n;
+
+	/* Does not overlap if entirely above or entirely below. */
+	if (check_low >= high || check_high < low)
+		return false;
+
+	return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+						   unsigned long n)
+{
+	unsigned long textlow = (unsigned long)_stext;
+	unsigned long texthigh = (unsigned long)_etext;
+	unsigned long textlow_linear, texthigh_linear;
+
+	if (overlaps(ptr, n, textlow, texthigh))
+		return "<kernel text>";
+
+	/*
+	 * Some architectures have virtual memory mappings with a secondary
+	 * mapping of the kernel text, i.e. there is more than one virtual
+	 * kernel address that points to the kernel image. It is usually
+	 * when there is a separate linear physical memory mapping, in that
+	 * __pa() is not just the reverse of __va(). This can be detected
+	 * and checked:
+	 */
+	textlow_linear = (unsigned long)__va(__pa(textlow));
+	/* No different mapping: we're done. */
+	if (textlow_linear == textlow)
+		return NULL;
+
+	/* Check the secondary mapping... */
+	texthigh_linear = (unsigned long)__va(__pa(texthigh));
+	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+		return "<linear kernel text>";
+
+	return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+	/* Reject if object wraps past end of memory. */
+	if (ptr + n < ptr)
+		return "<wrapped address>";
+
+	/* Reject if NULL or ZERO-allocation. */
+	if (ZERO_OR_NULL_PTR(ptr))
+		return "<null>";
+
+	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page, *endpage;
+	const void *end = ptr + n - 1;
+	bool is_reserved, is_cma;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
+
+	/*
+	 * Sometimes the kernel data regions are not marked Reserved (see
+	 * check below). And sometimes [_sdata,_edata) does not cover
+	 * rodata and/or bss, so check each range explicitly.
+	 */
+
+	/* Allow reads of kernel rodata region (if not marked as Reserved). */
+	if (ptr >= (const void *)__start_rodata &&
+	    end <= (const void *)__end_rodata) {
+		if (!to_user)
+			return "<rodata>";
+		return NULL;
+	}
+
+	/* Allow kernel data region (if not marked as Reserved). */
+	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+		return NULL;
+
+	/* Allow kernel bss region (if not marked as Reserved). */
+	if (ptr >= (const void *)__bss_start &&
+	    end <= (const void *)__bss_stop)
+		return NULL;
+
+	/* Is the object wholly within one base page? */
+	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
+		return NULL;
+
+	/* Allow if start and end are inside the same compound page. */
+	endpage = virt_to_head_page(end);
+	if (likely(endpage == page))
+		return NULL;
+
+	/*
+	 * Reject if range is entirely either Reserved (i.e. special or
+	 * device memory), or CMA. Otherwise, reject since the object spans
+	 * several independently allocated pages.
+	 */
+	is_reserved = PageReserved(page);
+	is_cma = is_migrate_cma_page(page);
+	if (!is_reserved && !is_cma)
+		goto reject;
+
+	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+		page = virt_to_head_page(ptr);
+		if (is_reserved && !PageReserved(page))
+			goto reject;
+		if (is_cma && !is_migrate_cma_page(page))
+			goto reject;
+	}
+
+	return NULL;
+
+reject:
+	return "<spans multiple pages>";
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+	const char *err;
+
+	/* Skip all tests if size is zero. */
+	if (!n)
+		return;
+
+	/* Check for invalid addresses. */
+	err = check_bogus_address(ptr, n);
+	if (err)
+		goto report;
+
+	/* Check for bad heap object. */
+	err = check_heap_object(ptr, n, to_user);
+	if (err)
+		goto report;
+
+	/* Check for bad stack object. */
+	switch (check_stack_object(ptr, n)) {
+	case NOT_STACK:
+		/* Object is not touching the current process stack. */
+		break;
+	case GOOD_FRAME:
+	case GOOD_STACK:
+		/*
+		 * Object is either in the correct frame (when it
+		 * is possible to check) or just generally on the
+		 * process stack (when frame checking not available).
+		 */
+		return;
+	default:
+		err = "<process stack>";
+		goto report;
+	}
+
+	/* Check for object in kernel to avoid text exposure. */
+	err = check_kernel_text_object(ptr, n);
+	if (!err)
+		return;
+
+report:
+	report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
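To make the effect concrete, here is a hedged sketch of the heap-exposure pattern these checks intercept; the driver, structure, and function names are invented and not part of the patch.

/* Hypothetical driver snippet -- illustrative only. */
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_state {
	u32 flags;
	char name[32];
};				/* allocated from a small kmalloc cache */

static long example_get_state(struct example_state *st,
			      void __user *ubuf, size_t len)
{
	/*
	 * If len is user-controlled and exceeds the slab object size,
	 * copy_to_user() -> check_object_size() -> __check_heap_object()
	 * returns the cache name and report_usercopy() calls BUG(),
	 * instead of copying out whatever follows the object in the slab.
	 */
	return copy_to_user(ubuf, st, len) ? -EFAULT : 0;
}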
diff --git a/security/Kconfig b/security/Kconfig
index 176758cdfa57..df28f2b6f3e1 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR
 	  this low address space will need the permission specific to the
 	  systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+	bool
+	help
+	  The heap allocator implements __check_heap_object() for
+	  validating memory ranges against heap object sizes in
+	  support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+	bool
+	help
+	  The architecture supports CONFIG_HARDENED_USERCOPY by
+	  calling check_object_size() just before performing the
+	  userspace copies in the low level implementation of
+	  copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+	bool "Harden memory copies between kernel and userspace"
+	depends on HAVE_ARCH_HARDENED_USERCOPY
+	select BUG
+	help
+	  This option checks for obviously wrong memory regions when
+	  copying memory to/from the kernel (via copy_to_user() and
+	  copy_from_user() functions) by rejecting memory ranges that
+	  are larger than the specified heap object, span multiple
+	  separately allocated pages, are not on the process stack,
+	  or are part of the kernel text. This kills entire classes
+	  of heap overflow exploits and similar kernel memory exposures.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
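For reference, a minimal sketch of the .config fragment that results on an architecture and allocator wired up as above (the exact HAVE_* lines depend on the arch and on whether SLAB or SLUB is selected):

CONFIG_HAVE_ARCH_HARDENED_USERCOPY=y
CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
CONFIG_HARDENED_USERCOPY=y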