aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris von Recklinghausen <crecklin@redhat.com>2018-07-03 15:43:08 -0400
committerKees Cook <keescook@chromium.org>2018-07-04 11:04:52 -0400
commitb5cb15d9372abc9adc4e844c0c1bf594ca6a7695 (patch)
tree191dc8291fd437d849716393d9ec7c7bcd2c346f
parent6aa56f44253a6dd802e45d8ab1b48847feaf063a (diff)
usercopy: Allow boot cmdline disabling of hardening
Enabling HARDENED_USERCOPY may cause measurable regressions in networking performance: up to 8% under UDP flood. I ran a small packet UDP flood using pktgen vs. a host b2b connected. On the receiver side the UDP packets are processed by a simple user space process that just reads and drops them: https://github.com/netoptimizer/network-testing/blob/master/src/udp_sink.c Not very useful from a functional PoV, but it helps to pin-point bottlenecks in the networking stack. When running a kernel with CONFIG_HARDENED_USERCOPY=y, I see a 5-8% regression in the receive tput, compared to the same kernel without this option enabled. With CONFIG_HARDENED_USERCOPY=y, perf shows ~6% of CPU time spent cumulatively in __check_object_size (~4%) and __virt_addr_valid (~2%). The call-chain is: __GI___libc_recvfrom entry_SYSCALL_64_after_hwframe do_syscall_64 __x64_sys_recvfrom __sys_recvfrom inet_recvmsg udp_recvmsg __check_object_size udp_recvmsg() actually calls copy_to_iter() (inlined) and the latter calls check_copy_size() (again, inlined). A generic distro may want to enable HARDENED_USERCOPY in their default kernel config, but at the same time, such distro may want to be able to avoid the performance penalties with the default configuration and disable the stricter check on a per-boot basis. This change adds a boot parameter that conditionally disables HARDENED_USERCOPY via "hardened_usercopy=off". Signed-off-by: Chris von Recklinghausen <crecklin@redhat.com> Signed-off-by: Kees Cook <keescook@chromium.org>
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt11
-rw-r--r--include/linux/jump_label.h6
-rw-r--r--mm/usercopy.c25
3 files changed, 42 insertions, 0 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index efc7aa7a0670..560d4dc66f02 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -816,6 +816,17 @@
816 disable= [IPV6] 816 disable= [IPV6]
817 See Documentation/networking/ipv6.txt. 817 See Documentation/networking/ipv6.txt.
818 818
819 hardened_usercopy=
820 [KNL] Under CONFIG_HARDENED_USERCOPY, whether
821 hardening is enabled for this boot. Hardened
822 usercopy checking is used to protect the kernel
823 from reading or writing beyond known memory
824 allocation boundaries as a proactive defense
825 against bounds-checking flaws in the kernel's
826 copy_to_user()/copy_from_user() interface.
827 on Perform hardened usercopy checks (default).
828 off Disable hardened usercopy checks.
829
819 disable_radix [PPC] 830 disable_radix [PPC]
820 Disable RADIX MMU mode on POWER9 831 Disable RADIX MMU mode on POWER9
821 832
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b46b541c67c4..1a0b6f17a5d6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -299,12 +299,18 @@ struct static_key_false {
299#define DEFINE_STATIC_KEY_TRUE(name) \ 299#define DEFINE_STATIC_KEY_TRUE(name) \
300 struct static_key_true name = STATIC_KEY_TRUE_INIT 300 struct static_key_true name = STATIC_KEY_TRUE_INIT
301 301
302#define DEFINE_STATIC_KEY_TRUE_RO(name) \
303 struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
304
302#define DECLARE_STATIC_KEY_TRUE(name) \ 305#define DECLARE_STATIC_KEY_TRUE(name) \
303 extern struct static_key_true name 306 extern struct static_key_true name
304 307
305#define DEFINE_STATIC_KEY_FALSE(name) \ 308#define DEFINE_STATIC_KEY_FALSE(name) \
306 struct static_key_false name = STATIC_KEY_FALSE_INIT 309 struct static_key_false name = STATIC_KEY_FALSE_INIT
307 310
311#define DEFINE_STATIC_KEY_FALSE_RO(name) \
312 struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
313
308#define DECLARE_STATIC_KEY_FALSE(name) \ 314#define DECLARE_STATIC_KEY_FALSE(name) \
309 extern struct static_key_false name 315 extern struct static_key_false name
310 316
diff --git a/mm/usercopy.c b/mm/usercopy.c
index e9e9325f7638..852eb4e53f06 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -20,6 +20,8 @@
20#include <linux/sched/task.h> 20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h> 21#include <linux/sched/task_stack.h>
22#include <linux/thread_info.h> 22#include <linux/thread_info.h>
23#include <linux/atomic.h>
24#include <linux/jump_label.h>
23#include <asm/sections.h> 25#include <asm/sections.h>
24 26
25/* 27/*
@@ -240,6 +242,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
240 } 242 }
241} 243}
242 244
245static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
246
243/* 247/*
244 * Validates that the given object is: 248 * Validates that the given object is:
245 * - not bogus address 249 * - not bogus address
@@ -248,6 +252,9 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
248 */ 252 */
249void __check_object_size(const void *ptr, unsigned long n, bool to_user) 253void __check_object_size(const void *ptr, unsigned long n, bool to_user)
250{ 254{
255 if (static_branch_unlikely(&bypass_usercopy_checks))
256 return;
257
251 /* Skip all tests if size is zero. */ 258 /* Skip all tests if size is zero. */
252 if (!n) 259 if (!n)
253 return; 260 return;
@@ -279,3 +286,21 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
279 check_kernel_text_object((const unsigned long)ptr, n, to_user); 286 check_kernel_text_object((const unsigned long)ptr, n, to_user);
280} 287}
281EXPORT_SYMBOL(__check_object_size); 288EXPORT_SYMBOL(__check_object_size);
289
290static bool enable_checks __initdata = true;
291
292static int __init parse_hardened_usercopy(char *str)
293{
294 return strtobool(str, &enable_checks);
295}
296
297__setup("hardened_usercopy=", parse_hardened_usercopy);
298
299static int __init set_hardened_usercopy(void)
300{
301 if (enable_checks == false)
302 static_branch_enable(&bypass_usercopy_checks);
303 return 1;
304}
305
306late_initcall(set_hardened_usercopy);