diff options
author:    Dan Williams <dan.j.williams@intel.com>  2018-01-29 20:02:44 -0500
committer: Thomas Gleixner <tglx@linutronix.de>     2018-01-30 15:54:30 -0500
commit:    b5c4ae4f35325d520b230bab6eb3310613b72ac1 (patch)
tree:      c8e7da2d7f5ba83e3d91180432dfefee9df123e6 /arch/x86
parent:    b3bbfb3fb5d25776b8e3f361d2eedaabb0b496cd (diff)
x86/usercopy: Replace open coded stac/clac with __uaccess_{begin, end}
In preparation for converting some __uaccess_begin() instances to
__uaccess_begin_nospec(), make sure all 'from user' uaccess paths are
using the _begin(), _end() helpers rather than open-coded stac() and
clac().
No functional changes.
Suggested-by: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: kernel-hardening@lists.openwall.com
Cc: gregkh@linuxfoundation.org
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: torvalds@linux-foundation.org
Cc: alan@linux.intel.com
Link: https://lkml.kernel.org/r/151727416438.33451.17309465232057176966.stgit@dwillia2-desk3.amr.corp.intel.com
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/lib/usercopy_32.c | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 1b377f734e64..de3436719e26 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -331,12 +331,12 @@ do { \
 
 unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
-	stac();
+	__uaccess_begin();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
 		n = __copy_user_intel(to, from, n);
-	clac();
+	__uaccess_end();
 	return n;
 }
 EXPORT_SYMBOL(__copy_user_ll);
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
-	stac();
+	__uaccess_begin();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_intel_nocache(to, from, n);
@@ -353,7 +353,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 #else
 	__copy_user(to, from, n);
 #endif
-	clac();
+	__uaccess_end();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);