author		Andi Kleen <ak@linux.intel.com>	2013-08-16 17:17:19 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-09-10 18:27:43 -0400
commit		ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff (patch)
tree		9a41335f282ba7851abf625fb295369aaa6061d9
parent		6e4664525b1db28f8c4e1130957f70a94c19213e (diff)
x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic
The 64bit __copy_{from,to}_user_inatomic always called
copy_user_generic, but skipped the special optimizations for 1/2/4/8
byte accesses.
This especially hurts the futex call, which accesses the 4 byte futex
user value with a complicated fast string operation in a function call,
instead of a single movl.
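
As a rough illustration of what the sized fast path buys (a hypothetical
userspace sketch, not the kernel's uaccess code, which uses
exception-handling inline assembly; all names below are made up): when the
size is a compile-time constant such as 4, the copy collapses to a single
32-bit move, while a non-constant size falls back to an out-of-line generic
copy routine.

/* Hypothetical sketch of a constant-size fast path; not kernel code. */
#include <string.h>

/* Stand-in for an out-of-line generic copy such as copy_user_generic(). */
static int copy_generic(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

static inline int copy_sized(void *dst, const void *src, unsigned size)
{
	/* __builtin_constant_p() is the GCC builtin the kernel also uses. */
	if (!__builtin_constant_p(size))
		return copy_generic(dst, src, size);
	switch (size) {
	case 4:
		/*
		 * A fixed 4-byte copy is inlined by the compiler as a
		 * single 32-bit move (movl) instead of a function call.
		 */
		memcpy(dst, src, 4);
		return 0;
	default:
		return copy_generic(dst, src, size);
	}
}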
Use __copy_{from,to}_user for _inatomic instead to get the same
optimizations. The only problem was the might_fault() in those
functions. So move that into new wrappers and call
__copy_{from,to}_user_nocheck() from *_inatomic directly.
32bit already did this correctly by duplicating the code.
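
Condensed, the structure the patch below arrives at looks roughly like this
(a stubbed-out sketch rather than the verbatim header; in the real code the
_nocheck variant contains the constant-size switch and the
copy_user_generic() fallback):

/* Stubbed-out sketch of the wrapper split; kernel internals replaced. */
#include <string.h>

#define __user				/* address-space annotation, elided here */

static void might_fault(void) { }	/* stand-in for the kernel's sleep check */

static int __copy_from_user_nocheck(void *dst, const void __user *src,
				    unsigned size)
{
	memcpy(dst, src, size);	/* real code: sized fast path or generic copy */
	return 0;
}

/* Checked variant: may sleep, so it keeps the might_fault() annotation. */
static int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}

/* Atomic variant: skips might_fault() but still gets the sized fast path. */
static int __copy_from_user_inatomic(void *dst, const void __user *src,
				     unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}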
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--	arch/x86/include/asm/uaccess_64.h | 24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 4f7923dd0007..64476bb2a146 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -77,11 +77,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -121,11 +120,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -165,6 +170,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
@@ -220,13 +232,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,