aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-11-11 21:46:06 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-11-11 21:46:06 -0500
commitc2136301e43cbb3b71d0163a9949f30dafcb4590 (patch)
tree1da35409efcdf88921262410b6985fe4447f595d /arch
parent986189f9ec81263c982b60433b5b937f1056a631 (diff)
parentff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff (diff)
Merge branch 'x86-uaccess-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 uaccess changes from Ingo Molnar: "A single change that micro-optimizes __copy_*_user_inatomic(), used by the futex code" * 'x86-uaccess-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/uaccess_64.h24
1 file changed, 18 insertions, 6 deletions
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 0acae710fa00..190413d0de57 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -49,11 +49,10 @@ __must_check unsigned long
49copy_in_user(void __user *to, const void __user *from, unsigned len); 49copy_in_user(void __user *to, const void __user *from, unsigned len);
50 50
51static __always_inline __must_check 51static __always_inline __must_check
52int __copy_from_user(void *dst, const void __user *src, unsigned size) 52int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
53{ 53{
54 int ret = 0; 54 int ret = 0;
55 55
56 might_fault();
57 if (!__builtin_constant_p(size)) 56 if (!__builtin_constant_p(size))
58 return copy_user_generic(dst, (__force void *)src, size); 57 return copy_user_generic(dst, (__force void *)src, size);
59 switch (size) { 58 switch (size) {
@@ -93,11 +92,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
93} 92}
94 93
95static __always_inline __must_check 94static __always_inline __must_check
96int __copy_to_user(void __user *dst, const void *src, unsigned size) 95int __copy_from_user(void *dst, const void __user *src, unsigned size)
96{
97 might_fault();
98 return __copy_from_user_nocheck(dst, src, size);
99}
100
101static __always_inline __must_check
102int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
97{ 103{
98 int ret = 0; 104 int ret = 0;
99 105
100 might_fault();
101 if (!__builtin_constant_p(size)) 106 if (!__builtin_constant_p(size))
102 return copy_user_generic((__force void *)dst, src, size); 107 return copy_user_generic((__force void *)dst, src, size);
103 switch (size) { 108 switch (size) {
@@ -137,6 +142,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
137} 142}
138 143
139static __always_inline __must_check 144static __always_inline __must_check
145int __copy_to_user(void __user *dst, const void *src, unsigned size)
146{
147 might_fault();
148 return __copy_to_user_nocheck(dst, src, size);
149}
150
151static __always_inline __must_check
140int __copy_in_user(void __user *dst, const void __user *src, unsigned size) 152int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
141{ 153{
142 int ret = 0; 154 int ret = 0;
@@ -192,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
192static __must_check __always_inline int 204static __must_check __always_inline int
193__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) 205__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
194{ 206{
195 return copy_user_generic(dst, (__force const void *)src, size); 207 return __copy_from_user_nocheck(dst, (__force const void *)src, size);
196} 208}
197 209
198static __must_check __always_inline int 210static __must_check __always_inline int
199__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) 211__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
200{ 212{
201 return copy_user_generic((__force void *)dst, src, size); 213 return __copy_to_user_nocheck((__force void *)dst, src, size);
202} 214}
203 215
204extern long __copy_user_nocache(void *dst, const void __user *src, 216extern long __copy_user_nocache(void *dst, const void __user *src,