author    Ingo Molnar <mingo@elte.hu>            2006-01-14 16:21:30 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-14 21:27:15 -0500
commit    652050aec936fdd70ed9cbce1cd1ef30a7c9d117 (patch)
tree      a2859991fd1c71d918c85c5a8b4bc63bb2c64808 /include/asm-i386/uaccess.h
parent    9ab34fe76114b9538bfcaf3a9d112dee0feb5f17 (diff)
[PATCH] mark several functions __always_inline

From: Arjan van de Ven <arjan@infradead.org>

Mark a number of functions as 'must inline'.  The functions affected by
this patch need to be inlined because they use knowledge that their
arguments are constant, so that most of the function optimizes away.

At this point this patch does not change behavior; it is for
documentation only (and for future patches in the inline series).

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
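For context, a minimal user-space sketch of why a hard inlining guarantee matters for these helpers: the __builtin_constant_p(n) test inside them only folds to a compile-time constant when the body is actually inlined into the caller, and plain 'inline' is merely a hint that gcc may ignore.  The helper names copy_small() and copy_one_word() below are illustrative only and not part of the patch; the __always_inline define mirrors (roughly) the kernel's own definition.

/* Build with: gcc -O2 -c always_inline_sketch.c */

/* Roughly the kernel's definition (see include/linux/compiler-gcc.h). */
#define __always_inline inline __attribute__((always_inline))

/*
 * Hypothetical helper following the same pattern as __copy_to_user_inatomic():
 * a constant-size fast path guarded by __builtin_constant_p(), plus a generic
 * runtime-sized fallback.
 */
static __always_inline unsigned long
copy_small(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && n == sizeof(unsigned int)) {
		/* size known at compile time: collapses to a single move */
		*(unsigned int *)to = *(const unsigned int *)from;
		return 0;
	}

	/* generic byte-by-byte fallback for runtime-sized copies */
	{
		unsigned char *d = to;
		const unsigned char *s = from;

		while (n--)
			*d++ = *s++;
	}
	return 0;
}

unsigned long copy_one_word(void *dst, const void *src)
{
	/*
	 * n is a compile-time constant here; because copy_small() is
	 * guaranteed to be inlined, gcc sees that and drops the fallback
	 * loop entirely.
	 */
	return copy_small(dst, src, sizeof(unsigned int));
}

If copy_small() were only 'inline' and gcc chose not to inline it (for example at -Os), __builtin_constant_p(n) would evaluate to 0 in the out-of-line copy and every caller would pay for the fallback path; that is the behavior this patch's __always_inline annotations are meant to rule out.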
Diffstat (limited to 'include/asm-i386/uaccess.h')
-rw-r--r--  include/asm-i386/uaccess.h  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5aa..3f1337c34208 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();