author     Ingo Molnar <mingo@elte.hu>             2006-01-14 16:21:30 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-14 21:27:15 -0500
commit     652050aec936fdd70ed9cbce1cd1ef30a7c9d117
tree       a2859991fd1c71d918c85c5a8b4bc63bb2c64808 /include/asm-i386
parent     9ab34fe76114b9538bfcaf3a9d112dee0feb5f17
[PATCH] mark several functions __always_inline
From: Arjan van de Ven <arjan@infradead.org>
Mark a number of functions as 'must inline'. The functions affected by this
patch need to be inlined because they use knowledge that their arguments are
constant, so that most of the function optimizes away. At this point this
patch does not change behavior; it's for documentation only (and for future
patches in the inline series).
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
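
The principle the series relies on: __builtin_constant_p() only sees a literal
argument if the function body is actually expanded at the call site, so a plain
`inline` that gcc declines to honor (e.g. at -Os) silently loses the constant
folding. A minimal user-space sketch of the idiom, assuming gcc with
optimization enabled; the names below are illustrative, not from the patch:

#include <stdio.h>

/* Same idiom the kernel's __always_inline macro expands to. */
#define __always_inline inline __attribute__((always_inline))

/* If this were plain `inline` and gcc chose not to expand it, `bytes`
 * would be an ordinary runtime parameter, __builtin_constant_p(bytes)
 * would be false, and the compile-time branch could never be taken. */
static __always_inline const char *classify(int bytes)
{
	if (__builtin_constant_p(bytes))
		return "folded at compile time";
	return "resolved at run time";
}

int main(void)
{
	volatile int n = 4;	/* volatile defeats constant propagation */

	printf("classify(4): %s\n", classify(4));	/* literal argument */
	printf("classify(n): %s\n", classify(n));	/* runtime argument */
	return 0;
}

Built with `gcc -O2`, the first call folds to the constant string; the second
takes the runtime path.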
Diffstat (limited to 'include/asm-i386')
 include/asm-i386/bitops.h  | 2 +-
 include/asm-i386/current.h | 2 +-
 include/asm-i386/string.h  | 8 ++++----
 include/asm-i386/uaccess.h | 8 ++++----
 4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index fe0819fe9c64..88e6ca248cd7 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
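
For context, constant_test_bit() is only reached through the test_bit()
dispatch macro further down the same header, which splits on
__builtin_constant_p(nr); that split is the whole point of forcing the inline.
A self-contained user-space sketch of the same shape (the kernel's
variable_test_bit() actually issues a `bt` instruction; plain C stands in):

#include <stdio.h>

#define __always_inline inline __attribute__((always_inline))

/* Mirrors the shape of the 2.6-era test_bit() split in
 * include/asm-i386/bitops.h; user-space stand-in, not kernel code. */
static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	/* With a constant nr, this folds to a test against a fixed mask. */
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	/* The kernel uses inline asm (`btl`) here; C stands in. */
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

#define test_bit(nr, addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr), (addr)) : \
	 variable_test_bit((nr), (addr)))

int main(void)
{
	unsigned long map[2] = { 1UL << 4, 0 };
	int n = 4;

	printf("%d %d\n", test_bit(4, map), test_bit(n, map)); /* 1 1 */
	return 0;
}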
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5f..3cbbecd79016 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
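
Worth noting for orientation: in this same header (unchanged by the patch),
every use of `current` expands to a get_current() call, so if gcc ever emitted
get_current() out of line, every task-pointer access in the kernel would pay a
function call:

#define current get_current()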
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d22065..bb5f88a27f7a 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 	return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 	int d0, d1, d2;
 	__asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 	case 0:
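
As the hunk comments say, these bodies only "optimize away" if the compiler
both inlines them and sees a constant n; the memcpy() macro in the same header
dispatches between __memcpy() and __constant_memcpy() on
__builtin_constant_p(n). A simplified, self-contained sketch of what
__constant_memcpy buys (the real function covers more sizes and uses inline
asm for the general case):

#include <stdio.h>
#include <stddef.h>

#define __always_inline inline __attribute__((always_inline))

/* User-space sketch of the __constant_memcpy idea.  With a constant n
 * the switch collapses to one or two moves; without guaranteed
 * inlining, n is a runtime value and the fallback loop survives. */
static __always_inline void *constant_memcpy_sketch(void *to, const void *from, size_t n)
{
	switch (n) {
	case 1:
		*(unsigned char *)to = *(const unsigned char *)from;
		return to;
	case 2:
		*(unsigned short *)to = *(const unsigned short *)from;
		return to;
	case 4:
		*(unsigned int *)to = *(const unsigned int *)from;
		return to;
	default: {
		unsigned char *d = to;
		const unsigned char *s = from;

		while (n--)	/* generic byte-copy fallback */
			*d++ = *s++;
		return to;
	}
	}
}

int main(void)
{
	unsigned int src = 0xdeadbeef, dst = 0;

	constant_memcpy_sketch(&dst, &src, sizeof(dst)); /* folds to one move */
	printf("0x%x\n", dst);
	return 0;
}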
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5aa..3f1337c34208 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
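
A hypothetical kernel-style caller shows what the __builtin_constant_p(n)
branch buys here: sizeof(*val) is a literal, so once the function is
force-inlined the fixed-size case is selected at compile time and the generic
__copy_from_user_ll() call disappears. The function name get_u32_from_user is
illustrative only, not part of the patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Illustrative caller, not from this patch. */
static int get_u32_from_user(u32 *val, const void __user *uptr)
{
	/* n == sizeof(*val) == 4 is a compile-time constant, so the
	 * inlined __copy_from_user reduces to a single fixed-size user
	 * access instead of calling __copy_from_user_ll(). */
	if (__copy_from_user(val, uptr, sizeof(*val)))
		return -EFAULT;
	return 0;
}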