author    Ingo Molnar <mingo@elte.hu>            2006-01-14 16:21:30 -0500
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-01-14 21:27:15 -0500
commit    652050aec936fdd70ed9cbce1cd1ef30a7c9d117 (patch)
tree      a2859991fd1c71d918c85c5a8b4bc63bb2c64808
parent    9ab34fe76114b9538bfcaf3a9d112dee0feb5f17 (diff)
[PATCH] mark several functions __always_inline
From: Arjan van de Ven <arjan@infradead.org>

Mark a number of functions as 'must inline'. The functions affected by
this patch need to be inlined because they use knowledge that their
arguments are constant, so that most of the function optimizes away.

At this point this patch does not change behavior; it is for
documentation only (and for future patches in the inline series).

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
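The pattern the message refers to looks roughly like the sketch below: a
macro dispatches on __builtin_constant_p() to a helper that is only
efficient when its size argument is a compile-time constant, and that
helper must actually be inlined for the constant folding to happen. This
is a minimal illustration with hypothetical names (fill, constant_fill,
variable_fill), not the kernel sources themselves:

/*
 * Sketch of the constant-dispatch pattern: fill() resolves to
 * constant_fill() when n is a compile-time constant, and
 * constant_fill() only optimizes down to a few stores if the
 * compiler inlines it -- which is what __always_inline guarantees.
 */
#include <stddef.h>

#define __always_inline inline __attribute__((always_inline))

/* Fast path: the loop fully unrolls when n is a compile-time
 * constant, but only if this function is inlined into the caller. */
static __always_inline void *constant_fill(void *s, int c, size_t n)
{
	unsigned char *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		p[i] = (unsigned char)c;
	return s;
}

/* Generic path for sizes known only at run time. */
static inline void *variable_fill(void *s, int c, size_t n)
{
	unsigned char *p = s;

	while (n--)
		*p++ = (unsigned char)c;
	return s;
}

#define fill(s, c, n)				\
	(__builtin_constant_p(n) ?		\
	 constant_fill((s), (c), (n)) :		\
	 variable_fill((s), (c), (n)))

With plain 'inline', gcc is free to emit constant_fill() out of line
(for instance at -Os, or when it judges the function too large), at
which point the constant n is no longer visible inside the helper.
__always_inline removes that discretion, which is why the patch changes
nothing today but matters for the later patches in the inline series.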
-rw-r--r-- include/asm-i386/bitops.h    | 2 +-
-rw-r--r-- include/asm-i386/current.h   | 2 +-
-rw-r--r-- include/asm-i386/string.h    | 8 ++++----
-rw-r--r-- include/asm-i386/uaccess.h   | 8 ++++----
-rw-r--r-- include/asm-x86_64/fixmap.h  | 2 +-
-rw-r--r-- include/asm-x86_64/uaccess.h | 6 +++---
-rw-r--r-- include/linux/mm.h           | 2 +-
7 files changed, 15 insertions(+), 15 deletions(-)
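For reference, __always_inline itself expands for gcc (in the
compiler-gcc*.h headers of kernels from this era; treat the exact
location as approximate) to the always_inline function attribute:

/* gcc definition; the annotated functions stay ordinary static
 * inlines, the attribute only forbids gcc from declining to inline. */
#define __always_inline	inline __attribute__((always_inline))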
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index fe0819fe9c64..88e6ca248cd7 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5f..3cbbecd79016 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d22065..bb5f88a27f7a 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 int d0, d1;
 __asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5aa..3f1337c34208 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index a582cfcf2231..7b286bd21d1d 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
  * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 2892c4b7a28b..bddffcb591b8 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len
 extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
 extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
 
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@ static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 	}
 }
 
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@ static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c643016499a1..85854b867463 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -512,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }