-rw-r--r--  include/asm-i386/bitops.h    |  2 +-
-rw-r--r--  include/asm-i386/current.h   |  2 +-
-rw-r--r--  include/asm-i386/string.h    |  8 ++++----
-rw-r--r--  include/asm-i386/uaccess.h   |  8 ++++----
-rw-r--r--  include/asm-x86_64/fixmap.h  |  2 +-
-rw-r--r--  include/asm-x86_64/uaccess.h |  6 +++---
-rw-r--r--  include/linux/mm.h           |  2 +-
7 files changed, 15 insertions(+), 15 deletions(-)
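This patch converts a set of small, hot helpers from `static inline` to `static __always_inline`, forcing gcc to inline them even at -Os or when its size heuristics would decline. Several of these bodies only optimize away when inlined into a call site with constant arguments. For reference, a sketch of what the annotation expands to, assuming the usual gcc definition from include/linux/compiler-gcc.h:

```c
/* Assumed gcc definition (include/linux/compiler-gcc.h): plain
 * `inline` is only a hint, while the always_inline attribute makes
 * gcc inline the function regardless of -Os or size heuristics. */
#define __always_inline	inline __attribute__((always_inline))
```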
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index fe0819fe9c64..88e6ca248cd7 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
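`constant_test_bit()` is only the constant-`nr` half of `test_bit()`; the dispatch happens at the call site, roughly as below (a sketch of the era's asm-i386/bitops.h, not quoted from this patch). If gcc fails to inline `constant_test_bit()`, the "constant" path degenerates into a real function call that computes the shift at run time.

```c
/* Sketch of the call-site dispatch in asm-i386/bitops.h (paraphrased):
 * a compile-time-constant bit number goes to the C version, which
 * folds to a test against an immediate once inlined; a variable bit
 * number goes to the btl-based asm version. */
#define test_bit(nr, addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr), (addr)) :	\
	 variable_test_bit((nr), (addr)))
```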
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5f..3cbbecd79016 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
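`get_current()` sits behind the `current` macro, so it is expanded in essentially every function that touches the running task; an out-of-line copy per translation unit would be pure overhead. The wrapper, as defined in the same header:

```c
/* From the same header: every use of `current` is a call to
 * get_current(), so forcing it inline keeps task access at a couple
 * of instructions instead of a function call. */
#define current get_current()
```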
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d22065..bb5f88a27f7a 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 	return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 	int d0, d1, d2;
 	__asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
 * things 32 bits at a time even when we don't know the size of the
 * area at compile-time..
 */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
 * This looks horribly ugly, but the compiler can optimize it totally,
 * as we by now know that both pattern and count is constant..
 */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
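As with bitops, the `__constant_*` variants are selected by call-site macros keyed on `__builtin_constant_p`; their switch-on-size bodies only collapse to a few moves if the compiler actually inlines them. Roughly (a sketch of the i386 header's dispatch, not part of this hunk):

```c
/* Sketch of the memcpy dispatch in asm-i386/string.h (paraphrased):
 * constant sizes take __constant_memcpy(), whose per-size switch is
 * resolved at compile time once inlined; variable sizes take the
 * rep-movs implementation in __memcpy(). */
#define memcpy(t, f, n)				\
	(__builtin_constant_p(n) ?		\
	 __constant_memcpy((t), (f), (n)) :	\
	 __memcpy((t), (f), (n)))
```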
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5aa..3f1337c34208 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
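The `__builtin_constant_p(n)` tests inside these copy routines are the whole point of the change: the builtin only evaluates to 1 after the function has been inlined into a call site where `n` is a literal. A standalone demo (ordinary userspace C, not kernel code) of how a non-inlined body loses the fast path, when built with `gcc -O2`:

```c
/* Standalone demo (gcc -O2), not kernel code: __builtin_constant_p()
 * can only see a constant argument after inlining.  The noinline copy
 * models what happens when gcc ignores a plain `inline` hint. */
#include <stdio.h>

static inline int constant_seen_if_inlined(unsigned long n)
{
	return __builtin_constant_p(n);
}

static __attribute__((noinline)) int constant_never_seen(unsigned long n)
{
	return __builtin_constant_p(n);
}

int main(void)
{
	/* Prints "1 0": only the inlined body sees n == 16 as constant,
	 * so only it could select a constant-size fast path. */
	printf("%d %d\n",
	       constant_seen_if_inlined(16),
	       constant_never_seen(16));
	return 0;
}
```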
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index a582cfcf2231..7b286bd21d1d 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
 * directly without translation, we catch the bug with a NULL-deference
 * kernel oops. Illegal ranges of incoming indices are caught too.
 */
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
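fix_to_virt() relies on a link-time assertion: `__this_fixmap_does_not_exist()`, declared in the hunk context above, is never defined anywhere, so the out-of-range branch must be proven dead at compile time or the kernel fails to link. With only a hint-`inline`, a non-inlined copy keeps the branch (and the unresolvable call) alive even for valid indices. The body, reconstructed from the surrounding context (a sketch, not quoted from this hunk):

```c
/* Sketch of fix_to_virt() per the comment in the hunk: the range
 * check compares against the enum's end marker, and the called
 * function has no definition anywhere in the kernel.  If idx is a
 * valid compile-time constant, the branch is eliminated and the call
 * disappears; otherwise the missing symbol breaks the link. */
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
	if (idx >= __end_of_fixed_addresses)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
}
```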
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 2892c4b7a28b..bddffcb591b8 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned le
 extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
 extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
 
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@ static inline int __copy_from_user(void *dst, const void __user *src, unsigned s
 	}
 }
 
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@ static inline int __copy_to_user(void __user *dst, const void *src, unsigned siz
 }
 
 
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 	if (!__builtin_constant_p(size))
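The x86-64 copy routines follow the same shape: a non-constant `size` falls through to the generic copier immediately, while a constant `size` hits a switch of fixed-width moves that vanishes at compile time, which only happens if the switch is inlined. A simplified model of the pattern (hypothetical helper names, ordinary C instead of the kernel's uaccess macros):

```c
/* Simplified model of the constant-size fast path (hypothetical
 * generic_copy() stands in for the kernel's generic copier). */
#include <string.h>

static unsigned generic_copy(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);	/* runtime-sized fallback */
	return 0;
}

static __attribute__((always_inline)) inline
unsigned copy_small(void *dst, const void *src, unsigned size)
{
	if (!__builtin_constant_p(size))
		return generic_copy(dst, src, size);
	switch (size) {		/* resolved at compile time when inlined */
	case 1: *(char *)dst = *(const char *)src; return 0;
	case 2: *(short *)dst = *(const short *)src; return 0;
	case 4: *(int *)dst = *(const int *)src; return 0;
	case 8: *(long long *)dst = *(const long long *)src; return 0;
	default: return generic_copy(dst, src, size);
	}
}
```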
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c643016499a1..85854b867463 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -512,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
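lowmem_page_address() turns a `struct page *` into a kernel virtual address by pure arithmetic: page to page frame number, to physical address, to direct-mapped virtual address. Under the classic flat memory model the pieces are simple offsets, roughly:

```c
/* Assumed flat-memory-model definitions (paraphrased, not from this
 * hunk): the pfn is the page's index in mem_map, and __va() adds the
 * direct-map base.  So lowmem_page_address(page) is effectively
 * PAGE_OFFSET + (page - mem_map) * PAGE_SIZE. */
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define __va(x)			((void *)((unsigned long)(x) + PAGE_OFFSET))
```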