Diffstat (limited to 'include/asm-i386')

 -rw-r--r--  include/asm-i386/bitops.h  | 2 +-
 -rw-r--r--  include/asm-i386/current.h | 2 +-
 -rw-r--r--  include/asm-i386/string.h  | 8 ++++----
 -rw-r--r--  include/asm-i386/uaccess.h | 8 ++++----
 4 files changed, 10 insertions(+), 10 deletions(-)
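Every hunk below makes the same one-line change: "static inline" becomes
"static __always_inline" on helpers whose bodies must be expanded at
compile time. Plain inline is only a hint that gcc may ignore at -Os or
under its inlining heuristics; __always_inline forces the expansion. A
sketch of the kernel's definition (it lives in include/linux/compiler-gcc.h,
modulo surrounding guards):

    #define __always_inline inline __attribute__((always_inline))

These helpers rely on being inlined so that __builtin_constant_p() and
constant arguments are evaluated by the compiler rather than at run time.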
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index fe0819fe9c64..88e6ca248cd7 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
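constant_test_bit() is only reached when nr is a compile-time constant;
the dispatch is done by a macro along these lines (a sketch of the i386
test_bit definition, not quoted verbatim from this tree):

    #define test_bit(nr, addr)                        \
            (__builtin_constant_p(nr)                 \
             ? constant_test_bit((nr), (addr))        \
             : variable_test_bit((nr), (addr)))

If gcc declined the inline hint, the constant branch would become a real
call and the (1UL << (nr & 31)) mask could no longer fold to an immediate
operand; __always_inline removes that possibility.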
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5f..3cbbecd79016 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
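get_current() backs the current macro, which (as a sketch of this header's
tail, not quoted verbatim) is simply:

    #define current get_current()

Since current appears in hot paths throughout the kernel, an out-of-line
get_current() would turn every use into a function call; forcing the
inline keeps it a single load through current_thread_info().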
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d22065..bb5f88a27f7a 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 int d0, d1;
 __asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 	case 0:
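The comments in the hunks above state the contract: __constant_memcpy()
is correct for any size, but profitable only when the compiler folds the
constant n and expands the body. In this header memcpy() itself is a macro
that picks the constant variant, roughly (a sketch, not quoted verbatim):

    #define memcpy(t, f, n)                           \
            (__builtin_constant_p(n)                  \
             ? __constant_memcpy((t), (f), (n))       \
             : __memcpy((t), (f), (n)))

so a call such as memcpy(dst, src, 8) is meant to compile down to two
32-bit moves. If gcc ignored the inline hint, the constant branch would
become an out-of-line call and the size switch would run at run time;
__always_inline guarantees the expansion.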
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5aa..3f1337c34208 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
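The stakes here are the same as in string.h: __copy_to_user_inatomic()
tests __builtin_constant_p(n) and, for n of 1, 2 or 4, performs a single
__put_user_size()-style access instead of calling __copy_to_user_ll().
A hedged usage sketch (the struct and function are hypothetical, not from
this tree):

    struct sample { unsigned int value; };

    /* Copies 4 bytes; with __always_inline the constant-size branch is
     * selected at compile time and collapses to one user-space store
     * plus fault handling, with no call into __copy_to_user_ll(). */
    unsigned long put_sample(struct sample __user *dst, struct sample *src)
    {
            return __copy_to_user(dst, src, sizeof(*src));
    }

That branch selection happens only if the function body is actually
inlined into the caller, which is what this patch now guarantees.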