Diffstat (limited to 'arch/x86/include/asm/bitops.h')

 arch/x86/include/asm/bitops.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index e02a359d2aa5..02b47a603fc8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@
 
 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
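For context, a minimal standalone sketch of the pattern this patch applies (not the kernel's actual headers; set_lowest_bit() and the local __always_inline fallback definition are hypothetical, for illustration only): gcc's always_inline attribute forces a multi-statement helper to be inlined even where older gcc's inlining heuristics would otherwise leave it out-of-line.

/* Hypothetical, self-contained illustration of the __always_inline pattern
 * used in the patch above; in the kernel the macro comes from the compiler
 * headers, it is only defined here so the sketch compiles on its own. */
#include <stdio.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((__always_inline__))
#endif

/* A multi-statement inline: the kind of helper the patch note says should
 * be marked __always_inline for older gcc. */
static __always_inline unsigned long set_lowest_bit(unsigned long *word)
{
	*word |= 1UL;	/* statement 1: set bit 0 */
	return *word;	/* statement 2: return the updated value */
}

int main(void)
{
	unsigned long w = 8;

	printf("%lu\n", set_lowest_bit(&w));	/* prints 9 */
	return 0;
}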
