 include/asm-x86/bitops_64.h | 52
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index bba26e03f33f..766bcc0470a6 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -29,7 +29,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
 {
         __asm__ __volatile__( LOCK_PREFIX
                 "btsl %1,%0"
@@ -46,7 +46,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(int nr, volatile void *addr)
 {
         __asm__ volatile(
                 "btsl %1,%0"
@@ -64,7 +64,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
 {
         __asm__ __volatile__( LOCK_PREFIX
                 "btrl %1,%0"
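
For context, the functions touched above are the core x86-64 atomic bitops: set_bit() and clear_bit() use a LOCK-prefixed btsl/btrl so the read-modify-write is atomic across CPUs, while __set_bit() (no LOCK prefix) is only safe when callers serialize access themselves. As the clear_bit() comment notes, the atomic ops are not memory barriers on their own. A minimal usage sketch, with made-up flag names and a made-up flags word that are not part of this header:

#define DEV_FLAG_BUSY   0               /* illustrative bit numbers */
#define DEV_FLAG_ERROR  1

static unsigned long dev_flags;         /* bitmap: at least one long */

static void mark_busy(void)
{
        set_bit(DEV_FLAG_BUSY, &dev_flags);     /* atomic lock btsl */
}

static void ack_error(void)
{
        /* clear_bit() is atomic but not a barrier; bracket it with the
         * smp_mb__*_clear_bit() helpers when ordering against other
         * memory accesses matters, as the comment above suggests. */
        smp_mb__before_clear_bit();
        clear_bit(DEV_FLAG_ERROR, &dev_flags);
        smp_mb__after_clear_bit();
}
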
@@ -86,7 +86,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
         clear_bit(nr, addr);
 }

-static __inline__ void __clear_bit(int nr, volatile void * addr)
+static inline void __clear_bit(int nr, volatile void *addr)
 {
         __asm__ __volatile__(
                 "btrl %1,%0"
@@ -124,7 +124,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static inline void __change_bit(int nr, volatile void *addr)
 {
         __asm__ __volatile__(
                 "btcl %1,%0"
@@ -141,7 +141,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
 {
         __asm__ __volatile__( LOCK_PREFIX
                 "btcl %1,%0"
@@ -157,7 +157,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
         int oldbit;

@@ -175,7 +175,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static __inline__ int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile void *addr)
 {
         return test_and_set_bit(nr, addr);
 }
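
test_and_set_bit() atomically sets the bit and returns its previous value, with an implied barrier, which is why test_and_set_bit_lock() can simply wrap it on x86 and pair with clear_bit_unlock() from the earlier hunk. A sketch of the usual bit-lock pattern built on these two; the bit number and wrapper names are illustrative, not from this header:

#define MY_LOCK_BIT 0                   /* illustrative bit number */

static unsigned long my_lock_word;

static int my_trylock(void)
{
        /* Old value 0 means we just set the bit and now own the lock;
         * old value 1 means another CPU already holds it. */
        return !test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word);
}

static void my_unlock(void)
{
        clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}
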
@@ -189,7 +189,7 @@ static __inline__ int test_and_set_bit_lock(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
         int oldbit;

@@ -208,7 +208,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
         int oldbit;

@@ -228,7 +228,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
 {
         int oldbit;

@@ -240,7 +240,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 }

 /* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(int nr, volatile void *addr)
 {
         int oldbit;

@@ -259,7 +259,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
 {
         int oldbit;

@@ -276,15 +276,15 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static int test_bit(int nr, const volatile void * addr);
+static int test_bit(int nr, const volatile void *addr);
 #endif

-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile void *addr)
 {
         return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }

-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+static inline int variable_test_bit(int nr, volatile const void *addr)
 {
         int oldbit;

@@ -302,10 +302,10 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr)

 #undef ADDR

-extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
-extern long find_first_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_bit(const unsigned long * addr, long size, long offset);
+extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
+extern long find_first_bit(const unsigned long *addr, unsigned long size);
+extern long find_next_bit(const unsigned long *addr, long size, long offset);

 /* return index of first bet set in val or max when no bit is set */
 static inline long __scanbit(unsigned long val, unsigned long max)
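
The find_* declarations above scan a bitmap of unsigned longs and return a bit index, with the size (or max, in __scanbit's case) standing in for "nothing found". A sketch of the common allocate-a-free-slot idiom built on find_first_zero_bit() and test_and_set_bit(); MAX_SLOTS and slot_map are made-up names, not from this header:

#define MAX_SLOTS 128

static unsigned long slot_map[MAX_SLOTS / (8 * sizeof(unsigned long))];

static long alloc_slot(void)
{
        long slot;

        do {
                slot = find_first_zero_bit(slot_map, MAX_SLOTS);
                if (slot >= MAX_SLOTS)
                        return -1;      /* bitmap is full */
                /* test_and_set_bit() closes the race: if another CPU
                 * grabbed the same slot first, the old bit reads back
                 * as 1 and we retry the search. */
        } while (test_and_set_bit(slot, slot_map));

        return slot;
}
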
@@ -366,7 +366,7 @@ static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
         __asm__("bsfq %1,%0"
                 :"=r" (word)
@@ -380,7 +380,7 @@ static __inline__ unsigned long ffz(unsigned long word)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
         __asm__("bsfq %1,%0"
                 :"=r" (word)
@@ -394,7 +394,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static __inline__ unsigned long __fls(unsigned long word)
+static inline unsigned long __fls(unsigned long word)
 {
         __asm__("bsrq %1,%0"
                 :"=r" (word)
@@ -414,7 +414,7 @@ static __inline__ unsigned long __fls(unsigned long word)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
 {
         int r;

@@ -430,7 +430,7 @@ static __inline__ int ffs(int x)
  *
  * This is defined the same way as fls.
  */
-static __inline__ int fls64(__u64 x)
+static inline int fls64(__u64 x)
 {
         if (x == 0)
                 return 0;
@@ -443,7 +443,7 @@ static __inline__ int fls64(__u64 x)
  *
  * This is defined the same way as ffs.
  */
-static __inline__ int fls(int x)
+static inline int fls(int x)
 {
         int r;

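
ffs() and fls() follow the libc convention referenced above: the result is 1-based and 0 means "no bit set", whereas __ffs(), __fls() and ffz() return 0-based indices straight from bsfq/bsrq and are undefined for their degenerate inputs. A few expected values as a sanity check, written with the kernel's BUG_ON() for concreteness (illustrative only; the helper name is made up):

static void bitops_sanity_check(void)
{
        /* BUG_ON() fires if the condition is true, i.e. on a mismatch. */
        BUG_ON(ffs(0) != 0);                    /* libc convention: no bit set */
        BUG_ON(ffs(0x08) != 4);                 /* 1-based lowest set bit */
        BUG_ON(fls(0x08) != 4);                 /* 1-based highest set bit */
        BUG_ON(fls64(1ULL << 40) != 41);

        BUG_ON(__ffs(0x08) != 3);               /* 0-based lowest set bit */
        BUG_ON(__fls(0x08) != 3);               /* 0-based highest set bit */
        BUG_ON(ffz(0xff) != 8);                 /* first zero bit */
}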
