author | Mathieu Desnoyers <compudj@krystal.dyndns.org> | 2007-05-08 03:34:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-08 14:15:20 -0400 |
commit | 2856f5e31c1413bf6e4f1371e07e17078a5fee5e (patch) | |
tree | 587dfe584f0913813d0cf2414a9378618143db15 /include/asm-x86_64/atomic.h | |
parent | 79d365a306c3af53d8a732fec79b76c0b285d816 (diff) | |
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
I agree (with Andi Kleen) that this typeof is not needed and is more
error-prone. All the original atomic.h code that uses cmpxchg (which includes
the atomic_add_unless) uses defines instead of inline functions,
probably to circumvent a circular dependency between system.h and
atomic.h on powerpc (which my patch addresses). Therefore, it makes
sense to use inline functions that will provide type checking.
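To make the type-checking point concrete, a minimal userspace sketch might look like the following. This is not the kernel header: atomic_t, atomic_read() and atomic_cmpxchg() are stubbed here with a GCC __sync builtin so the example builds on its own, and likely()/unlikely() are omitted.

/*
 * Minimal userspace sketch, NOT the kernel header: atomic_t and
 * atomic_cmpxchg() are stubbed with a GCC __sync builtin so the example
 * builds on its own, and likely()/unlikely() are omitted.
 */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static inline int atomic_read(const atomic_t *v)
{
	return v->counter;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __sync_val_compare_and_swap(&v->counter, old, new);
}

/* Inline-function form from the patch: the prototype enforces the types. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (c == u)			/* hit the forbidden value */
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (old == c)			/* our cmpxchg won */
			break;
		c = old;			/* lost a race, retry */
	}
	return c != u;
}

int main(void)
{
	atomic_t refs = { 1 };
	int ret;

	ret = atomic_add_unless(&refs, 1, 0);
	printf("added=%d refs=%d\n", ret, atomic_read(&refs));	/* added=1 refs=2 */

	refs.counter = 0;
	ret = atomic_add_unless(&refs, 1, 0);
	printf("added=%d refs=%d\n", ret, atomic_read(&refs));	/* added=0 refs=0 */

	/*
	 * int x = 0; atomic_add_unless(&x, 1, 0);
	 * would now be diagnosed as a pointer-type mismatch -- the checking
	 * the old statement-expression macro could not provide.
	 */
	return 0;
}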
atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
Digging into the FRV architecture shows me that it is also affected by
such a circular dependency. Here is the diff applying this against the
rest of my atomic.h patches.
It applies over the atomic.h standardization patches.
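For the header-ordering side of the argument, the toy sketch below (hypothetical names such as cmpxchg_like and add_unless_fn; not kernel code, and not actually atomic) shows why the macro form tolerated the old header layout while an inline function does not: a macro body is only compiled at its use sites, so the primitive it calls merely has to be visible there, whereas a static inline body is compiled where it is defined, so the defining header must already see the primitive. That is why the x86-64 header in this patch now includes <asm/system.h> (where cmpxchg() was defined at the time) directly.

/*
 * Toy illustration with hypothetical names (cmpxchg_like, add_unless_*);
 * not kernel code, and not actually atomic.
 */
#include <stdio.h>

/*
 * "atomic.h" role: the macro can be written before cmpxchg_like() is even
 * declared, because its body is only compiled at each use site.
 */
#define add_unless_macro(v, a, u) \
	((*(v) != (u)) ? (cmpxchg_like((v), *(v), *(v) + (a)) != (u)) : 0)

/* "system.h" role: the primitive the atomic operations are built on. */
static inline int cmpxchg_like(int *p, int old, int new)
{
	int prev = *p;

	if (prev == old)
		*p = new;
	return prev;
}

/*
 * The inline-function form has to come after the primitive's declaration,
 * which is why the header defining it must now pull in the header that
 * provides it.
 */
static inline int add_unless_fn(int *v, int a, int u)
{
	int c = *v, old;

	for (;;) {
		if (c == u)
			break;
		old = cmpxchg_like(v, c, c + a);
		if (old == c)
			break;
		c = old;
	}
	return c != u;
}

int main(void)
{
	int x = 1, ret;

	ret = add_unless_macro(&x, 1, 0);
	printf("macro:  added=%d x=%d\n", ret, x);	/* added=1 x=2 */

	ret = add_unless_fn(&x, 1, 0);
	printf("inline: added=%d x=%d\n", ret, x);	/* added=1 x=3 */
	return 0;
}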
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-x86_64/atomic.h')
-rw-r--r-- | include/asm-x86_64/atomic.h | 59 |
1 files changed, 31 insertions, 28 deletions
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 80e4fdbe2204..19e0c607b568 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <asm/alternative.h>
+#include <asm/system.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-	__typeof__((v)->counter) c, old; \
-	c = atomic_read(v); \
-	for (;;) { \
-		if (unlikely(c == (u))) \
-			break; \
-		old = atomic_cmpxchg((v), c, c + (a)); \
-		if (likely(old == c)) \
-			break; \
-		c = old; \
-	} \
-	c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-	__typeof__((v)->counter) c, old; \
-	c = atomic64_read(v); \
-	for (;;) { \
-		if (unlikely(c == (u))) \
-			break; \
-		old = atomic64_cmpxchg((v), c, c + (a)); \
-		if (likely(old == c)) \
-			break; \
-		c = old; \
-	} \
-	c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic64_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */