diff options
author | Mathieu Desnoyers <compudj@krystal.dyndns.org> | 2007-05-08 03:34:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-08 14:15:20 -0400 |
commit | 2856f5e31c1413bf6e4f1371e07e17078a5fee5e (patch) | |
tree | 587dfe584f0913813d0cf2414a9378618143db15 /include/asm-ia64/atomic.h | |
parent | 79d365a306c3af53d8a732fec79b76c0b285d816 (diff) |
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
I agree (with Andi Kleen) that this typeof is not needed and is more error
prone. All the original atomic.h code that uses cmpxchg (which includes
the atomic_add_unless) uses defines instead of inline functions,
probably to circumvent a circular dependency between system.h and
atomic.h on powerpc (which my patch addresses). Therefore, it makes
sense to use inline functions that will provide type checking.
atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
Digging into the FRV architecture shows me that it is also affected by
such a circular dependency. Here is the diff applying this against the
rest of my atomic.h patches.
It applies over the atomic.h standardization patches.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-ia64/atomic.h')
-rw-r--r-- | include/asm-ia64/atomic.h | 59 |
1 file changed, 31 insertions, 28 deletions
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index b16ad235c7ee..1fc3b83325da 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | 16 | ||
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/system.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * On IA-64, counter must always be volatile to ensure that that the | 21 | * On IA-64, counter must always be volatile to ensure that that the |
@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) | |||
95 | (cmpxchg(&((v)->counter), old, new)) | 96 | (cmpxchg(&((v)->counter), old, new)) |
96 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 97 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
97 | 98 | ||
98 | #define atomic_add_unless(v, a, u) \ | 99 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) |
99 | ({ \ | 100 | { |
100 | __typeof__(v->counter) c, old; \ | 101 | int c, old; |
101 | c = atomic_read(v); \ | 102 | c = atomic_read(v); |
102 | for (;;) { \ | 103 | for (;;) { |
103 | if (unlikely(c == (u))) \ | 104 | if (unlikely(c == (u))) |
104 | break; \ | 105 | break; |
105 | old = atomic_cmpxchg((v), c, c + (a)); \ | 106 | old = atomic_cmpxchg((v), c, c + (a)); |
106 | if (likely(old == c)) \ | 107 | if (likely(old == c)) |
107 | break; \ | 108 | break; |
108 | c = old; \ | 109 | c = old; |
109 | } \ | 110 | } |
110 | c != (u); \ | 111 | return c != (u); |
111 | }) | 112 | } |
113 | |||
112 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 114 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
113 | 115 | ||
114 | #define atomic64_add_unless(v, a, u) \ | 116 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) |
115 | ({ \ | 117 | { |
116 | __typeof__(v->counter) c, old; \ | 118 | long c, old; |
117 | c = atomic64_read(v); \ | 119 | c = atomic64_read(v); |
118 | for (;;) { \ | 120 | for (;;) { |
119 | if (unlikely(c == (u))) \ | 121 | if (unlikely(c == (u))) |
120 | break; \ | 122 | break; |
121 | old = atomic64_cmpxchg((v), c, c + (a)); \ | 123 | old = atomic64_cmpxchg((v), c, c + (a)); |
122 | if (likely(old == c)) \ | 124 | if (likely(old == c)) |
123 | break; \ | 125 | break; |
124 | c = old; \ | 126 | c = old; |
125 | } \ | 127 | } |
126 | c != (u); \ | 128 | return c != (u); |
127 | }) | 129 | } |
130 | |||
128 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 131 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
129 | 132 | ||
130 | #define atomic_add_return(i,v) \ | 133 | #define atomic_add_return(i,v) \ |