path: root/include/asm-m68k/atomic.h
author     Mathieu Desnoyers <compudj@krystal.dyndns.org>          2007-05-08 03:34:38 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-08 14:15:20 -0400
commit     2856f5e31c1413bf6e4f1371e07e17078a5fee5e
tree       587dfe584f0913813d0cf2414a9378618143db15    /include/asm-m68k/atomic.h
parent     79d365a306c3af53d8a732fec79b76c0b285d816
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
atomic_add_unless as inline. Remove the system.h/atomic.h circular dependency.

I agree with Andi Kleen that the typeof is not needed and is more error-prone. All of the original atomic.h code that uses cmpxchg (which includes atomic_add_unless) uses defines instead of inline functions, probably to circumvent a circular dependency between system.h and atomic.h on powerpc (which my patch addresses). Therefore, it makes sense to use inline functions, which provide type checking.

Digging into the FRV architecture shows that it is also affected by such a circular dependency. Here is the diff applying this against the rest of my atomic.h patches. It applies on top of the atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
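For illustration only (not part of this commit): a minimal userspace sketch of the same compare-and-swap retry loop, using the GCC __atomic builtins in place of atomic_cmpxchg(); my_atomic_t and my_atomic_add_unless() are made-up names. It also shows why the inline form is preferable: a pointer of the wrong type is rejected by the compiler, whereas the old macro would only fail (or silently misbehave) at expansion time.

#include <stdbool.h>

typedef struct { int counter; } my_atomic_t;

/* Add 'a' to *v unless the current value is 'u'; return true if the add happened. */
static inline bool my_atomic_add_unless(my_atomic_t *v, int a, int u)
{
	int c = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return false;	/* hit the forbidden value, give up */
		/* On failure, 'c' is refreshed with the counter's current value. */
		if (__atomic_compare_exchange_n(&v->counter, &c, c + a,
						false, __ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return true;	/* swap succeeded, the add is done */
	}
}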
Diffstat (limited to 'include/asm-m68k/atomic.h')
-rw-r--r--  include/asm-m68k/atomic.h  31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d5eed64cb83..4915294fea6 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -2,7 +2,7 @@
 #define __ARCH_M68K_ATOMIC__
 
 
-#include <asm/system.h>	/* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_add_unless(v, a, u)		\
-({						\
-	int c, old;				\
-	c = atomic_read(v);			\
-	for (;;) {				\
-		if (unlikely(c == (u)))		\
-			break;			\
-		old = atomic_cmpxchg((v), c, c + (a));	\
-		if (likely(old == c))		\
-			break;			\
-		c = old;			\
-	}					\
-	c != (u);				\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
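A hedged usage sketch (not from this patch) of the atomic_inc_not_zero() helper built on atomic_add_unless(): the typical caller is lookup-side reference counting, where a new reference may only be taken while the count is still non-zero. struct my_obj and my_obj_get() are hypothetical names.

struct my_obj {
	atomic_t refcount;	/* drops to 0 when the object is being torn down */
	/* ... payload ... */
};

/* Take a reference during lookup; returns NULL if the object is already dying. */
static inline struct my_obj *my_obj_get(struct my_obj *obj)
{
	if (obj && atomic_inc_not_zero(&obj->refcount))
		return obj;	/* count was non-zero, reference taken */
	return NULL;		/* racing with teardown, do not touch */
}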