path: root/include/asm-frv/atomic.h
author	Mathieu Desnoyers <compudj@krystal.dyndns.org>	2007-05-08 03:34:38 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:15:20 -0400
commit	2856f5e31c1413bf6e4f1371e07e17078a5fee5e (patch)
tree	587dfe584f0913813d0cf2414a9378618143db15 /include/asm-frv/atomic.h
parent	79d365a306c3af53d8a732fec79b76c0b285d816 (diff)
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
atomic_add_unless as inline. Remove system.h atomic.h circular dependency.

I agree (with Andi Kleen) this typeof is not needed and more error prone. All the original atomic.h code that uses cmpxchg (which includes the atomic_add_unless) uses defines instead of inline functions, probably to circumvent a circular dependency between system.h and atomic.h on powerpc (which my patch addresses). Therefore, it makes sense to use inline functions that will provide type checking.

atomic_add_unless as inline. Remove system.h atomic.h circular dependency.

Digging into the FRV architecture shows me that it is also affected by such a circular dependency. Here is the diff applying this against the rest of my atomic.h patches. It applies over the atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
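For illustration only, a minimal sketch of the type-checking point made above; the demo_* names and the simplified, non-atomic cmpxchg stand-in are hypothetical and are not the kernel's definitions. A #define version of atomic_add_unless() expands for any argument type, while the inline form below lets the compiler reject anything that is not an atomic_t *.

typedef struct { int counter; } atomic_t;

/* Demo stand-ins for atomic_read()/atomic_cmpxchg(); NOT actually atomic,
 * they only model the return-value contract atomic_add_unless() relies on. */
static inline int demo_atomic_read(const atomic_t *v)
{
	return v->counter;
}

static inline int demo_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int cur = v->counter;
	if (cur == old)
		v->counter = new;
	return cur;			/* always returns the previous value */
}

/* Inline form: the prototype gives type checking a #define cannot. */
static inline int demo_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = demo_atomic_read(v);
	for (;;) {
		if (c == u)		/* forbidden value reached: give up */
			break;
		old = demo_atomic_cmpxchg(v, c, c + a);
		if (old == c)		/* exchange succeeded */
			break;
		c = old;		/* lost a race: retry with the new value */
	}
	return c != u;			/* non-zero if the add was performed */
}

With the macro form, passing, say, a plain int * would still compile; with the inline function the compiler reports a type mismatch at the call site.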
Diffstat (limited to 'include/asm-frv/atomic.h')
-rw-r--r--	include/asm-frv/atomic.h	91
1 file changed, 15 insertions(+), 76 deletions(-)
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 066386ac238e..d425d8d0ad77 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <asm/spr-regs.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 #error not SMP safe
@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
 
 #define tas(ptr)		(xchg((ptr), 1))
 
-/*****************************************************************************/
-/*
- * compare and conditionally exchange value with memory
- * - if (*ptr == test) then orig = *ptr; *ptr = test;
- * - if (*ptr != test) then orig = *ptr;
- */
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-
-#define cmpxchg(ptr, test, new)						\
-({									\
-	__typeof__(ptr) __xg_ptr = (ptr);				\
-	__typeof__(*(ptr)) __xg_orig, __xg_tmp;				\
-	__typeof__(*(ptr)) __xg_test = (test);				\
-	__typeof__(*(ptr)) __xg_new = (new);				\
-									\
-	switch (sizeof(__xg_orig)) {					\
-	case 4:								\
-		asm volatile(						\
-			"0:						\n"	\
-			"	orcc		gr0,gr0,gr0,icc3	\n"	\
-			"	ckeq		icc3,cc7		\n"	\
-			"	ld.p		%M0,%1			\n"	\
-			"	orcr		cc7,cc7,cc3		\n"	\
-			"	sub%I4cc	%1,%4,%2,icc0		\n"	\
-			"	bne		icc0,#0,1f		\n"	\
-			"	cst.p		%3,%M0		,cc3,#1	\n"	\
-			"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
-			"	beq		icc3,#0,0b		\n"	\
-			"1:						\n"	\
-			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)	\
-			: "r"(__xg_new), "NPr"(__xg_test)		\
-			: "memory", "cc7", "cc3", "icc3", "icc0"	\
-			);						\
-		break;							\
-									\
-	default:							\
-		__xg_orig = 0;						\
-		asm volatile("break");					\
-		break;							\
-	}								\
-									\
-	__xg_orig;							\
-})
-
-#else
-
-extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
-
-#define cmpxchg(ptr, test, new)						\
-({									\
-	__typeof__(ptr) __xg_ptr = (ptr);				\
-	__typeof__(*(ptr)) __xg_orig;					\
-	__typeof__(*(ptr)) __xg_test = (test);				\
-	__typeof__(*(ptr)) __xg_new = (new);				\
-									\
-	switch (sizeof(__xg_orig)) {					\
-	case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
-	default:							\
-		__xg_orig = 0;						\
-		asm volatile("break");					\
-		break;							\
-	}								\
-									\
-	__xg_orig;							\
-})
-
-#endif
-
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)					\
-({									\
-	int c, old;							\
-	c = atomic_read(v);						\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;						\
-	c != (u);							\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
 
 #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 
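A usage note, not part of the patch: callers typically reach atomic_add_unless() through the atomic_inc_not_zero() wrapper kept in the context above, e.g. to take a reference only while an object's count has not yet dropped to zero. A hypothetical sketch (struct object and object_get() are illustrative names, not kernel code):

struct object {
	atomic_t refcount;
	/* ... payload ... */
};

/* Returns non-zero only if the refcount was not already zero,
 * i.e. the object is not in the middle of being torn down. */
static int object_get(struct object *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}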