path: root/include
author     Mathieu Desnoyers <compudj@krystal.dyndns.org>          2007-05-08 03:34:38 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-08 14:15:20 -0400
commit     2856f5e31c1413bf6e4f1371e07e17078a5fee5e (patch)
tree       587dfe584f0913813d0cf2414a9378618143db15 /include
parent     79d365a306c3af53d8a732fec79b76c0b285d816 (diff)
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
I agree (with Andi Kleen) that this typeof is not needed and is more error prone. All the original atomic.h code that uses cmpxchg (which includes atomic_add_unless) uses defines instead of inline functions, probably to circumvent a circular dependency between system.h and atomic.h on powerpc (which this patch addresses). It therefore makes sense to use inline functions, which provide type checking.

Digging into the FRV architecture shows that it is also affected by such a circular dependency.

Here is the diff applied against the rest of my atomic.h patches; it applies over the atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
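For illustration only (not part of the patch): a minimal, self-contained sketch of the type-checking point. The atomic_t, atomic_read() and atomic_cmpxchg() below are simplified userspace stand-ins (the cmpxchg stand-in is not actually atomic); the inline atomic_add_unless() follows the same shape the patch introduces per architecture, and unlike the old statement-expression macro it rejects a wrongly typed pointer argument at compile time.

/* Illustration only -- simplified userspace stand-ins, not the kernel's
 * definitions; atomic_cmpxchg() below is not actually atomic. */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static int atomic_read(const atomic_t *v)
{
        return v->counter;
}

static int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int cur = v->counter;   /* toy stand-in, no real atomicity */

        if (cur == old)
                v->counter = new;
        return cur;
}

/* Same shape as the inline functions introduced by the patch: the typed
 * parameters mean a wrong pointer type is now a compile-time error. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (c == u)
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (old == c)
                        break;
                c = old;
        }
        return c != u;
}

int main(void)
{
        atomic_t v = { 5 };

        printf("%d\n", atomic_add_unless(&v, 1, 3));    /* 1: 5 != 3, v becomes 6 */
        printf("%d\n", atomic_add_unless(&v, 1, 6));    /* 0: v already 6, untouched */
        printf("%d\n", v.counter);                      /* 6 */
        return 0;
}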
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/atomic.h      59
-rw-r--r--  include/asm-arm/atomic.h         1
-rw-r--r--  include/asm-arm26/atomic.h       1
-rw-r--r--  include/asm-frv/atomic.h        91
-rw-r--r--  include/asm-frv/system.h        70
-rw-r--r--  include/asm-generic/atomic.h    17
-rw-r--r--  include/asm-i386/atomic.h       29
-rw-r--r--  include/asm-ia64/atomic.h       59
-rw-r--r--  include/asm-m32r/atomic.h       23
-rw-r--r--  include/asm-m68k/atomic.h       31
-rw-r--r--  include/asm-m68knommu/atomic.h  25
-rw-r--r--  include/asm-mips/atomic.h       46
-rw-r--r--  include/asm-parisc/atomic.h     47
-rw-r--r--  include/asm-powerpc/atomic.h     1
-rw-r--r--  include/asm-ppc/system.h         1
-rw-r--r--  include/asm-sparc64/atomic.h    59
-rw-r--r--  include/asm-x86_64/atomic.h     59
-rw-r--r--  include/asm-xtensa/atomic.h     23
18 files changed, 360 insertions(+), 282 deletions(-)
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 7b4fba88cbeb..f5cb7b878af2 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -2,6 +2,7 @@
 #define _ALPHA_ATOMIC_H
 
 #include <asm/barrier.h>
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -190,20 +191,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -215,20 +217,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f266c2795124..3b59f94b5a3d 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 97e944fe1cff..d6dd42374cf3 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -20,7 +20,6 @@
 #ifndef __ASM_ARM_ATOMIC_H
 #define __ASM_ARM_ATOMIC_H
 
-
 #ifdef CONFIG_SMP
 #error SMP is NOT supported
 #endif
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 066386ac238e..d425d8d0ad77 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <asm/spr-regs.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 #error not SMP safe
@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
 
 #define tas(ptr) (xchg((ptr), 1))
 
-/*****************************************************************************/
-/*
- * compare and conditionally exchange value with memory
- * - if (*ptr == test) then orig = *ptr; *ptr = test;
- * - if (*ptr != test) then orig = *ptr;
- */
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-
-#define cmpxchg(ptr, test, new) \
-({ \
-        __typeof__(ptr) __xg_ptr = (ptr); \
-        __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
-        __typeof__(*(ptr)) __xg_test = (test); \
-        __typeof__(*(ptr)) __xg_new = (new); \
- \
-        switch (sizeof(__xg_orig)) { \
-        case 4: \
-                asm volatile( \
-                        "0: \n" \
-                        " orcc gr0,gr0,gr0,icc3 \n" \
-                        " ckeq icc3,cc7 \n" \
-                        " ld.p %M0,%1 \n" \
-                        " orcr cc7,cc7,cc3 \n" \
-                        " sub%I4cc %1,%4,%2,icc0 \n" \
-                        " bne icc0,#0,1f \n" \
-                        " cst.p %3,%M0 ,cc3,#1 \n" \
-                        " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
-                        " beq icc3,#0,0b \n" \
-                        "1: \n" \
-                        : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
-                        : "r"(__xg_new), "NPr"(__xg_test) \
-                        : "memory", "cc7", "cc3", "icc3", "icc0" \
-                        ); \
-                break; \
- \
-        default: \
-                __xg_orig = 0; \
-                asm volatile("break"); \
-                break; \
-        } \
- \
-        __xg_orig; \
-})
-
-#else
-
-extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
-
-#define cmpxchg(ptr, test, new) \
-({ \
-        __typeof__(ptr) __xg_ptr = (ptr); \
-        __typeof__(*(ptr)) __xg_orig; \
-        __typeof__(*(ptr)) __xg_test = (test); \
-        __typeof__(*(ptr)) __xg_new = (new); \
- \
-        switch (sizeof(__xg_orig)) { \
-        case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
-        default: \
-                __xg_orig = 0; \
-                asm volatile("break"); \
-                break; \
-        } \
- \
-        __xg_orig; \
-})
-
-#endif
-
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 1166899317d7..be303b3eef40 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -13,7 +13,6 @@
 #define _ASM_SYSTEM_H
 
 #include <linux/linkage.h>
-#include <asm/atomic.h>
 
 struct thread_struct;
 
@@ -197,4 +196,73 @@ extern void free_initmem(void);
 
 #define arch_align_stack(x) (x)
 
+/*****************************************************************************/
+/*
+ * compare and conditionally exchange value with memory
+ * - if (*ptr == test) then orig = *ptr; *ptr = test;
+ * - if (*ptr != test) then orig = *ptr;
+ */
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define cmpxchg(ptr, test, new) \
+({ \
+        __typeof__(ptr) __xg_ptr = (ptr); \
+        __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
+        __typeof__(*(ptr)) __xg_test = (test); \
+        __typeof__(*(ptr)) __xg_new = (new); \
+ \
+        switch (sizeof(__xg_orig)) { \
+        case 4: \
+                asm volatile( \
+                        "0: \n" \
+                        " orcc gr0,gr0,gr0,icc3 \n" \
+                        " ckeq icc3,cc7 \n" \
+                        " ld.p %M0,%1 \n" \
+                        " orcr cc7,cc7,cc3 \n" \
+                        " sub%I4cc %1,%4,%2,icc0 \n" \
+                        " bne icc0,#0,1f \n" \
+                        " cst.p %3,%M0 ,cc3,#1 \n" \
+                        " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+                        " beq icc3,#0,0b \n" \
+                        "1: \n" \
+                        : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
+                        : "r"(__xg_new), "NPr"(__xg_test) \
+                        : "memory", "cc7", "cc3", "icc3", "icc0" \
+                        ); \
+                break; \
+ \
+        default: \
+                __xg_orig = 0; \
+                asm volatile("break"); \
+                break; \
+        } \
+ \
+        __xg_orig; \
+})
+
+#else
+
+extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
+
+#define cmpxchg(ptr, test, new) \
+({ \
+        __typeof__(ptr) __xg_ptr = (ptr); \
+        __typeof__(*(ptr)) __xg_orig; \
+        __typeof__(*(ptr)) __xg_test = (test); \
+        __typeof__(*(ptr)) __xg_new = (new); \
+ \
+        switch (sizeof(__xg_orig)) { \
+        case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
+        default: \
+                __xg_orig = 0; \
+                asm volatile("break"); \
+                break; \
+        } \
+ \
+        __xg_orig; \
+})
+
+#endif
+
+
 #endif /* _ASM_SYSTEM_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 5ae6dce1cba2..85fd0aa27a8c 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -9,7 +9,6 @@
  */
 
 #include <asm/types.h>
-#include <asm/system.h>
 
 /*
  * Suppport for atomic_long_t
@@ -123,8 +122,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
         return (long)atomic64_dec_return(v);
 }
 
-#define atomic_long_add_unless(l, a, u) \
-        atomic64_add_unless((atomic64_t *)(l), (a), (u))
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+        atomic64_t *v = (atomic64_t *)l;
+
+        return (long)atomic64_add_unless(v, a, u);
+}
 
 #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
 
@@ -236,8 +239,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
         return (long)atomic_dec_return(v);
 }
 
-#define atomic_long_add_unless(l, a, u) \
-        atomic_add_unless((atomic_t *)(l), (a), (u))
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+        atomic_t *v = (atomic_t *)l;
+
+        return (long)atomic_add_unless(v, a, u);
+}
 
 #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
 
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 08935113206a..ff90c6e3fcb4 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -219,20 +219,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_inc_return(v) (atomic_add_return(1,v))
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index b16ad235c7ee..1fc3b83325da 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/system.h>
 
 /*
  * On IA-64, counter must always be volatile to ensure that that the
@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
         (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__(v->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__(v->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_return(i,v) \
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index f5a7d7301c72..3a38ffe4a4f4 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d5eed64cb833..4915294fea63 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -2,7 +2,7 @@
 #define __ARCH_M68K_ATOMIC__
 
 
-#include <asm/system.h> /* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
         __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 6c4e4b63e454..d5632a305dae 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -1,7 +1,7 @@
 #ifndef __ARCH_M68KNOMMU_ATOMIC__
 #define __ARCH_M68KNOMMU_ATOMIC__
 
-#include <asm/system.h> /* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 6423ffa195a4..62daa746a9c9 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -18,6 +18,7 @@
 #include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -694,14 +701,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 66a0edbb51f4..e894ee35074b 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -6,6 +6,7 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -174,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
@@ -283,14 +291,21 @@ atomic64_read(const atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #endif /* CONFIG_64BIT */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 438a7fcfba58..c44810b9d322 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
+#include <asm/system.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 738943584c01..56abe5e9e155 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -6,7 +6,6 @@
 
 #include <linux/kernel.h>
 
-#include <asm/atomic.h>
 #include <asm/hw_irq.h>
 
 /*
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index c3feb3af2cfe..3fb4e1f7f186 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -9,6 +9,7 @@
 #define __ARCH_SPARC64_ATOMIC__
 
 #include <linux/types.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 typedef struct { volatile __s64 counter; } atomic64_t;
@@ -73,40 +74,42 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        likely(c != (u)); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic64_cmpxchg(v, o, n) \
         ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        likely(c != (u)); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 80e4fdbe2204..19e0c607b568 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <asm/alternative.h>
+#include <asm/system.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 5c2672021068..b3b23540f14d 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)