 arch/alpha/include/asm/atomic.h    | 10
 arch/arm/include/asm/atomic.h      |  4
 arch/avr32/include/asm/atomic.h    | 57
 arch/blackfin/include/asm/atomic.h |  4
 arch/cris/include/asm/atomic.h     |  4
 arch/frv/include/asm/atomic.h      |  4
 arch/h8300/include/asm/atomic.h    |  4
 arch/ia64/include/asm/atomic.h     |  4
 arch/m32r/include/asm/atomic.h     |  8
 arch/m68k/include/asm/atomic.h     |  4
 arch/mips/include/asm/atomic.h     | 10
 arch/mn10300/include/asm/atomic.h  |  4
 arch/parisc/include/asm/atomic.h   | 10
 arch/powerpc/include/asm/atomic.h  | 14
 arch/s390/include/asm/atomic.h     |  4
 arch/sh/include/asm/atomic.h       |  8
 arch/sparc/include/asm/atomic_32.h |  2
 arch/sparc/include/asm/atomic_64.h |  4
 arch/tile/include/asm/atomic_32.h  | 10
 arch/tile/include/asm/atomic_64.h  |  4
 arch/x86/include/asm/atomic.h      |  8
 arch/x86/include/asm/atomic64_32.h |  2
 arch/x86/include/asm/atomic64_64.h |  2
 arch/xtensa/include/asm/atomic.h   |  8
 include/asm-generic/atomic.h       |  4
 include/linux/atomic.h             | 14
 26 files changed, 109 insertions(+), 102 deletions(-)
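
Every hunk below applies the same pattern: each architecture's atomic_add_unless(), which returned a boolean, becomes __atomic_add_unless() and returns the value it observed, while the boolean test moves into a single generic wrapper in include/linux/atomic.h. A minimal stand-alone sketch of the old and new contracts, built around the generic cmpxchg loop from this patch; the userspace atomic_read() and atomic_cmpxchg() shims over GCC __atomic builtins are illustrative stand-ins for the kernel primitives, not kernel code:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Userspace stand-ins for the kernel primitives (illustration only). */
static int atomic_read(atomic_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	/* Like the kernel's cmpxchg(): returns the value actually found. */
	__atomic_compare_exchange_n(&v->counter, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}

/* New convention: return the old value of *v, not a boolean. */
static int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}

/* The single generic wrapper that preserves the old boolean semantics. */
static int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

int main(void)
{
	atomic_t v = { 1 }, z = { 0 };

	printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter); /* 1 2 */
	printf("%d %d\n", atomic_add_unless(&z, 1, 0), z.counter); /* 0 0 */
	return 0;
}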
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 88b7491490bc..3d6704910268 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -176,15 +176,15 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -196,7 +196,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
@@ -207,7 +207,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 4d501f1bdc9d..3757e91c5281 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -208,14 +208,14 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
 
 #define atomic_inc(v)		atomic_add(1, v)
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index f229c3849f03..dc6c3a41a2d7 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -78,70 +78,63 @@ static inline int atomic_add_return(int i, atomic_t *v)
 /*
  * atomic_sub_unless - sub unless the number is a given value
  * @v: pointer of type atomic_t
- * @a: the amount to add to v...
+ * @a: the amount to subtract from v...
  * @u: ...unless v is equal to u.
  *
- * If the atomic value v is not equal to u, this function subtracts a
- * from v, and returns non zero. If v is equal to u then it returns
- * zero. This is done as an atomic operation.
+ * Atomically subtract @a from @v, so long as it was not @u.
+ * Returns the old value of @v.
 */
-static inline int atomic_sub_unless(atomic_t *v, int a, int u)
+static inline void atomic_sub_unless(atomic_t *v, int a, int u)
 {
-	int tmp, result = 0;
+	int tmp;
 
 	asm volatile(
 		"/* atomic_sub_unless */\n"
 		"1:	ssrf	5\n"
-		"	ld.w	%0, %3\n"
-		"	cp.w	%0, %5\n"
+		"	ld.w	%0, %2\n"
+		"	cp.w	%0, %4\n"
 		"	breq	1f\n"
-		"	sub	%0, %4\n"
-		"	stcond	%2, %0\n"
+		"	sub	%0, %3\n"
+		"	stcond	%1, %0\n"
 		"	brne	1b\n"
-		"	mov	%1, 1\n"
 		"1:"
-		: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-		: "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
+		: "=&r"(tmp), "=o"(v->counter)
+		: "m"(v->counter), "rKs21"(a), "rKs21"(u)
 		: "cc", "memory");
-
-	return result;
 }
 
 /*
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * If the atomic value v is not equal to u, this function adds a to v,
- * and returns non zero. If v is equal to u then it returns zero. This
- * is done as an atomic operation.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
 */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int tmp, result;
+	int tmp, old = atomic_read(v);
 
 	if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
-		result = atomic_sub_unless(v, -a, u);
+		atomic_sub_unless(v, -a, u);
 	else {
-		result = 0;
 		asm volatile(
-			"/* atomic_add_unless */\n"
+			"/* __atomic_add_unless */\n"
 			"1:	ssrf	5\n"
-			"	ld.w	%0, %3\n"
-			"	cp.w	%0, %5\n"
+			"	ld.w	%0, %2\n"
+			"	cp.w	%0, %4\n"
 			"	breq	1f\n"
-			"	add	%0, %4\n"
-			"	stcond	%2, %0\n"
+			"	add	%0, %3\n"
+			"	stcond	%1, %0\n"
 			"	brne	1b\n"
-			"	mov	%1, 1\n"
 			"1:"
-			: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
-			: "m"(v->counter), "r"(a), "ir"(u), "1"(result)
+			: "=&r"(tmp), "=o"(v->counter)
+			: "m"(v->counter), "r"(a), "ir"(u)
 			: "cc", "memory");
 	}
 
-	return result;
+	return old;
 }
 
 /*
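
The avr32 hunk above keeps its special case: when the addend is a compile-time constant that fits the machine's 21-bit signed immediate (the "Ks21" constraint), __atomic_add_unless() routes through atomic_sub_unless(v, -a, u) so the immediate form of sub can be emitted. A minimal userspace sketch of that __builtin_constant_p dispatch idiom (the function name here is hypothetical, not from the patch):

#include <stdio.h>

/* Mirrors the avr32 range check for a 21-bit signed immediate. */
#define FITS_KS21(x) ((x) >= -1048575 && (x) <= 1048576)

static inline const char *add_unless_path(int a)
{
	/* __builtin_constant_p(a) is true only where the compiler can
	 * prove `a` constant at the (inlined) call site. */
	if (__builtin_constant_p(a) && FITS_KS21(a))
		return "immediate form";
	return "register form";
}

int main(void)
{
	volatile int n = 42;	/* runtime value, never constant-folded */

	/* With optimization on, the first call typically resolves to the
	 * immediate path; the second always takes the register path. */
	printf("constant: %s\n", add_unless_path(42));
	printf("runtime:  %s\n", add_unless_path(n));
	return 0;
}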
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index f2cf5b714ea4..292c86f74f85 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -89,13 +89,13 @@ static inline void atomic_set_mask(int mask, atomic_t *v)
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)				\
+#define __atomic_add_unless(v, a, u)				\
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
 	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	c != (u);						\
+	c;							\
 })
 
 /*
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index ce9f67e4d977..7e90532c5253 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -138,7 +138,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
@@ -148,7 +148,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 	if (ret != u)
 		v->counter += a;
 	cris_atomic_restore(v, flags);
-	return ret != u;
+	return ret;
 }
 
 /* Atomic operations are already serializing */
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index b07b75f411f2..a51dcdfe1fbf 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -241,7 +241,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
 #define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -253,7 +253,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index b641714774ea..e6d1663625f0 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -104,7 +104,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
@@ -114,7 +114,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 	if (ret != u)
 		v->counter += a;
 	local_irq_restore(flags);
-	return ret != u;
+	return ret;
 }
 
 static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index fdb887005dff..22aca210bd05 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -90,7 +90,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 	(cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -102,7 +102,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index d64d894dc549..c839426ac732 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -239,15 +239,15 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -259,7 +259,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e844a2d2ba23..2269350974f1 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -183,7 +183,7 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -195,7 +195,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 833a4023648a..31cb23debb7e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -303,15 +303,15 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -323,7 +323,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 #define atomic_dec_return(v) atomic_sub_return(1, (v))
@@ -679,7 +679,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 041b9d69d86c..a2e6759af4db 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -260,13 +260,13 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)	(atomic_add_return(1, (v)) == 0)
 
-#define atomic_add_unless(v, a, u)				\
+#define __atomic_add_unless(v, a, u)				\
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
 	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	c != (u);						\
+	c;							\
 })
 
 
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 192488999b63..1914f179879d 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -197,15 +197,15 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -217,7 +217,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
@@ -316,7 +316,7 @@ atomic64_read(const atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b2bcbee622ea..952e161fbb89 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -181,21 +181,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int t;
 
 	__asm__ __volatile__ (
 	PPC_RELEASE_BARRIER
-"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
+"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
@@ -209,7 +209,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 	: "r" (&v->counter), "r" (a), "r" (u)
 	: "cc", "memory");
 
-	return t != u;
+	return t;
 }
 
 
@@ -443,7 +443,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
@@ -451,7 +451,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 	__asm__ __volatile__ (
 	PPC_RELEASE_BARRIER
-"1:	ldarx	%0,0,%1		# atomic_add_unless\n\
+"1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 29d756329228..7b0b0a7193e5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -93,7 +93,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -105,7 +105,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != u;
+	return c;
 }
 
 
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 8ddb2635cf92..2177596d4b38 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -38,15 +38,15 @@
 #define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -59,7 +59,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 		c = old;
 	}
 
-	return c != (u);
+	return c;
 }
 
 #define smp_mb__before_atomic_dec()	smp_mb()
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7646f2cef5d0..bdce95e77f85 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -22,7 +22,7 @@
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-extern int atomic_add_unless(atomic_t *, int, int);
+extern int __atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          (*(volatile int *)&(v)->counter)
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 337139ef91be..1fc3d0a62d66 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -70,7 +70,7 @@ extern long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -82,7 +82,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 246feed4794d..c03349e0ca9f 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -81,18 +81,18 @@ static inline int atomic_add_return(int i, atomic_t *v)
 }
 
 /**
- * atomic_add_unless - add unless the number is already a given value
+ * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	smp_mb();  /* barrier for proper semantics */
-	return _atomic_xchg_add_unless(v, a, u) != u;
+	return _atomic_xchg_add_unless(v, a, u);
 }
 
 /**
@@ -199,7 +199,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index a48dda30cbcc..27fe667fddfe 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -64,7 +64,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	return val;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int guess, oldval = v->counter;
 	do {
@@ -73,7 +73,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 		guess = oldval;
 		oldval = atomic_cmpxchg(v, guess, guess + a);
 	} while (guess != oldval);
-	return oldval != u;
+	return oldval;
 }
 
 /* Now the true 64-bit operations. */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 897969bdd4e6..5fe9cb335cd2 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -221,15 +221,15 @@ static inline int atomic_xchg(atomic_t *v, int new)
 }
 
 /**
- * atomic_add_unless - add unless the number is already a given value
+ * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -241,7 +241,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 2a934aa19a43..24098aafce0d 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -263,7 +263,7 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 49fd1ea22951..017594d403f6 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -202,7 +202,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7cca2fb18baf..e464212f9ee6 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -225,15 +225,15 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -245,7 +245,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
 
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 7f84414c5f53..bd18bfd78e9b 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -129,13 +129,13 @@ static inline void atomic_dec(atomic_t *v)
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
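
The asm-generic change above is the template the architectures now follow, and the boolean semantics survive in exactly one place: the new wrapper in include/linux/atomic.h (final hunk below). Existing callers keep working unchanged; a sketch of a typical consumer, where atomic_inc_not_zero() is the wrapper applied with a = 1, u = 0 (the try_get() helper is hypothetical, for illustration):

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

/* Refcount-style caller: take a reference only while the count is
 * still non-zero. Returns non-zero on success. */
static inline int try_get(atomic_t *refcount)
{
	return atomic_inc_not_zero(refcount);
}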
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 66fed6364122..d5e167a6a398 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,6 +3,20 @@
 #include <asm/atomic.h>
 
 /**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	return __atomic_add_unless(v, a, u) != u;
+}
+
+/**
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
  *