author     Nick Piggin <nickpiggin@yahoo.com.au>    2005-11-13 19:07:25 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-11-13 21:14:16 -0500
commit     8426e1f6af0fd7f44d040af7263750c5a52f3cc3 (patch)
tree       827bd2588c2b73d11cea6869de8ff42dba134375
parent     4a6dae6d382e9edf3ff440b819e554ed706359bc (diff)
[PATCH] atomic: inc_not_zero
Introduce an atomic_inc_not_zero operation.  Make this a special case of
atomic_add_unless because lockless pagecache actually wants
atomic_inc_not_negativeone due to its offset refcount.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
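For context, this is the pattern the new primitive enables: taking a reference
on an object that may be concurrently freed is only safe if the refcount has
not already reached zero.  A minimal sketch, not part of this patch; the
struct and helper names are invented for illustration:

        /* Hedged sketch, not from this patch: the lookup side of a
         * lockless refcount.  "struct item" and item_tryget() are
         * made-up names. */
        struct item {
                atomic_t refcount;      /* 0 means teardown has begun */
                /* ... payload ... */
        };

        static struct item *item_tryget(struct item *it)
        {
                /* If the count already hit zero, a concurrent free is in
                 * flight; bumping it now would resurrect a dying object. */
                if (!atomic_inc_not_zero(&it->refcount))
                        return NULL;    /* lost the race, caller retries */
                return it;
        }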
-rw-r--r--  Documentation/atomic_ops.txt    |  14
-rw-r--r--  arch/sparc/lib/atomic32.c       |  15
-rw-r--r--  include/asm-alpha/atomic.h      |  10
-rw-r--r--  include/asm-arm/atomic.h        |  11
-rw-r--r--  include/asm-arm26/atomic.h      |  15
-rw-r--r--  include/asm-cris/atomic.h       |  14
-rw-r--r--  include/asm-frv/atomic.h        |  10
-rw-r--r--  include/asm-h8300/atomic.h      |  14
-rw-r--r--  include/asm-i386/atomic.h       |  19
-rw-r--r--  include/asm-ia64/atomic.h       |  10
-rw-r--r--  include/asm-m68k/atomic.h       |  10
-rw-r--r--  include/asm-m68knommu/atomic.h  |  10
-rw-r--r--  include/asm-mips/atomic.h       |  19
-rw-r--r--  include/asm-parisc/atomic.h     |  19
-rw-r--r--  include/asm-powerpc/atomic.h    |  25
-rw-r--r--  include/asm-s390/atomic.h       |  10
-rw-r--r--  include/asm-sh/atomic.h         |  15
-rw-r--r--  include/asm-sh64/atomic.h       |  15
-rw-r--r--  include/asm-sparc/atomic.h      |   3
-rw-r--r--  include/asm-sparc64/atomic.h    |  10
-rw-r--r--  include/asm-v850/atomic.h       |  16
-rw-r--r--  include/asm-x86_64/atomic.h     |  19
-rw-r--r--  include/asm-xtensa/atomic.h     |  19
23 files changed, 321 insertions(+), 1 deletion(-)
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index f1744161ef06..23a1c2402bcc 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -115,7 +115,7 @@ boolean is return which indicates whether the resulting counter value
 is negative. It requires explicit memory barrier semantics around the
 operation.
 
-Finally:
+Then:
 
 	int atomic_cmpxchg(atomic_t *v, int old, int new);
 
@@ -129,6 +129,18 @@ atomic_cmpxchg requires explicit memory barriers around the operation.
 The semantics for atomic_cmpxchg are the same as those defined for 'cas'
 below.
 
+Finally:
+
+	int atomic_add_unless(atomic_t *v, int a, int u);
+
+If the atomic value v is not equal to u, this function adds a to v, and
+returns non zero. If v is equal to u then it returns zero. This is done as
+an atomic operation.
+
+atomic_add_unless requires explicit memory barriers around the operation.
+
+atomic_inc_not_zero, equivalent to atomic_add_unless(v, 1, 0)
+
 
 If a caller requires memory barrier semantics around an atomic_t
 operation which does not return a value, a set of interfaces are
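A hedged walkthrough of the semantics just documented (illustrative, not from
the patch itself):

        atomic_t v = ATOMIC_INIT(1);

        atomic_add_unless(&v, 10, 1);   /* v == u: no add, returns 0, v stays 1 */
        atomic_add_unless(&v, 10, 5);   /* v != u: adds, returns non-zero, v is 11 */
        atomic_inc_not_zero(&v);        /* same as atomic_add_unless(&v, 1, 0); v is 12 */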
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index be46f6545184..cb3cf0f22822 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -53,6 +53,21 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret != u;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+/* Atomic operations are already serializing */
 void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
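sparc32 has no usable compare-and-swap instruction, so its atomic_t operations
are serialized by a small hashed array of spinlocks; ATOMIC_HASH(v) maps the
counter's address to one of them, so unrelated counters rarely contend on the
same lock.  A rough sketch of the idea (illustrative only; the real constants
and declarations live in the sparc headers):

        #define ATOMIC_HASH_SIZE        4
        extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
        /* Hash the address down to one of the locks. */
        #define ATOMIC_HASH(v) \
                (&__atomic_hash[(((unsigned long)(v)) >> 8) & (ATOMIC_HASH_SIZE - 1)])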
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index a6660809a879..36505bb4e8cb 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -179,6 +179,16 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
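The alpha version above is the generic shape most of the architectures below
reuse: read the counter, then retry cmpxchg until it either succeeds or the
value becomes u.  A userspace analogue using the GCC/Clang __atomic builtins
(an illustration, not kernel code) behaves the same way:

        #include <stdio.h>

        /* Userspace sketch of the cmpxchg loop; on failure,
         * __atomic_compare_exchange_n updates c to the value seen. */
        static int add_unless(int *v, int a, int u)
        {
                int c = __atomic_load_n(v, __ATOMIC_RELAXED);

                while (c != u &&
                       !__atomic_compare_exchange_n(v, &c, c + a, 0,
                                                    __ATOMIC_SEQ_CST,
                                                    __ATOMIC_SEQ_CST))
                        ;       /* c was refreshed, retry */
                return c != u;
        }

        int main(void)
        {
                int v = 0;

                printf("%d %d\n", add_unless(&v, 1, 0), v);  /* 0 0: v was u */
                v = 1;
                printf("%d %d\n", add_unless(&v, 1, 0), v);  /* 1 2: incremented */
                return 0;
        }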
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 8ab1689ef56a..75b802719723 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -173,6 +173,17 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_add(i, v)	(void) atomic_add_return(i, v)
 #define atomic_inc(v)		(void) atomic_add_return(1, v)
 #define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 54b24ead7132..a47cadc59686 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -76,6 +76,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	local_irq_restore(flags);
+
+	return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long flags;
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 45891f7de00f..683b05a57d88 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -136,6 +136,20 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	cris_atomic_save(v, flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	cris_atomic_restore(v, flags);
+	return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()    barrier()
 #define smp_mb__after_atomic_dec()     barrier()
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 55f06a0e949f..f6539ff569c5 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -416,4 +416,14 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index d50439259491..f23d86819ea8 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -95,6 +95,20 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	local_irq_restore(flags);
+	return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
 	__asm__ __volatile__("stc ccr,r1l\n\t"
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 5ff698e9d2c2..c68557aa04b2 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -217,6 +217,25 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
 #define atomic_dec_return(v)  (atomic_sub_return(1,v))
 
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 593d3da9f3c2..2fbebf85c31d 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -90,6 +90,16 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_add_return(i,v)                                  \
 ({                                                              \
 	int __ia64_aar_i = (i);                                 \
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index b821975a361a..e3c962eeabf3 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -141,6 +141,16 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 2fd33a56b603..3c1cc153c415 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -130,6 +130,16 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 4fba0d003c99..2c87b41e69ba 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -289,6 +289,25 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 52c9a45b5f87..983e9a2b6042 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -166,6 +166,25 @@ static __inline__ int atomic_read(const atomic_t *v)
 /* exported interface */
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
 #define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
 #define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 37205faa9d7c..ec4b14468959 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -166,6 +166,31 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                      \
+({                                                      \
+        int c, old;                                     \
+        c = atomic_read(v);                             \
+        for (;;) {                                      \
+                if (unlikely(c == (u)))                 \
+                        break;                          \
+                old = atomic_cmpxchg((v), c, c + (a));  \
+                if (likely(old == c))                   \
+                        break;                          \
+                c = old;                                \
+        }                                               \
+        c != (u);                                       \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
 
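The powerpc variant is the same cmpxchg loop, just open-coded as for (;;) so
the two exits can carry branch hints: bailing out because v == u is expected
to be rare, and the cmpxchg succeeding is expected to be common.  In the
kernel these hints are thin wrappers over a compiler builtin:

        /* From include/linux/compiler.h (GCC builds): */
        #define likely(x)       __builtin_expect(!!(x), 1)
        #define unlikely(x)     __builtin_expect(!!(x), 0)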
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 631014d5de90..b3bd4f679f72 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -200,6 +200,16 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
 
 #define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index a148c762d366..aabfd334462c 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -101,6 +101,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	local_irq_restore(flags);
+
+	return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 6eeb57b015ce..927a2bc27b30 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -113,6 +113,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	local_irq_restore(flags);
+
+	return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 52bdd1a895fa..62bec7ad271c 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -20,6 +20,7 @@ typedef struct { volatile int counter; } atomic_t;
 
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
+extern int atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          ((v)->counter)
@@ -49,6 +50,8 @@ extern void atomic_set(atomic_t *, int);
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 /* This is the old 24-bit implementation.  It's still used internally
  * by some sparc-specific code, notably the semaphore implementation.
  */
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 3a0b4383bbac..8198c3d0d007 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,16 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 /* Atomic operations are already serializing */
 #ifdef CONFIG_SMP
 #define smp_mb__before_atomic_dec()	membar_storeload_loadload();
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index e497166ca42b..bede3172ce7f 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -104,6 +104,22 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	local_irq_restore(flags);
+
+	return ret != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 /* Atomic operations are already serializing on ARM */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 75c8a1e96737..0866ef67f198 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -362,6 +362,25 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
 #define atomic_dec_return(v)  (atomic_sub_return(1,v))
 
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index cd40c5e75160..3670cc7695da 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -225,6 +225,25 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)                              \
+({                                                              \
+        int c, old;                                             \
+        c = atomic_read(v);                                     \
+        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+                c = old;                                        \
+        c != (u);                                               \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int all_f = -1;