Diffstat (limited to 'include/asm-powerpc/atomic.h'):
 -rw-r--r--  include/asm-powerpc/atomic.h | 178
 1 file changed, 178 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index c5b12fd2b46b..9c0b372a46e1 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -197,5 +197,183 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)	((v)->counter)
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_add\n\
+	add	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
+	add	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_sub\n\
+	subf	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
+	subf	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#endif /* __powerpc64__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
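
As a quick illustration of the interface this commit adds (not part of the diff itself), here is a minimal sketch of a 64-bit event counter built on the new primitives. The counter and both helpers are hypothetical names, and the sketch assumes a powerpc64 kernel where these definitions are visible through <asm/atomic.h>:

	#include <asm/atomic.h>		/* atomic64_t on __powerpc64__ */

	/* Hypothetical 64-bit event counter using the new primitives. */
	static atomic64_t events = ATOMIC64_INIT(0);

	/* Record one event; returns the new count. */
	static inline long note_event(void)
	{
		return atomic64_inc_return(&events);
	}

	/*
	 * Consume one event if any are pending.  atomic64_dec_if_positive()
	 * returns the old value minus 1, so a negative result means the
	 * counter was already 0 and nothing was consumed.
	 */
	static inline int consume_event(void)
	{
		return atomic64_dec_if_positive(&events) >= 0;
	}

Note the asymmetry visible in the diff: the void variants (atomic64_add, atomic64_sub, atomic64_inc, atomic64_dec) carry no barriers, while the value-returning variants wrap the ldarx/stdcx. loop in EIEIO_ON_SMP/ISYNC_ON_SMP and clobber "memory", so a caller can rely on the returned value to order surrounding accesses on SMP.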