path: root/include/asm-powerpc
author     Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>       2007-05-08 03:34:27 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-08 14:15:19 -0400
commit     f46e477ed94f6407982690ef53dab7898834268f (patch)
tree       40b53e2bfab3f532fcd490a5aad54b39deef7a51 /include/asm-powerpc
parent     8ffe9d0bffa441de41d8543a984e552d49293641 (diff)
atomic.h: add atomic64 cmpxchg, xchg and add_unless to powerpc
[akpm@linux-foundation.org: build fixes]

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
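For reference, the interfaces this patch fills in are the generic atomic64_cmpxchg(), atomic64_xchg() and atomic64_add_unless() operations, plus local variants of cmpxchg()/xchg() in system.h. A minimal usage sketch follows; it is not part of the commit, the object and helper names are invented for illustration, and the atomic64 calls assume CONFIG_PPC64, since atomic64_t only exists on 64-bit powerpc.

/*
 * Illustrative sketch only -- not part of this commit.  The object
 * and helper names are hypothetical; the atomic64_* operations are
 * the ones this series provides (CONFIG_PPC64 builds only).
 */
#include <asm/atomic.h>

static atomic64_t obj_refs = ATOMIC64_INIT(1);

static int obj_get_unless_free(void)
{
	/* add 1 unless the count is 0; non-zero return means a ref was taken */
	return atomic64_add_unless(&obj_refs, 1, 0);
}

static long obj_claim(long expected)
{
	/* returns the value previously stored, as cmpxchg() does */
	return atomic64_cmpxchg(&obj_refs, expected, 0);
}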
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--   include/asm-powerpc/atomic.h |   6
-rw-r--r--   include/asm-powerpc/bitops.h |   1
-rw-r--r--   include/asm-powerpc/system.h | 130
3 files changed, 131 insertions, 6 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 2ce4b6b7b348..438a7fcfba58 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -165,8 +165,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	return t;
 }
 
-#define atomic_cmpxchg(v, o, n) \
-	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -414,8 +413,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	return t;
 }
 
-#define atomic64_cmpxchg(v, o, n) \
-	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 8f757f6246e4..8144a2788db6 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -39,7 +39,6 @@
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
-#include <asm/atomic.h>
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d3e0906ff2bc..77bf5873a013 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -7,7 +7,6 @@
 #include <linux/kernel.h>
 
 #include <asm/hw_irq.h>
-#include <asm/atomic.h>
 
 /*
  * Memory barrier.
@@ -227,6 +226,29 @@ __xchg_u32(volatile void *p, unsigned long val)
 	return prev;
 }
 
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __inline__ unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%3,0,%2 \n\
+	bne-	1b"
+	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+
 #ifdef CONFIG_PPC64
 static __inline__ unsigned long
 __xchg_u64(volatile void *p, unsigned long val)
@@ -246,6 +268,23 @@ __xchg_u64(volatile void *p, unsigned long val)
 
 	return prev;
 }
+
+static __inline__ unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stdcx.	%3,0,%2 \n\
+	bne-	1b"
+	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
 #endif
 
 /*
@@ -269,12 +308,33 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 	return x;
 }
 
+static __inline__ unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __xchg_u64_local(ptr, x);
+#endif
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
 #define xchg(ptr,x)							     \
   ({									     \
      __typeof__(*(ptr)) _x_ = (x);					     \
      (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
   })
 
+#define xchg_local(ptr,x)						     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
+		(unsigned long)_x_, sizeof(*(ptr)));			     \
+  })
+
 #define tas(ptr) (xchg((ptr),1))
 
 /*
@@ -306,6 +366,28 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	return prev;
 }
 
+static __inline__ unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+			unsigned long new)
+{
+	unsigned int prev;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%4,0,%2\n\
+	bne-	1b"
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
 #ifdef CONFIG_PPC64
 static __inline__ unsigned long
 __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
@@ -328,6 +410,27 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 
 	return prev;
 }
+
+static __inline__ unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+			unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b"
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
 #endif
 
 /* This function doesn't exist, so you'll get a linker error
@@ -350,6 +453,22 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 	return old;
 }
 
+static __inline__ unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+		  unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64_local(ptr, old, new);
+#endif
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
 #define cmpxchg(ptr,o,n)						 \
   ({									 \
      __typeof__(*(ptr)) _o_ = (o);					 \
@@ -358,6 +477,15 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
      (unsigned long)_n_, sizeof(*(ptr)));				 \
   })
 
+
+#define cmpxchg_local(ptr,o,n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
 #ifdef CONFIG_PPC64
 /*
  * We handle most unaligned accesses in hardware. On the other hand
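
The _local variants introduced above contain no SMP ordering instructions (no lwsync/isync around the reservation loop), so they are meant for data that only the owning CPU modifies, not for cross-CPU synchronization. Below is a sketch of one plausible use, assuming a per-CPU counter; it is not part of the commit and the variable and function names are hypothetical.

/*
 * Illustrative sketch only -- not part of this commit.  The per-CPU
 * variable and the function are hypothetical; cmpxchg_local() is the
 * macro defined above.  The caller is assumed to run with preemption
 * disabled so the per-CPU slot cannot change owners mid-update.
 */
#include <linux/percpu.h>
#include <asm/system.h>

static DEFINE_PER_CPU(unsigned long, event_count);

static void count_event(void)
{
	unsigned long *p = &__get_cpu_var(event_count);
	unsigned long old;

	/* retry until our compare-and-swap of this CPU's counter succeeds */
	do {
		old = *p;
	} while (cmpxchg_local(p, old, old + 1) != old);
}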