path: root/include/asm-powerpc/atomic.h
author	Anton Blanchard <anton@samba.org>	2006-01-12 23:37:17 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-13 05:18:50 -0500
commit	144b9c135b963bcb7f242c7b83bff930620d3161 (patch)
tree	4b454f3e5e5921c5a528131dfa51df542259d918	/include/asm-powerpc/atomic.h
parent	3356bb9f7ba378a6e2709f9df95f4ea52111f4df (diff)
[PATCH] powerpc: use lwsync in atomics, bitops, lock functions
eieio provides only store-store ordering. When it is used to order an unlock operation, loads may leak out of the critical region. This is potentially buggy; one example is a user who wants to atomically read a couple of values. We can solve this with lwsync, which orders everything except store-load.

I removed the (now unused) EIEIO_ON_SMP macros and the C versions isync_on_smp and eieio_on_smp, since we no longer use them. I also removed some old comments that were used to identify inline spinlocks in assembly; they no longer make sense now that our locks are out of line.

Another interesting thing was that read_unlock was still using an eieio even though the rest of the spinlock code had already been converted to use lwsync.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
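To make the ordering problem concrete, here is a minimal C sketch (not part of the patch; the type, field and function names are hypothetical, chosen only for illustration) contrasting an unlock ordered with eieio against one ordered with lwsync:

/* Hypothetical sketch, not from the patch.  A reader that wants a
 * consistent view of a pair of values reads both while holding a lock;
 * whether that works depends on the barrier used in the unlock path. */

typedef struct { volatile unsigned int slock; } my_spinlock_t;

/* Unlock ordered with eieio (old scheme, simplified).  eieio is a
 * store->store barrier only, so loads performed inside the critical
 * section may still be outstanding when the unlocking store becomes
 * visible - another CPU can then retake the lock and modify the data
 * "inside" our critical section. */
static inline void unlock_with_eieio(my_spinlock_t *lk)
{
	__asm__ __volatile__("eieio" : : : "memory");
	lk->slock = 0;			/* unlocking store */
}

/* Unlock ordered with lwsync (the scheme this patch moves to,
 * simplified).  lwsync orders load->load, load->store and store->store,
 * so every load in the critical section completes before the unlocking
 * store can be observed; only store->load is left unordered. */
static inline void unlock_with_lwsync(my_spinlock_t *lk)
{
	__asm__ __volatile__("lwsync" : : : "memory");
	lk->slock = 0;			/* unlocking store */
}

With the eieio variant, two loads done under the lock can appear to happen after the unlocking store; with the lwsync variant they cannot, which is the property the atomics, bitops and lock functions touched by this patch rely on.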
Diffstat (limited to 'include/asm-powerpc/atomic.h')
-rw-r--r--	include/asm-powerpc/atomic.h	20
1 file changed, 10 insertions, 10 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 248f9aec959c..9ce51ba54c13 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
@@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n"
@@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# atomic64_add_return\n\
 	add	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
@@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
 	subf	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
@@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
 	addic	%0,%0,1\n\
 	stdcx.	%0,0,%1 \n\
@@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
 	addic	%0,%0,-1\n\
 	stdcx.	%0,0,%1\n\
@@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n\