aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/include/asm/spinlock.h
diff options
context:
space:
mode:
authorAnton Blanchard <anton@samba.org>2010-02-09 20:04:06 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2010-02-16 22:03:15 -0500
commitf10e2e5b4b4c9937de596f96ffe028be3a565598 (patch)
tree89808d05159ac5bd4aeea53b52dc0ddb3373fe65 /arch/powerpc/include/asm/spinlock.h
parent66d99b883419b8df6d0a24ca957da7ab4831cf6e (diff)
powerpc: Rename LWSYNC_ON_SMP to PPC_RELEASE_BARRIER, ISYNC_ON_SMP to PPC_ACQUIRE_BARRIER
For performance reasons we are about to change ISYNC_ON_SMP to sometimes be lwsync. Now that the macro name doesn't make sense, change it and LWSYNC_ON_SMP to better explain what the barriers are doing.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 1c35b59f6f30..f9611bd69ed2 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -65,9 +65,10 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
65 cmpwi 0,%0,0\n\ 65 cmpwi 0,%0,0\n\
66 bne- 2f\n\ 66 bne- 2f\n\
67 stwcx. %1,0,%2\n\ 67 stwcx. %1,0,%2\n\
68 bne- 1b\n\ 68 bne- 1b\n"
69 isync\n\ 69 PPC_ACQUIRE_BARRIER
702:" : "=&r" (tmp) 70"2:"
71 : "=&r" (tmp)
71 : "r" (token), "r" (&lock->slock) 72 : "r" (token), "r" (&lock->slock)
72 : "cr0", "memory"); 73 : "cr0", "memory");
73 74
@@ -145,7 +146,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
145{ 146{
146 SYNC_IO; 147 SYNC_IO;
147 __asm__ __volatile__("# arch_spin_unlock\n\t" 148 __asm__ __volatile__("# arch_spin_unlock\n\t"
148 LWSYNC_ON_SMP: : :"memory"); 149 PPC_RELEASE_BARRIER: : :"memory");
149 lock->slock = 0; 150 lock->slock = 0;
150} 151}
151 152
@@ -193,9 +194,9 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
193 ble- 2f\n" 194 ble- 2f\n"
194 PPC405_ERR77(0,%1) 195 PPC405_ERR77(0,%1)
195" stwcx. %0,0,%1\n\ 196" stwcx. %0,0,%1\n\
196 bne- 1b\n\ 197 bne- 1b\n"
197 isync\n\ 198 PPC_ACQUIRE_BARRIER
1982:" : "=&r" (tmp) 199"2:" : "=&r" (tmp)
199 : "r" (&rw->lock) 200 : "r" (&rw->lock)
200 : "cr0", "xer", "memory"); 201 : "cr0", "xer", "memory");
201 202
@@ -217,9 +218,9 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw)
217 bne- 2f\n" 218 bne- 2f\n"
218 PPC405_ERR77(0,%1) 219 PPC405_ERR77(0,%1)
219" stwcx. %1,0,%2\n\ 220" stwcx. %1,0,%2\n\
220 bne- 1b\n\ 221 bne- 1b\n"
221 isync\n\ 222 PPC_ACQUIRE_BARRIER
2222:" : "=&r" (tmp) 223"2:" : "=&r" (tmp)
223 : "r" (token), "r" (&rw->lock) 224 : "r" (token), "r" (&rw->lock)
224 : "cr0", "memory"); 225 : "cr0", "memory");
225 226
@@ -270,7 +271,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
270 271
271 __asm__ __volatile__( 272 __asm__ __volatile__(
272 "# read_unlock\n\t" 273 "# read_unlock\n\t"
273 LWSYNC_ON_SMP 274 PPC_RELEASE_BARRIER
274"1: lwarx %0,0,%1\n\ 275"1: lwarx %0,0,%1\n\
275 addic %0,%0,-1\n" 276 addic %0,%0,-1\n"
276 PPC405_ERR77(0,%1) 277 PPC405_ERR77(0,%1)
@@ -284,7 +285,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
284static inline void arch_write_unlock(arch_rwlock_t *rw) 285static inline void arch_write_unlock(arch_rwlock_t *rw)
285{ 286{
286 __asm__ __volatile__("# write_unlock\n\t" 287 __asm__ __volatile__("# write_unlock\n\t"
287 LWSYNC_ON_SMP: : :"memory"); 288 PPC_RELEASE_BARRIER: : :"memory");
288 rw->lock = 0; 289 rw->lock = 0;
289} 290}
290 291