author	Mikael Pettersson <mikpe@it.uu.se>	2011-08-15 06:11:50 -0400
committer	David S. Miller <davem@davemloft.net>	2011-08-15 17:35:19 -0400
commit	3f6aa0b113846a8628baa649af422cfc6fb1d786 (patch)
tree	5b3d1f9b112a779dab37db5f685c24e5e857df96 /arch/sparc
parent	a0fba3eb059e73fed2d376a901f8117734c12f1f (diff)
sparc32: unbreak arch_write_unlock()
The sparc32 version of arch_write_unlock() is just a plain assignment.
Unfortunately this allows the compiler to schedule side-effects in a
protected region to occur after the HW-level unlock, which is broken.
E.g., the following trivial test case gets miscompiled:

	#include <linux/spinlock.h>

	rwlock_t lock;
	int counter;

	void foo(void)
	{
		write_lock(&lock);
		++counter;
		write_unlock(&lock);
	}

Fixed by adding a compiler memory barrier to arch_write_unlock().  The
sparc64 version combines the barrier and assignment into a single asm(),
and implements the operation as a static inline, so that's what I did too.

Compile-tested with sparc32_defconfig + CONFIG_SMP=y.

Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
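To illustrate the barrier idea in isolation (a minimal sketch, not part of the
patch; the type and function names below are hypothetical stand-ins), a plain
store lets the compiler sink critical-section accesses past the unlock, while
an asm with a "memory" clobber pins them before it:

	/* Hypothetical stand-in for arch_rwlock_t, for illustration only. */
	typedef struct { volatile unsigned int lock; } toy_rwlock_t;

	/* Broken: nothing stops the compiler from moving protected
	 * stores (e.g. ++counter above) below this plain assignment. */
	static inline void toy_write_unlock_broken(toy_rwlock_t *rw)
	{
		rw->lock = 0;
	}

	/* Fixed: the "memory" clobber acts as a compiler barrier, so
	 * accesses from the critical section cannot be reordered past
	 * the unlocking store.  The actual patch folds the store itself
	 * into the same asm, mirroring what sparc64 already does. */
	static inline void toy_write_unlock_fixed(toy_rwlock_t *rw)
	{
		__asm__ __volatile__("" : : : "memory");
		rw->lock = 0;
	}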
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/include/asm/spinlock_32.h	11
1 file changed, 9 insertions, 2 deletions
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 5f5b8bf3f50d..bcc98fc35281 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -131,6 +131,15 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
+static void inline arch_write_unlock(arch_rwlock_t *lock)
+{
+	__asm__ __volatile__(
+"	st	%%g0, [%0]"
+	: /* no outputs */
+	: "r" (lock)
+	: "memory");
+}
+
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
@@ -175,8 +184,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
 	res; \
 })
 
-#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 #define arch_write_lock_flags(rw, flags) arch_write_lock(rw)