author	Boqun Feng <boqun.feng@gmail.com>	2016-06-09 23:51:28 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-06-14 02:05:44 -0400
commit	6262db7c088bbfc26480d10144cde70bbf077be3 (patch)
tree	9da160672c217a7d38ac2769015530629556e9a3
parent	6e45273eacc829a44fae1d3df14065d6947335ae (diff)
powerpc/spinlock: Fix spin_unlock_wait()
There is an ordering issue with spin_unlock_wait() on powerpc, because
the spin_lock primitive is an ACQUIRE, and an ACQUIRE only orders the
load part of the operation with respect to memory operations following
it. Therefore the following event sequence can happen:

CPU 1                   CPU 2                   CPU 3

==================      ====================    ==============
                                                spin_unlock(&lock);
                        spin_lock(&lock):
                          r1 = *lock; // r1 == 0;
o = object;             o = READ_ONCE(object); // reordered here
object = NULL;
smp_mb();
spin_unlock_wait(&lock);
                        *lock = 1;
smp_mb();
o->dead = true;         < o = READ_ONCE(object); > // reordered upwards
                        if (o) // true
                                BUG_ON(o->dead); // true!!

To fix this, we add a "nop" ll/sc loop in arch_spin_unlock_wait() on
ppc. The "nop" ll/sc loop reads the lock value and writes it back
atomically; this synchronizes the view of the lock on CPU 1 with that
on CPU 2. Therefore, in the scenario above, either CPU 2 fails to get
the lock in the first place or CPU 1 sees the lock acquired by CPU 2;
either case eliminates this bug.

This is a similar idea to what Will Deacon did for ARM64 in
d86b8da04dfa ("arm64: spinlock: serialise spin_unlock_wait against
concurrent lockers").

Furthermore, if the "nop" ll/sc finds the lock locked, we don't need to
do the "nop" ll/sc trick again; we can just do a normal load+check loop
waiting for the lock to be released. In that case spin_unlock_wait() is
called while someone is holding the lock, and the store part of the
"nop" ll/sc happens before the lock release of the current lock holder:

	"nop" ll/sc -> spin_unlock()

and the lock release happens before the next lock acquisition:

	spin_unlock() -> spin_lock() <next holder>

which means the "nop" ll/sc happens before the next lock acquisition:

	"nop" ll/sc -> spin_unlock() -> spin_lock() <next holder>

With an smp_mb() preceding spin_unlock_wait(), the store of object is
guaranteed to be observed by the next lock holder:

	STORE -> smp_mb() -> "nop" ll/sc
	      -> spin_unlock() -> spin_lock() <next holder>

This patch therefore fixes the issue and also cleans up
arch_spin_unlock_wait() a little by removing superfluous memory
barriers in loops and consolidating the PPC32 and PPC64 implementations
into one.

Suggested-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
[mpe: Inline the "nop" ll/sc loop and set EH=0, munge change log]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
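For reference, the caller-side pattern that the scenario above models looks roughly like the following kernel-style sketch. The struct foo, object, obj_user() and obj_destroy() names are illustrative only and are not part of this patch; the sketch uses the generic spin_unlock_wait() API as it existed at the time of this commit.

#include <linux/spinlock.h>	/* spinlock_t, spin_lock(), spin_unlock_wait() */
#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/types.h>	/* bool */
#include <linux/bug.h>		/* BUG_ON() */

/* Hypothetical object protected by the pattern described above. */
struct foo {
	bool dead;
	/* ... */
};

static struct foo *object;		/* set to NULL by the destroyer */
static DEFINE_SPINLOCK(lock);

/* CPU 2: dereferences the object under the lock. */
static void obj_user(void)
{
	struct foo *o;

	spin_lock(&lock);
	o = READ_ONCE(object);
	if (o)
		BUG_ON(o->dead);	/* must never fire */
	spin_unlock(&lock);
}

/* CPU 1: tears the object down, draining current lock holders first. */
static void obj_destroy(void)
{
	struct foo *o = object;

	object = NULL;
	smp_mb();			/* order the NULLing before the wait */
	spin_unlock_wait(&lock);	/* wait for any current lock holder */
	smp_mb();
	o->dead = true;
}

Before this patch, CPU 2's READ_ONCE(object) in obj_user() could be satisfied before its store to the lock became visible to CPU 1, so the destroyer's spin_unlock_wait() could see the lock as free and mark the object dead while obj_user() still held a stale pointer; the "nop" ll/sc loop added in the diff below makes the two CPUs agree on the lock value before the wait loop runs.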
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	38
-rw-r--r--	arch/powerpc/lib/locks.c	16
2 files changed, 32 insertions, 22 deletions
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 523673d7583c..fa37fe93bc02 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -162,12 +162,38 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	lock->slock = 0;
 }
 
-#ifdef CONFIG_PPC64
-extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
-#else
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#endif
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	arch_spinlock_t lock_val;
+
+	smp_mb();
+
+	/*
+	 * Atomically load and store back the lock value (unchanged). This
+	 * ensures that our observation of the lock value is ordered with
+	 * respect to other lock operations.
+	 */
+	__asm__ __volatile__(
+"1:	" PPC_LWARX(%0, 0, %2, 0) "\n"
+"	stwcx. %0, 0, %2\n"
+"	bne- 1b\n"
+	: "=&r" (lock_val), "+m" (*lock)
+	: "r" (lock)
+	: "cr0", "xer");
+
+	if (arch_spin_value_unlocked(lock_val))
+		goto out;
+
+	while (lock->slock) {
+		HMT_low();
+		if (SHARED_PROCESSOR)
+			__spin_yield(lock);
+	}
+	HMT_medium();
+
+out:
+	smp_mb();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebdf3365..b7b1237d4aa6 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -68,19 +68,3 @@ void __rw_yield(arch_rwlock_t *rw)
 		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 #endif
-
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	while (lock->slock) {
-		HMT_low();
-		if (SHARED_PROCESSOR)
-			__spin_yield(lock);
-	}
-	HMT_medium();
-
-	smp_mb();
-}
-
-EXPORT_SYMBOL(arch_spin_unlock_wait);