author     Peter Zijlstra <peterz@infradead.org>   2016-05-26 04:35:03 -0400
committer  Ingo Molnar <mingo@kernel.org>          2016-06-14 05:55:15 -0400
commit     726328d92a42b6d4b76078e2659f43067f82c4e8 (patch)
tree       6df3622839432fc698c4404cbb0a0922a1357251 /arch/sparc/include
parent     b464d1270a8016edcf1fd20d77cefdecf9b0b73e (diff)
locking/spinlock, arch: Update and fix spin_unlock_wait() implementations
This patch updates/fixes all spin_unlock_wait() implementations.
The update is in the semantics: where spin_unlock_wait() previously
provided only a control dependency, we now upgrade it to a full
load-acquire, matching the store-release from the spin_unlock() we
waited on. This ensures that when spin_unlock_wait() returns, we are
guaranteed to observe the full critical section we waited on.
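As a minimal sketch of that guarantee (the struct, fields and helper
functions here are hypothetical illustrations, not part of this patch):
once the waiter returns from spin_unlock_wait(), the stores made inside
the critical section it waited on must be visible.

	#include <linux/spinlock.h>

	/* Hypothetical example object; not from this patch. */
	struct foo {
		spinlock_t lock;
		int data;
	};

	/* CPU 0: a critical section somebody may be waiting on. */
	static void foo_update(struct foo *f)
	{
		spin_lock(&f->lock);
		f->data = 1;
		spin_unlock(&f->lock);		/* store-release */
	}

	/* CPU 1: wait for any current critical section to complete. */
	static int foo_read_after_wait(struct foo *f)
	{
		/*
		 * With only a control dependency, the load of f->data
		 * below is not ordered against the lock-word loads and
		 * could return a stale value.  With the load-acquire
		 * upgrade, once spin_unlock_wait() returns, everything
		 * done inside the critical section we waited on is
		 * visible, so if foo_update() held the lock we are
		 * guaranteed to see f->data == 1.
		 */
		spin_unlock_wait(&f->lock);
		return f->data;
	}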
This fixes a number of spin_unlock_wait() users that (not
unreasonably) rely on this guarantee.
I also fixed a number of ticket lock versions to wait only on the
current lock holder, instead of on a full unlock, as this is
sufficient.
Furthermore, again for ticket locks, I added an smp_rmb() between the
initial ticket load and the spin loop testing the current value,
because I could not convince myself that the address dependency is
sufficient, especially if the loads are of different sizes.
I'm more than happy to remove this smp_rmb() again if people are
certain the address dependency does indeed work as expected.
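For illustration, a hedged sketch of the resulting ticket-lock pattern
(the tickets.owner/tickets.next layout is generic and does not
correspond to any single architecture touched by this series; the
closing smp_acquire__after_ctrl_dep() provides the acquire discussed
above):

	static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	{
		u16 owner = READ_ONCE(lock->tickets.owner);

		/* Order the owner sample against the lock loads below. */
		smp_rmb();

		for (;;) {
			arch_spinlock_t tmp = READ_ONCE(*lock);

			/*
			 * Stop once the lock is free, or once the holder
			 * we sampled has released it (a new owner means
			 * the old critical section is over); we only wait
			 * on the current lock holder, not for a full
			 * unlock.
			 */
			if (tmp.tickets.owner == tmp.tickets.next ||
			    tmp.tickets.owner != owner)
				break;

			cpu_relax();
		}

		/* Pair with the store-release in the unlock we waited on. */
		smp_acquire__after_ctrl_dep();
	}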
Note: PPC32 will be fixed independently.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: chris@zankel.net
Cc: cmetcalf@mellanox.com
Cc: davem@davemloft.net
Cc: dhowells@redhat.com
Cc: james.hogan@imgtec.com
Cc: jejb@parisc-linux.org
Cc: linux@armlinux.org.uk
Cc: mpe@ellerman.id.au
Cc: ralf@linux-mips.org
Cc: realmz6@gmail.com
Cc: rkuo@codeaurora.org
Cc: rth@twiddle.net
Cc: schwidefsky@de.ibm.com
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Cc: ysato@users.sourceforge.jp
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h |  7 +++++--
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h | 10 +++++++---
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index bcc98fc35281..d9c5876c6121 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -9,12 +9,15 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/psr.h>
+#include <asm/barrier.h>
 #include <asm/processor.h> /* for cpu_relax */
 
 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
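Both sparc variants now reduce to smp_cond_load_acquire(&lock->lock,
!VAL). Roughly, and only as a simplified sketch of the generic fallback
rather than the exact in-tree macro, that helper spins with plain loads
until the condition on the loaded value VAL holds, then upgrades the
final load to an acquire:

	/* Simplified sketch; the in-tree macro differs in detail. */
	#define smp_cond_load_acquire(ptr, cond_expr) ({		\
		typeof(ptr) __ptr = (ptr);				\
		typeof(*__ptr) VAL;					\
		for (;;) {						\
			VAL = READ_ONCE(*__ptr);			\
			if (cond_expr)		/* here: !VAL */	\
				break;					\
			cpu_relax();					\
		}							\
		/* Turn the control dependency into an acquire. */	\
		smp_acquire__after_ctrl_dep();				\
		VAL;							\
	})

So the sparc32 wait above completes once lock->lock reads as zero, and
the final load pairs with the release performed by the unlock path.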
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 968917694978..87990b7c6b0d 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -8,6 +8,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
 /* To get debugging spinlocks which detect and catch
  * deadlock situations, set CONFIG_DEBUG_SPINLOCK
  * and rebuild your kernel.
@@ -23,9 +26,10 @@
 
 #define arch_spin_is_locked(lp) ((lp)->lock != 0)
 
-#define arch_spin_unlock_wait(lp) \
-	do { rmb(); \
-	} while((lp)->lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {