about summary refs log tree commit diff stats
path: root/arch/arm/include/asm/spinlock.h
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2013-10-09 12:19:22 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2013-10-29 07:06:11 -0400
commit0cbad9c9dfe0c38e8ec7385b39087c005a6dee3e (patch)
treec835625cbf2eb478134713fe1df99ca5e4e9bbf3 /arch/arm/include/asm/spinlock.h
parent775ebcc16b940ebf61bf54d6054a5e639f68b9d6 (diff)
ARM: 7854/1: lockref: add support for lockless lockrefs using cmpxchg64
Our spinlocks are only 32-bit (2x16-bit tickets) and, on processors with 64-bit atomic instructions, cmpxchg64 makes use of the double-word exclusive accessors. This patch wires up the cmpxchg-based lockless lockref implementation for ARM. Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/include/asm/spinlock.h')
-rw-r--r--arch/arm/include/asm/spinlock.h8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ed6c22919e47 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)