author     Will Deacon <will.deacon@arm.com>           2013-10-09 12:19:22 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2013-10-29 07:06:11 -0400
commit     0cbad9c9dfe0c38e8ec7385b39087c005a6dee3e
tree       c835625cbf2eb478134713fe1df99ca5e4e9bbf3 /arch
parent     775ebcc16b940ebf61bf54d6054a5e639f68b9d6
ARM: 7854/1: lockref: add support for lockless lockrefs using cmpxchg64
Our spinlocks are only 32-bit (2x16-bit tickets) and, on processors
with 64-bit atomic instructions, cmpxchg64 makes use of the double-word
exclusive accessors.
This patch wires up the cmpxchg-based lockless lockref implementation
for ARM.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
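For context, a simplified sketch of the pattern this enables (not the actual lib/lockref.c source): the generic lockless lockref code packs the 32-bit ticket lock and the 32-bit reference count into a single 64-bit word, and can then bump the count with cmpxchg64 as long as arch_spin_value_unlocked() reports the snapshotted lock as free. The names lockref_sketch and lockref_get_not_locked below are illustrative only; the real struct lockref embeds a full spinlock_t rather than a bare arch_spinlock_t.

	/*
	 * Illustrative sketch only -- not the kernel's lib/lockref.c.
	 * The 2x16-bit ticket lock and the reference count fit in 64 bits,
	 * so both can be updated together with a single cmpxchg64.
	 */
	struct lockref_sketch {
		union {
			u64 lock_count;			/* lock + count as one word */
			struct {
				arch_spinlock_t lock;	/* 32-bit ticket lock */
				int count;		/* 32-bit reference count */
			};
		};
	};

	static int lockref_get_not_locked(struct lockref_sketch *lr)
	{
		struct lockref_sketch old, new;

		old.lock_count = ACCESS_ONCE(lr->lock_count);
		while (arch_spin_value_unlocked(old.lock)) {
			new = old;
			new.count++;
			/* Succeeds only if nobody took the lock or touched the count. */
			if (cmpxchg64(&lr->lock_count, old.lock_count,
				      new.lock_count) == old.lock_count)
				return 1;	/* count bumped without the lock */
			old.lock_count = ACCESS_ONCE(lr->lock_count);
		}
		return 0;			/* lock held: fall back to the locked path */
	}

Taking the lock by value in arch_spin_value_unlocked() is what makes this pattern possible: the lockref code tests a snapshot of the lock that was read as half of the 64-bit word, rather than re-reading it through a pointer.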
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                |  1 +
-rw-r--r--  arch/arm/include/asm/spinlock.h |  8 ++++++--
2 files changed, 7 insertions, 2 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1ad6fb6c094d..fc184bcd7848 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,7 @@ config ARM
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ed6c22919e47 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)