diff options
author:    Will Deacon <will.deacon@arm.com>  2013-01-24 08:47:38 -0500
committer: Russell King <rmk+kernel@arm.linux.org.uk>  2013-01-28 09:13:05 -0500
commit:    20e260b6f4f717c100620122f626a2c06a4cfd72 (patch)
tree:      b2f0c8c70f86ae41f742604b4eb6c08d75109595
parent:    0a301110b7bd33ef10164c184fe2c1d8c4c3ab6b (diff)
ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path
When unlocking a spinlock, all we need to do is increment the owner
field of the lock. Since only one CPU can be performing an unlock()
operation for a given lock, this doesn't need to be exclusive.
This patch simplifies arch_spin_unlock to use non-exclusive accesses
when updating the owner field of the lock.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
 arch/arm/include/asm/spinlock.h | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
 