author		Will Deacon <will.deacon@arm.com>	2013-08-12 13:04:05 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-03-07 00:30:13 -0500
commit		fed783a535fe9cf3068977562235cf334fbff0b3
tree		78055252e2992bc23f5936bb478e11e0e2f41d42 /arch
parent		f411a4439b68149518898379d7d537a32becb381
ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
commit 00efaa0250939dc148e2d3104fb3c18395d24a2d upstream.
Commit 15e7e5c1ebf5 ("ARM: 7749/1: spinlock: retry trylock operation if
strex fails on free lock") modified our arch_spin_trylock to retry the
acquisition if the lock appeared uncontended, but the strex failed.
This patch does the same for rwlocks, which were missed by the original
patch.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Li Zefan <lizefan@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
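[Editor's note: for context on why the retry matters: on LL/SC architectures such as ARM, an exclusive store (strex) can fail even when the lock is free, e.g. because of an interfering access to the monitored cache line. A trylock that treats such a spurious failure as contention wrongly reports a free lock as busy. Below is a minimal sketch of the same retry policy in portable C11, where a weak compare-and-exchange plays the role of strex (it too may fail spuriously). The names demo_rwlock_t and demo_write_trylock are hypothetical illustrations, not the kernel's implementation.]

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical lock word: 0 = free, bit 31 = held for write. */
typedef struct { atomic_uint lock; } demo_rwlock_t;

static bool demo_write_trylock(demo_rwlock_t *rw)
{
	unsigned int expected;

	do {
		expected = 0;	/* only attempt the store when the lock looks free */
		/* A weak CAS may fail spuriously, mirroring a failed strex. */
		if (atomic_compare_exchange_weak_explicit(
				&rw->lock, &expected, 0x80000000u,
				memory_order_acquire, memory_order_relaxed))
			return true;		/* acquired for write */
	} while (expected == 0);	/* retry only spurious failures on a free lock */

	return false;	/* genuine contention: another owner holds the lock */
}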
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/spinlock.h | 49
1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..dd64cc6f9cba 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
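[Editor's note: a companion sketch for the read side, continuing the hypothetical demo_rwlock_t above. Readers count in the low bits and the 0x80000000 writer bit makes the lock word "negative", which is what the patched arch_read_trylock's contended < 0x80000000 test checks after the increment. Again this is an illustration in C11 atomics, not the kernel's code.]

static bool demo_read_trylock(demo_rwlock_t *rw)
{
	unsigned int old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

	do {
		if (old & 0x80000000u)
			return false;	/* writer holds it: genuine contention */
		/* A weak CAS can fail spuriously (the strex analogue); on
		 * failure 'old' is refreshed and the writer bit is re-checked. */
	} while (!atomic_compare_exchange_weak_explicit(
			&rw->lock, &old, old + 1,
			memory_order_acquire, memory_order_relaxed));

	return true;	/* reader count bumped */
}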