author		Waiman Long <Waiman.Long@hp.com>	2014-08-14 13:27:30 -0400
committer	Ingo Molnar <mingo@kernel.org>		2014-09-10 05:46:38 -0400
commit		2ff810a7ef38b55ba6c7b80bb7ff22847fd3be69 (patch)
tree		c75b76b3c7ec0d79a91c59e815e915cb2e336f57
parent		315427691c7a064718b5ad7d378d7f1c1898a626 (diff)
locking/rwlock, x86: Clean up asm/spinlock*.h to remove old rwlock code
As the x86 architecture now uses qrwlock for its read/write lock
implementation, it is no longer necessary to keep the old rwlock code
around. This patch removes the old rwlock code in the asm/spinlock.h
and asm/spinlock_types.h files. Now the ARCH_USE_QUEUE_RWLOCK config
parameter cannot be removed from x86/Kconfig or there will be a
compilation error.

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Waiman Long <Waiman.Long@hp.com>
Link: http://lkml.kernel.org/r/1408037251-45918-2-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/spinlock.h		81
-rw-r--r--	arch/x86/include/asm/spinlock_types.h	 4
2 files changed, 2 insertions, 83 deletions
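
For orientation before the diff: the deleted x86 implementation treated an rwlock as one biased counter. Readers subtract 1 and writers subtract the whole RW_LOCK_BIAS, so the sign/zero of the result tells each side whether it won (see the removed arch_read_trylock()/arch_write_trylock() below). The following is a minimal user-space model of that scheme, assuming C11 atomics; the struct and function names are invented, RW_LOCK_BIAS merely mirrors the kernel constant, and the kernel's split read/write fields are collapsed into a single counter here.

	/* Minimal C11 model of the biased-counter rwlock removed below. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define RW_LOCK_BIAS 0x00100000	/* mirrors the kernel constant */

	struct old_rwlock {
		atomic_int cnt;			/* starts at RW_LOCK_BIAS */
	};

	/* Reader: take one unit; a negative result means a writer owns it. */
	static bool old_read_trylock(struct old_rwlock *l)
	{
		if (atomic_fetch_sub(&l->cnt, 1) - 1 >= 0)
			return true;
		atomic_fetch_add(&l->cnt, 1);	/* undo, as the removed code does */
		return false;
	}

	/* Writer: claim the whole bias; zero means no readers and no writer. */
	static bool old_write_trylock(struct old_rwlock *l)
	{
		if (atomic_fetch_sub(&l->cnt, RW_LOCK_BIAS) - RW_LOCK_BIAS == 0)
			return true;
		atomic_fetch_add(&l->cnt, RW_LOCK_BIAS);
		return false;
	}

	static void old_read_unlock(struct old_rwlock *l)  { atomic_fetch_add(&l->cnt, 1); }
	static void old_write_unlock(struct old_rwlock *l) { atomic_fetch_add(&l->cnt, RW_LOCK_BIAS); }

	int main(void)
	{
		struct old_rwlock l = { RW_LOCK_BIAS };

		printf("read  %d\n", old_read_trylock(&l));	/* 1: lock was free */
		printf("write %d\n", old_write_trylock(&l));	/* 0: a reader holds it */
		old_read_unlock(&l);
		printf("write %d\n", old_write_trylock(&l));	/* 1: now uncontended */
		old_write_unlock(&l);
		return 0;
	}

One well-known weakness of this scheme, and part of the motivation for qrwlock, is that it gives no fairness guarantee: a stream of readers can starve a writer indefinitely.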
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 54f1c8068c02..9295016485c9 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -187,7 +187,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 		cpu_relax();
 }
 
-#ifndef CONFIG_QUEUE_RWLOCK
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -198,91 +197,15 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  *
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
+ * On x86, we implement read-write locks using the generic qrwlock with
+ * x86 specific optimization.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
-	return lock->lock > 0;
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
-	return lock->write == WRITE_LOCK_CMP;
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
-		     "jns 1f\n"
-		     "call __read_lock_failed\n\t"
-		     "1:\n"
-		     ::LOCK_PTR_REG (rw) : "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
-		     "jz 1f\n"
-		     "call __write_lock_failed\n\t"
-		     "1:\n"
-		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
-		     : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;
-
-	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
-		return 1;
-	READ_LOCK_ATOMIC(inc)(count);
-	return 0;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t *)&lock->write;
-
-	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
-		return 1;
-	atomic_add(WRITE_LOCK_CMP, count);
-	return 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
-		     :"+m" (rw->lock) : : "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
-		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
-}
-#else
 #include <asm/qrwlock.h>
-#endif /* CONFIG_QUEUE_RWLOCK */
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#undef READ_LOCK_SIZE
-#undef READ_LOCK_ATOMIC
-#undef WRITE_LOCK_ADD
-#undef WRITE_LOCK_SUB
-#undef WRITE_LOCK_CMP
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
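
What replaces all of the deleted code is the generic queued rwlock pulled in through <asm/qrwlock.h>. Its fast paths pack a writer byte and a reader count into a single atomic word; on contention, lockers fall back to a slowpath that queues them on an internal spinlock, which is what makes qrwlock fair. Below is a rough user-space sketch of just the fast paths; the constants mirror the kernel's qrwlock layout of this era, while the queued slowpaths and the x86-specific optimization mentioned in the new comment are omitted.

	#include <stdatomic.h>
	#include <stdbool.h>

	#define _QW_LOCKED	0xffU		/* low byte set: a writer holds the lock */
	#define _QW_WMASK	0xffU		/* mask for the writer byte */
	#define _QR_BIAS	(1U << 8)	/* one reader, counted above the byte */

	struct qrwlock_model {
		atomic_uint cnts;		/* the kernel pairs this with a wait_lock */
	};

	/* Reader fast path: bump the count, back off if a writer is present. */
	static bool qread_trylock(struct qrwlock_model *l)
	{
		unsigned int cnts = atomic_fetch_add(&l->cnts, _QR_BIAS) + _QR_BIAS;

		if (!(cnts & _QW_WMASK))
			return true;
		atomic_fetch_sub(&l->cnts, _QR_BIAS);	/* real code enters the queued slowpath */
		return false;
	}

	/* Writer fast path: the whole word must be zero (no readers, no writer). */
	static bool qwrite_trylock(struct qrwlock_model *l)
	{
		unsigned int expected = 0;

		return atomic_compare_exchange_strong(&l->cnts, &expected, _QW_LOCKED);
	}

	static void qread_unlock(struct qrwlock_model *l)  { atomic_fetch_sub(&l->cnts, _QR_BIAS); }
	static void qwrite_unlock(struct qrwlock_model *l) { atomic_fetch_sub(&l->cnts, _QW_LOCKED); }

A reader that gets in first makes qwrite_trylock() fail, and vice versa; fairness under contention comes entirely from the queueing omitted here.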
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 73c4c007200f..5f9d7572d82b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -34,10 +34,6 @@ typedef struct arch_spinlock {
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
-#ifdef CONFIG_QUEUE_RWLOCK
 #include <asm-generic/qrwlock_types.h>
-#else
-#include <asm/rwlock.h>
-#endif
 
 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
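
For reference, the arch_rwlock_t that the surviving include supplies is the generic queued-rwlock type, roughly as follows (paraphrased from include/asm-generic/qrwlock_types.h of this period; consult the tree for the authoritative definition):

	typedef struct qrwlock {
		atomic_t	cnts;		/* writer byte + reader count */
		arch_spinlock_t	wait_lock;	/* serializes contended lockers */
	} arch_rwlock_t;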