Diffstat (limited to 'include/asm-x86_64/spinlock.h')
-rw-r--r--	include/asm-x86_64/spinlock.h	11
1 files changed, 2 insertions, 9 deletions
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 248a79f0eaff..a8e3d89f591f 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -79,13 +79,6 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
  */
 
 #define __raw_read_can_lock(x) ((int)(x)->lock > 0)
@@ -93,12 +86,12 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	__build_read_lock(rw);
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	__build_write_lock(rw);
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
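
The comment kept by this diff describes the scheme the patch touches: the x86 read-write lock is a 32-bit counter whose sign bit is the "contended" bit. The counter starts at a large positive bias; each reader takes one unit, a writer takes the whole bias, so the counter goes non-positive exactly when a writer is involved. Below is a minimal sketch of that counter technique for illustration only. It uses C11 <stdatomic.h> and busy-wait loops in place of the kernel's lock-prefixed inline assembly and its out-of-line __read_lock_failed/__write_lock_failed helpers; the sketch_* names are invented here, while RW_LOCK_BIAS (0x01000000) matches the value the i386/x86_64 kernels of this era used.

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct {
	atomic_int lock;	/* initialised to RW_LOCK_BIAS */
} sketch_rwlock_t;

static void sketch_read_lock(sketch_rwlock_t *rw)
{
	/* Take one unit from the counter.  A non-positive old value
	 * means a writer holds (or is taking) the lock, i.e. the sign
	 * "contended" bit is in play: back out and spin. */
	while (atomic_fetch_sub(&rw->lock, 1) <= 0) {
		atomic_fetch_add(&rw->lock, 1);
		while (atomic_load(&rw->lock) <= 0)
			;	/* spin: stand-in for __read_lock_failed */
	}
}

static void sketch_read_unlock(sketch_rwlock_t *rw)
{
	atomic_fetch_add(&rw->lock, 1);	/* give the unit back */
}

static void sketch_write_lock(sketch_rwlock_t *rw)
{
	/* Take the whole bias.  The old value is RW_LOCK_BIAS only when
	 * no reader or writer was present, which is the sole success
	 * case; otherwise back out and spin. */
	while (atomic_fetch_sub(&rw->lock, RW_LOCK_BIAS) != RW_LOCK_BIAS) {
		atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);
		while (atomic_load(&rw->lock) != RW_LOCK_BIAS)
			;	/* spin: stand-in for __write_lock_failed */
	}
}

static void sketch_write_unlock(sketch_rwlock_t *rw)
{
	atomic_fetch_add(&rw->lock, RW_LOCK_BIAS);	/* give the bias back */
}

static int sketch_read_can_lock(sketch_rwlock_t *rw)
{
	/* Mirrors __raw_read_can_lock(): positive counter = no writer. */
	return atomic_load(&rw->lock) > 0;
}

Against that background, the patch itself is a small cleanup: __build_read_lock() and __build_write_lock() no longer take the slow-path helper's name as a string argument, so the two-argument calls and the stale comment pointing at arch/i386/kernel/semaphore.c both go away.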