Diffstat (limited to 'include/asm-x86_64/spinlock.h')
-rw-r--r--  include/asm-x86_64/spinlock.h | 79
1 file changed, 40 insertions(+), 39 deletions(-)
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 248a79f0eaff..be7a9e629fb2 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -16,31 +16,23 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-		(*(volatile signed int *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decl %0\n\t" \
-	"js 2f\n" \
-	LOCK_SECTION_START("") \
-	"2:\t" \
-	"rep;nop\n\t" \
-	"cmpl $0,%0\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	LOCK_SECTION_END
-
-#define __raw_spin_lock_string_up \
-	"\n\tdecl %0"
-
-#define __raw_spin_unlock_string \
-	"movl $1,%0" \
-		:"=m" (lock->slock) : : "memory"
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return *(volatile signed int *)(&(lock)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decl %0\n\t"
+		"jns 2f\n"
+		"3:\n"
+		"rep;nop\n\t"
+		"cmpl $0,%0\n\t"
+		"jle 3b\n\t"
+		"jmp 1b\n"
+		"2:\t" : "=m" (lock->slock) : : "memory");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -49,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int oldval;
 
-	__asm__ __volatile__(
+	asm volatile(
 		"xchgl %0,%1"
 		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
@@ -59,13 +51,14 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
 }
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -79,26 +72,34 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- *
- * the helpers are in arch/i386/kernel/semaphore.c
  */
 
-#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
-#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+{
+	return (int)(lock)->lock > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+{
+	return (lock)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n"
+		     "1:\n"
+		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
+		     "jz 1f\n"
+		     "\tcall __write_lock_failed\n\t"
+		     "1:\n"
+		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
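
For readers who want to see the new __raw_spin_lock/__raw_spin_unlock protocol outside of kernel context, here is a minimal standalone sketch of the same decrement-based lock shown in the hunks above. It is an illustration only, not part of the patch: the names toy_spinlock_t, toy_lock and toy_unlock are invented for the example, LOCK_PREFIX is spelled out as a plain "lock" prefix, and kernel concerns (preemption, paravirt, lockdep) are ignored. It compiles as ordinary userspace C with GCC or Clang on x86-64.

#include <stdio.h>

/* 1 means unlocked; 0 or negative means locked/contended. */
typedef struct { volatile int slock; } toy_spinlock_t;

#define TOY_SPINLOCK_INIT { 1 }

static inline void toy_lock(toy_spinlock_t *lock)
{
	asm volatile(
		"\n1:\t"
		"lock ; decl %0\n\t"	/* atomic 1 -> 0: we now own the lock          */
		"jns 2f\n"		/* result not negative => acquired, skip spin  */
		"3:\n"
		"rep;nop\n\t"		/* PAUSE hint while busy-waiting               */
		"cmpl $0,%0\n\t"
		"jle 3b\n\t"		/* still <= 0: keep spinning without bus locks */
		"jmp 1b\n"		/* lock looks free: retry the atomic decrement */
		"2:\t"
		: "=m" (lock->slock) : : "memory");
}

static inline void toy_unlock(toy_spinlock_t *lock)
{
	/* A plain store suffices on x86: stores are not reordered ahead of
	 * earlier loads or stores, and the "memory" clobber stops the
	 * compiler from moving critical-section accesses past the release. */
	asm volatile("movl $1,%0" : "=m" (lock->slock) : : "memory");
}

int main(void)
{
	toy_spinlock_t l = TOY_SPINLOCK_INIT;

	toy_lock(&l);
	printf("locked:   slock = %d\n", l.slock);	/* prints 0 */
	toy_unlock(&l);
	printf("unlocked: slock = %d\n", l.slock);	/* prints 1 */
	return 0;
}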