Diffstat (limited to 'include/asm-x86_64/spinlock.h')
 include/asm-x86_64/spinlock.h | 164 ++++++++++------------------------
 1 file changed, 42 insertions(+), 122 deletions(-)
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5aeb57a3baad..69636831ad2f 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -6,47 +6,21 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
-extern int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) \
+		(*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
 	"js 2f\n" \
@@ -58,74 +32,40 @@ typedef struct {
58 "jmp 1b\n" \ 32 "jmp 1b\n" \
59 LOCK_SECTION_END 33 LOCK_SECTION_END
60 34
61/* 35#define __raw_spin_unlock_string \
62 * This works. Despite all the confusion.
63 * (except on PPro SMP or if we are using OOSTORE)
64 * (PPro errata 66, 92)
65 */
66
67#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
68
69#define spin_unlock_string \
70 "movb $1,%0" \ 36 "movb $1,%0" \
71 :"=m" (lock->lock) : : "memory" 37 :"=m" (lock->slock) : : "memory"
72
73
74static inline void _raw_spin_unlock(spinlock_t *lock)
75{
76#ifdef CONFIG_DEBUG_SPINLOCK
77 BUG_ON(lock->magic != SPINLOCK_MAGIC);
78 assert_spin_locked(lock);
79#endif
80 __asm__ __volatile__(
81 spin_unlock_string
82 );
83}
84
85#else
86
87#define spin_unlock_string \
88 "xchgb %b0, %1" \
89 :"=q" (oldval), "=m" (lock->lock) \
90 :"0" (oldval) : "memory"
91 38
92static inline void _raw_spin_unlock(spinlock_t *lock) 39static inline void __raw_spin_lock(raw_spinlock_t *lock)
93{ 40{
94 char oldval = 1;
95#ifdef CONFIG_DEBUG_SPINLOCK
96 BUG_ON(lock->magic != SPINLOCK_MAGIC);
97 assert_spin_locked(lock);
98#endif
99 __asm__ __volatile__( 41 __asm__ __volatile__(
100 spin_unlock_string 42 __raw_spin_lock_string
101 ); 43 :"=m" (lock->slock) : : "memory");
102} 44}
103 45
104#endif 46#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
105 47
106static inline int _raw_spin_trylock(spinlock_t *lock) 48static inline int __raw_spin_trylock(raw_spinlock_t *lock)
107{ 49{
108 char oldval; 50 char oldval;
51
109 __asm__ __volatile__( 52 __asm__ __volatile__(
110 "xchgb %b0,%1" 53 "xchgb %b0,%1"
111 :"=q" (oldval), "=m" (lock->lock) 54 :"=q" (oldval), "=m" (lock->slock)
112 :"0" (0) : "memory"); 55 :"0" (0) : "memory");
56
113 return oldval > 0; 57 return oldval > 0;
114} 58}
115 59
116static inline void _raw_spin_lock(spinlock_t *lock) 60static inline void __raw_spin_unlock(raw_spinlock_t *lock)
117{ 61{
118#ifdef CONFIG_DEBUG_SPINLOCK
119 if (lock->magic != SPINLOCK_MAGIC) {
120 printk("eip: %p\n", __builtin_return_address(0));
121 BUG();
122 }
123#endif
124 __asm__ __volatile__( 62 __asm__ __volatile__(
125 spin_lock_string 63 __raw_spin_unlock_string
126 :"=m" (lock->lock) : : "memory"); 64 );
127} 65}
128 66
67#define __raw_spin_unlock_wait(lock) \
68 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
129 69
130/* 70/*
131 * Read-write spinlocks, allowing multiple readers 71 * Read-write spinlocks, allowing multiple readers
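[Annotation] Two details in the hunk above are worth calling out. __raw_spin_trylock swaps a 0 into the lock byte with "xchgb"; an old value greater than zero means the lock was free and is now held. And the unlock path is now unconditionally the plain "movb $1" store: the alternative xchgb-based unlock, guarded by CONFIG_X86_OOSTORE / CONFIG_X86_PPRO_FENCE, existed for the i386 PPro errata named in the removed comment and was dead code on x86-64, whose store ordering makes a plain store a sufficient release. In the same hypothetical demo_* style as before:

/* Sketch only, reusing the illustrative demo_spinlock_t from above. */
static int demo_spin_trylock(demo_spinlock_t *lock)
{
        /* "xchgb %b0,%1": atomically exchange 0 into the byte */
        signed char oldval = __sync_lock_test_and_set(&lock->slock, 0);
        return oldval > 0;      /* positive old value: it was free */
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
        /* "movb $1,%0" with a "memory" clobber: on x86-64 a compiler
         * barrier plus a plain store is enough to release the lock */
        __asm__ __volatile__("" ::: "memory");
        lock->slock = 1;
}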
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(x)	((int)(x)->lock > 0)
-#define write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  *
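[Annotation] To make the counter scheme in this comment concrete: the unlocked value is RW_LOCK_BIAS (0x01000000, now defined alongside the types in asm/spinlock_types.h), each reader subtracts 1, and a writer subtracts the whole bias. The states work out as follows (annotation, not part of the patch):

/*
 *     count == RW_LOCK_BIAS         unlocked: no readers, no writer
 *     0 < count < RW_LOCK_BIAS      (RW_LOCK_BIAS - count) readers hold it
 *     count == 0                    one writer holds it
 *     count < 0 (sign bit set)      contended: a writer collided with
 *                                   readers or with another writer
 *
 * Hence the tests introduced in the next hunk: __raw_read_can_lock()
 * checks count > 0, __raw_write_can_lock() checks for exactly
 * RW_LOCK_BIAS.
 */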
@@ -170,29 +84,24 @@ typedef struct {
  *
  * Changed to use the same technique as rw semaphores.  See
  * semaphore.h for details.  -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
 
-static inline void _raw_read_lock(rwlock_t *rw)
+#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
+#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	atomic_dec(count);
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
 	return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
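[Annotation] __raw_write_trylock's test reads naturally under the bias scheme: atomic_sub_and_test(RW_LOCK_BIAS, count) leaves zero, and so succeeds, only when the counter was exactly RW_LOCK_BIAS, i.e. no readers and no writer. The failure path that adds the bias back is unchanged context and so does not appear in these hunks; __raw_read_trylock (previous hunk) does the analogous decrement/check/undo with a step of 1. A stand-alone sketch with GCC builtins, using a plain int where the kernel uses atomic_t and a hypothetical demo_* name:

#define DEMO_RW_LOCK_BIAS 0x01000000

static int demo_rw_write_trylock(volatile int *count)
{
        /* like atomic_sub_and_test(): the result is zero only if the
         * counter held exactly the bias, i.e. the lock was free */
        if (__sync_sub_and_fetch(count, DEMO_RW_LOCK_BIAS) == 0)
                return 1;
        /* contended: undo our subtraction and report failure */
        __sync_add_and_fetch(count, DEMO_RW_LOCK_BIAS);
        return 0;
}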
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
 	return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+				: "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
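[Annotation] The point of the renames throughout this file shows in the final state: the architecture header now supplies only __raw_* operations on raw_spinlock_t / raw_rwlock_t (types moved to asm/spinlock_types.h), while the generic spinlock_t, the debug magic, and the preemption bookkeeping live in common code. Roughly, the generic layer wraps the raw ops as below; this is a simplified sketch with illustrative stand-in definitions, not the kernel's exact code (see include/linux/spinlock.h and kernel/spinlock.c in the same tree):

/* Illustrative stand-ins for the real generic-layer definitions. */
typedef struct { volatile unsigned int slock; } raw_spinlock_t;
typedef struct { raw_spinlock_t raw_lock; /* + debug/preempt fields */ } spinlock_t;

extern void __raw_spin_lock(raw_spinlock_t *lock); /* this header's op */
extern void preempt_disable(void);                 /* generic kernel API */

static void demo_generic_spin_lock(spinlock_t *lock) /* shape of _spin_lock() */
{
        preempt_disable();                /* no preemption while held */
        __raw_spin_lock(&lock->raw_lock); /* arch fast path from above */
}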