Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--   include/linux/spinlock.h | 379
1 file changed, 213 insertions, 166 deletions
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f0ca7a7a1757..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
  *
  * on SMP builds:
  *
- *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
  *                        initializers
  *
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
- *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
  *                        implementations, mostly inline assembly code
  *
  * (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
  *                        defines the generic type and initializers
  *
  *  linux/spinlock_up.h:
- *                        contains the __raw_spin_*()/etc. version of UP
+ *                        contains the arch_spin_*()/etc. version of UP
  *                        builds. (which are NOPs on non-debug, non-preempt
  *                        builds)
  *
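The two hunks above only rename identifiers in the header's roadmap comment: the architecture-supplied bottom layer moves from __raw_spin_*() on raw_spinlock_t to arch_spin_*() on arch_spinlock_t, freeing the raw_* names for the new kernel-side layer introduced below. For orientation only, a hypothetical arch layer could look like the following test-and-set sketch; no real port is this simple, and the type layout and the GCC __sync builtins are illustrative assumptions, not kernel code:

/* Hypothetical flavor of asm/spinlock_types.h + asm/spinlock.h.
 * Real ports use inline assembly and a fairness scheme (e.g. tickets). */
typedef struct {
        volatile unsigned int slock;    /* 0 = unlocked, 1 = locked */
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED       { 0 }

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (__sync_lock_test_and_set(&lock->slock, 1))
                ;       /* spin; a real port adds a cpu_relax()-style hint */
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __sync_lock_test_and_set(&lock->slock, 1) == 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __sync_lock_release(&lock->slock);      /* release-store of 0 */
}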
@@ -75,14 +75,12 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
  */
 #include <linux/spinlock_types.h>
 
-extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
-
 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -91,45 +89,31 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-extern void __spin_lock_init(spinlock_t *lock, const char *name,
-                             struct lock_class_key *key);
-# define spin_lock_init(lock)                                  \
+extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+                                 struct lock_class_key *key);
+# define raw_spin_lock_init(lock)                              \
 do {                                                           \
        static struct lock_class_key __key;                     \
                                                                \
-       __spin_lock_init((lock), #lock, &__key);                \
+       __raw_spin_lock_init((lock), #lock, &__key);            \
 } while (0)
 
 #else
-# define spin_lock_init(lock)                                  \
-       do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void __rwlock_init(rwlock_t *lock, const char *name,
-                          struct lock_class_key *key);
-# define rwlock_init(lock)                                     \
-do {                                                           \
-       static struct lock_class_key __key;                     \
-                                                               \
-       __rwlock_init((lock), #lock, &__key);                   \
-} while (0)
-#else
-# define rwlock_init(lock)                                     \
-       do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+# define raw_spin_lock_init(lock)                              \
+       do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#define spin_is_locked(lock)           __raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)       arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else
 
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock)        __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock)    arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)        (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock)    (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
 #endif
 
 /* The lock does not imply full memory barrier. */
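This hunk gives the new raw_spinlock_t layer its own initializer and predicates, while the rwlock_init() material moves out of this header. A minimal usage sketch, assuming a made-up lock name (in-tree code could equally use a static DEFINE_RAW_SPINLOCK-style initializer):

#include <linux/spinlock.h>

static raw_spinlock_t my_raw_lock;      /* hypothetical example lock */

static void my_setup(void)
{
        /* with CONFIG_DEBUG_SPINLOCK this also registers a lockdep class */
        raw_spin_lock_init(&my_raw_lock);

        raw_spin_lock(&my_raw_lock);
        /* critical section: must stay a true spinning section, even on -rt */
        raw_spin_unlock(&my_raw_lock);
}

raw_spin_is_locked() and raw_spin_is_contended() reach the arch_* predicates through the lock's raw_lock member, exactly as the defines above spell out.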
@@ -138,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #endif
 
 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)         __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)     arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-extern int _raw_spin_trylock(spinlock_t *lock);
-extern void _raw_spin_unlock(spinlock_t *lock);
-extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
-extern int _raw_read_trylock(rwlock_t *lock);
-extern void _raw_read_unlock(rwlock_t *lock);
-extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
-extern int _raw_write_trylock(rwlock_t *lock);
-extern void _raw_write_unlock(rwlock_t *lock);
+extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+extern void do_raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)          __raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-               __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)       __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)        __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock)        __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
-               __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)     __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)      __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)       __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
-               __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)    __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)     __raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+       arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+       arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+       return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+       arch_spin_unlock(&lock->raw_lock);
+}
 #endif
 
-#define read_can_lock(rwlock)          __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)         __raw_write_can_lock(&(rwlock)->raw_lock)
-
 /*
- * Define the various spin_lock and rw_lock methods.  Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods.  Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
  */
-#define spin_trylock(lock)     __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock)     __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock)    __cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
 
-#define spin_lock(lock)        _spin_lock(lock)
+#define raw_spin_lock(lock)    _raw_spin_lock(lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock)                          \
+# define raw_spin_lock_nested(lock, subclass) \
+       _raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock)                      \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(nest_lock)->dep_map);         \
-       _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);              \
+       _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);          \
 } while (0)
 #else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)          _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)      _raw_spin_lock(lock)
 #endif
 
-#define write_lock(lock)       _write_lock(lock)
-#define read_lock(lock)        _read_lock(lock)
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags)                 \
+#define raw_spin_lock_irqsave(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
-               flags = _spin_lock_irqsave(lock);       \
-       } while (0)
-#define read_lock_irqsave(lock, flags)                 \
-       do {                                            \
-               typecheck(unsigned long, flags);        \
-               flags = _read_lock_irqsave(lock);       \
-       } while (0)
-#define write_lock_irqsave(lock, flags)                \
-       do {                                            \
-               typecheck(unsigned long, flags);        \
-               flags = _write_lock_irqsave(lock);      \
+               flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass)                \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)            \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
-               flags = _spin_lock_irqsave_nested(lock, subclass);      \
+               flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass)                \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)            \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
-               flags = _spin_lock_irqsave(lock);                       \
+               flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
 #endif
 
 #else
 
-#define spin_lock_irqsave(lock, flags)                 \
-       do {                                            \
-               typecheck(unsigned long, flags);        \
-               _spin_lock_irqsave(lock, flags);        \
-       } while (0)
-#define read_lock_irqsave(lock, flags)                 \
+#define raw_spin_lock_irqsave(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
-               _read_lock_irqsave(lock, flags);        \
+               _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)
-#define write_lock_irqsave(lock, flags)                \
-       do {                                            \
-               typecheck(unsigned long, flags);        \
-               _write_lock_irqsave(lock, flags);       \
-       } while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass)        \
-       spin_lock_irqsave(lock, flags)
+
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)    \
+       raw_spin_lock_irqsave(lock, flags)
 
 #endif
 
-#define spin_lock_irq(lock)            _spin_lock_irq(lock)
-#define spin_lock_bh(lock)             _spin_lock_bh(lock)
-#define read_lock_irq(lock)            _read_lock_irq(lock)
-#define read_lock_bh(lock)             _read_lock_bh(lock)
-#define write_lock_irq(lock)           _write_lock_irq(lock)
-#define write_lock_bh(lock)            _write_lock_bh(lock)
-#define spin_unlock(lock)              _spin_unlock(lock)
-#define read_unlock(lock)              _read_unlock(lock)
-#define write_unlock(lock)             _write_unlock(lock)
-#define spin_unlock_irq(lock)          _spin_unlock_irq(lock)
-#define read_unlock_irq(lock)          _read_unlock_irq(lock)
-#define write_unlock_irq(lock)         _write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags)            \
-       do {                                            \
-               typecheck(unsigned long, flags);        \
-               _spin_unlock_irqrestore(lock, flags);   \
-       } while (0)
-#define spin_unlock_bh(lock)           _spin_unlock_bh(lock)
+#define raw_spin_lock_irq(lock)        _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)         _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock)          _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)      _raw_spin_unlock_irq(lock)
 
-#define read_unlock_irqrestore(lock, flags)                    \
+#define raw_spin_unlock_irqrestore(lock, flags)                \
        do {                                                    \
                typecheck(unsigned long, flags);                \
-               _read_unlock_irqrestore(lock, flags);           \
+               _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
-#define read_unlock_bh(lock)           _read_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)       _raw_spin_unlock_bh(lock)
 
-#define write_unlock_irqrestore(lock, flags)           \
-       do {                                            \
-               typecheck(unsigned long, flags);        \
-               _write_unlock_irqrestore(lock, flags);  \
-       } while (0)
-#define write_unlock_bh(lock)          _write_unlock_bh(lock)
+#define raw_spin_trylock_bh(lock) \
+       __cond_lock(lock, _raw_spin_trylock_bh(lock))
 
-#define spin_trylock_bh(lock)  __cond_lock(lock, _spin_trylock_bh(lock))
-
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
        local_irq_disable(); \
-       spin_trylock(lock) ? \
+       raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
 })
 
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
        local_irq_save(flags); \
-       spin_trylock(lock) ? \
+       raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#define write_trylock_irqsave(lock, flags) \
-({ \
-       local_irq_save(flags); \
-       write_trylock(lock) ? \
-       1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)        (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+       return &lock->rlock;
+}
+
+#define spin_lock_init(_lock)                          \
+do {                                                   \
+       spinlock_check(_lock);                          \
+       raw_spin_lock_init(&(_lock)->rlock);            \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+       raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+       raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+       return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass)                       \
+do {                                                           \
+       raw_spin_lock_nested(spinlock_check(lock), subclass);   \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock)                           \
+do {                                                                   \
+       raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+       raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags)                         \
+do {                                                           \
+       raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)                        \
+do {                                                                   \
+       raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+       raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+       raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+       raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+       return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+       return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags)                      \
+({                                                             \
+       raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+       raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+       return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+       return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+       return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+       assert_raw_spin_locked(&lock->rlock);
+}
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
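The bulk of this hunk adds the spinlock_t wrappers that forward to the raw_* variants through the lock's rlock member; spinlock_check() exists purely so the macro-based wrappers fail to compile when handed anything other than a spinlock_t *. A short usage sketch with invented names; note that flags must be a plain unsigned long, since raw_spin_lock_irqsave() typechecks it:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);        /* hypothetical spinlock_t */
static int my_counter;

static void my_update(void)
{
        unsigned long flags;    /* the irqsave macro typechecks this */

        /* forwards to raw_spin_lock_irqsave() on &my_lock.rlock */
        spin_lock_irqsave(&my_lock, flags);
        my_counter++;
        spin_unlock_irqrestore(&my_lock, flags);
}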
@@ -331,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)    (!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */
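For completeness, the atomic_dec_and_lock() kept in the last hunk's context decrements an atomic_t and returns 1 with the lock held only when the count reaches zero. A sketch of its canonical refcount-teardown use, assuming made-up names (struct my_obj and the list are illustrative, not from this patch):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
        atomic_t refcount;
        struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);  /* hypothetical list lock */

static void my_obj_put(struct my_obj *obj)
{
        /* the lock is taken only on the final put, so the unlink and the
         * free are atomic with respect to lookups under obj_list_lock */
        if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
                list_del(&obj->node);
                spin_unlock(&obj_list_lock);
                kfree(obj);
        }
}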