author		Thomas Gleixner <tglx@linutronix.de>	2009-12-02 14:02:59 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:32 -0500
commit		c2f21ce2e31286a0a32f8da0a7856e9ca1122ef3 (patch)
tree		6cc8d1fd37ffa6d02481353857b92734241f4dd0 /include/linux/spinlock.h
parent		e5931943d02bf751b1ec849c0d2ade23d76a8d41 (diff)
locking: Implement new raw_spinlock
Now that the raw_spin name space is freed up, we can implement
raw_spinlock and the related functions which are used to annotate the
locks which are not converted to sleeping spinlocks in preempt-rt.
A side effect is that only such locks can be used with the low level
lock functions which circumvent lockdep.
For !rt, the spin_* functions are mapped to the raw_spin* implementations.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--	include/linux/spinlock.h	258
1 file changed, 194 insertions(+), 64 deletions(-)
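To make the split concrete, the sketch below shows how the two lock classes are meant to be used after this patch. It is a minimal illustration, not code from the patch: the lock names are invented, and it assumes the DEFINE_RAW_SPINLOCK/DEFINE_SPINLOCK initializers from the accompanying spinlock_types.h changes. On !rt both types spin; on preempt-rt only the raw lock is guaranteed to.

/*
 * Minimal usage sketch (hypothetical names, not part of the patch).
 * hw_lock must keep spinning on preempt-rt, so it is a raw_spinlock_t;
 * data_lock is an ordinary spinlock_t and may become a sleeping lock on rt.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hw_lock);	/* assumed initializer from spinlock_types.h */
static DEFINE_SPINLOCK(data_lock);

static void example(void)
{
	unsigned long flags;

	/* Low level path: stays a spinning lock even on preempt-rt. */
	raw_spin_lock_irqsave(&hw_lock, flags);
	/* ... touch hardware/arch state ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);

	/* Normal path: on !rt this maps 1:1 onto the raw_spin* calls. */
	spin_lock(&data_lock);
	/* ... touch shared data ... */
	spin_unlock(&data_lock);
}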
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 53bc2213b414..ef5a55d96b9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -80,7 +80,7 @@
 #include <linux/spinlock_types.h>
 
 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -89,30 +89,30 @@
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-extern void __spin_lock_init(spinlock_t *lock, const char *name,
-			     struct lock_class_key *key);
-# define spin_lock_init(lock)					\
+extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+				 struct lock_class_key *key);
+# define raw_spin_lock_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__spin_lock_init((lock), #lock, &__key);		\
+	__raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)
 
 #else
-# define spin_lock_init(lock)					\
-	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock)				\
+	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#define spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else
 
 #ifdef arch_spin_is_contended
-#define spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
+#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)	(((void)(lock), 0))
+#define raw_spin_is_contended(lock)	(((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
 #endif
 
@@ -122,22 +122,37 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #endif
 
 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
+ extern void _raw_spin_lock(raw_spinlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
+ extern int _raw_spin_trylock(raw_spinlock_t *lock);
+ extern void _raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)		arch_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-	arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)	arch_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)		arch_spin_unlock(&(lock)->raw_lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
+{
+	arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+	arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+	arch_spin_unlock(&lock->raw_lock);
+}
 #endif
 
 /*
@@ -146,38 +161,38 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define spin_trylock(lock)	__cond_lock(lock, _spin_trylock(lock))
+#define raw_spin_trylock(lock)	__cond_lock(lock, _spin_trylock(lock))
 
-#define spin_lock(lock)		_spin_lock(lock)
+#define raw_spin_lock(lock)	_spin_lock(lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock)			\
+# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define raw_spin_lock_nest_lock(lock, nest_lock)		\
 do {								\
 	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
 	_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
 } while (0)
 #else
-# define spin_lock_nested(lock, subclass)	_spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock)	_spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)		_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)	_spin_lock(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
 		flags = _spin_lock_irqsave(lock);	\
 	} while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass)		\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
 	do {							\
 		typecheck(unsigned long, flags);		\
 		flags = _spin_lock_irqsave_nested(lock, subclass); \
 	} while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass)	\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
 	do {							\
 		typecheck(unsigned long, flags);		\
 		flags = _spin_lock_irqsave(lock);		\
@@ -186,45 +201,178 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 
 #else
 
-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
 		_spin_lock_irqsave(lock, flags);	\
 	} while (0)
 
-#define spin_lock_irqsave_nested(lock, flags, subclass)	\
-	spin_lock_irqsave(lock, flags)
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
+	raw_spin_lock_irqsave(lock, flags)
 
 #endif
 
-#define spin_lock_irq(lock)		_spin_lock_irq(lock)
-#define spin_lock_bh(lock)		_spin_lock_bh(lock)
-#define spin_unlock(lock)		_spin_unlock(lock)
-#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+#define raw_spin_lock_irq(lock)		_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)		_spin_lock_bh(lock)
+#define raw_spin_unlock(lock)		_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)	_spin_unlock_irq(lock)
 
-#define spin_unlock_irqrestore(lock, flags)		\
+#define raw_spin_unlock_irqrestore(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
 		_spin_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)	_spin_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
 
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
 	local_irq_disable(); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0;  }); \
 })
 
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
 	local_irq_save(flags); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
 })
 
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+	return &lock->rlock;
+}
+
+#define spin_lock_init(_lock)				\
+do {							\
+	spinlock_check(_lock);				\
+	raw_spin_lock_init(&(_lock)->rlock);		\
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+	raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+	return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass)			\
+do {								\
+	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+	raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags)				\
+do {								\
+	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+do {									\
+	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+	raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+	raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+	return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+	return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags)			\
+({								\
+	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+})
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+	raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+	return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	assert_raw_spin_locked(&lock->rlock);
+}
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -242,22 +390,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)	(!spin_is_locked(lock))
-
-/* Include rwlock functions */
-#include <linux/rwlock.h>
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */
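A note on the spinlock_check() helper added above: spin_lock_irqsave() and friends must stay macros because they assign to the caller's flags variable, so the inline helper is what gives those macros compile time type checking; handing them anything other than a spinlock_t * now fails to build instead of silently operating on the wrong type. The standalone userspace sketch below reproduces the idiom with invented names, purely to show the mechanism in isolation.

#include <stdio.h>

struct raw_lock { int locked; };
struct lock { struct raw_lock rlock; };	/* mirrors spinlock_t wrapping raw_spinlock_t */

/*
 * Like spinlock_check(): returns the embedded member, and the prototype
 * forces the argument to be a struct lock *, catching misuse at compile time.
 */
static inline struct raw_lock *lock_check(struct lock *l)
{
	return &l->rlock;
}

/*
 * Stays a macro (in the kernel case it must write the caller's flags),
 * but the helper call inside still type checks the argument.
 */
#define lock_acquire(l)			\
do {					\
	lock_check(l)->locked = 1;	\
} while (0)

int main(void)
{
	struct lock l = { { 0 } };

	lock_acquire(&l);
	/* lock_acquire(&l.rlock); -- would be a compile error: wrong pointer type */
	printf("locked=%d\n", l.rlock.locked);
	return 0;
}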