author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 16:17:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 16:17:24 -0400
commit		4e3408d9f71a70316ebe844c20ef0d7715281f84 (patch)
tree		365f67fbcbe8e047a5fbead3db5d2e7ac20b3618 /include/linux
parent		a66a50054e46ec2a03244bc14c48b9125fcd75a7 (diff)
parent		96910b6dc8a4fdb75e69f09f47b62d41743d36ba (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (32 commits)
locking, m68k/asm-offsets: Rename signal defines
locking: Inline spinlock code for all locking variants on s390
locking: Simplify spinlock inlining
locking: Allow arch-inlined spinlocks
locking: Move spinlock function bodies to header file
locking, m68k: Calculate thread_info offset with asm offset
locking, m68k/asm-offsets: Rename pt_regs offset defines
locking, sparc: Rename __spin_try_lock() and friends
locking, powerpc: Rename __spin_try_lock() and friends
lockdep: Remove recursion statistics
lockdep: Simplify lock_stat seqfile code
lockdep: Simplify lockdep_chains seqfile code
lockdep: Simplify lockdep seqfile code
lockdep: Fix missing entries in /proc/lock_chains
lockdep: Fix missing entry in /proc/lock_stat
lockdep: Fix memory usage info of BFS
lockdep: Reintroduce generation count to make BFS faster
lockdep: Deal with many similar locks
lockdep: Introduce lockdep_assert_held()
lockdep: Fix style nits
...
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/lockdep.h			 18
-rw-r--r--	include/linux/spinlock.h		 64
-rw-r--r--	include/linux/spinlock_api_smp.h	394
3 files changed, 426 insertions(+), 50 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ccf0e286b2a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
@@ -208,10 +214,12 @@ struct held_lock {
 	 * interrupt context:
 	 */
 	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
-	unsigned int trylock:1;
+	unsigned int trylock:1;						/* 16 bits */
+
 	unsigned int read:2;        /* see lock_acquire() comment */
 	unsigned int check:2;       /* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
+	unsigned int references:11;					/* 32 bits */
 };
 
 /*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
+#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
+#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
 
 #define lockdep_depth(tsk)	(0)
 
+#define lockdep_assert_held(l)			do { } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
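
The new lockdep_assert_held() macro lets code turn a "caller must hold this lock" comment into a checked assertion. A minimal kernel-style sketch of the intended usage (the counter function and lock are hypothetical, invented here for illustration):

	static DEFINE_SPINLOCK(counter_lock);
	static unsigned long counter;

	/* Must be called with counter_lock held. */
	static void counter_add(unsigned long n)
	{
		/* WARNs if the current task does not hold the lock. */
		lockdep_assert_held(&counter_lock);
		counter += n;
	}

	static void counter_inc(void)
	{
		spin_lock(&counter_lock);
		counter_add(1);
		spin_unlock(&counter_lock);
	}

With LOCKDEP disabled the assertion compiles away to do { } while (0), so it costs nothing in production kernels.

The comment on the new lock_list->parent field describes a classic pointer-tagging trick: the pointed-to structures are more than 1-byte aligned, so bit 0 of the pointer is always zero and can double as a "visited during BFS" flag, avoiding a separate bitmap. A standalone C sketch of the idea (the helper names are illustrative, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	struct node { struct node *parent; };

	static struct node *get_parent(struct node *n)
	{
		/* Mask off the flag bit to recover the real pointer. */
		return (struct node *)((uintptr_t)n->parent & ~(uintptr_t)1);
	}

	static int was_visited(struct node *n)
	{
		return (int)((uintptr_t)n->parent & 1);
	}

	static void mark_visited(struct node *n)
	{
		/* Set bit 0; the pointer itself is even, so nothing is lost. */
		n->parent = (struct node *)((uintptr_t)n->parent | 1);
	}

	int main(void)
	{
		struct node root = { NULL }, child = { &root };

		mark_visited(&child);
		printf("visited=%d parent=%p\n", was_visited(&child),
		       (void *)get_parent(&child));
		return 0;
	}
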
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..f0ca7a7a1757 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
  */
 #define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
 
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void _raw_spin_lock(spinlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 
 #define spin_lock_irq(lock)		_spin_lock_irq(lock)
 #define spin_lock_bh(lock)		_spin_lock_bh(lock)
-
 #define read_lock_irq(lock)		_read_lock_irq(lock)
 #define read_lock_bh(lock)		_read_lock_bh(lock)
-
 #define write_lock_irq(lock)		_write_lock_irq(lock)
 #define write_lock_bh(lock)		_write_lock_bh(lock)
-
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
-	!defined(CONFIG_SMP)
-# define spin_unlock(lock)		_spin_unlock(lock)
-# define read_unlock(lock)		_read_unlock(lock)
-# define write_unlock(lock)		_write_unlock(lock)
-# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-# define read_unlock_irq(lock)		_read_unlock_irq(lock)
-# define write_unlock_irq(lock)		_write_unlock_irq(lock)
-#else
-# define spin_unlock(lock) \
-    do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
-    do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
-    do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
-do { \
-	__raw_spin_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define read_unlock_irq(lock) \
-do { \
-	__raw_read_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
-	__raw_write_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-#endif
+#define spin_unlock(lock)		_spin_unlock(lock)
+#define read_unlock(lock)		_read_unlock(lock)
+#define write_unlock(lock)		_write_unlock(lock)
+#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+#define read_unlock_irq(lock)		_read_unlock_irq(lock)
+#define write_unlock_irq(lock)		_write_unlock_irq(lock)
 
 #define spin_unlock_irqrestore(lock, flags)		\
 	do {						\
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  */
 #define spin_can_lock(lock)	(!spin_is_locked(lock))
 
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
 #endif /* __LINUX_SPINLOCK_H */
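
Note that the spinlock_api_*.h include is not simply deleted; it moves from near the top of spinlock.h to just before the final #endif. The reordering is what makes the inlining rework possible: the inline __spin_*() bodies that now live in spinlock_api_smp.h use helpers such as _raw_spin_lock() (defined in the CONFIG_DEBUG_SPINLOCK block visible above, below the old include point), so the implementation header has to be pulled in after those definitions exist. A standalone sketch of this define-before-include pattern (file and macro names invented for illustration):

	/* impl.h -- stands in for spinlock_api_smp.h */
	#ifndef IMPL_H
	#define IMPL_H
	static inline void do_op(int *x)
	{
		TRACE_OP(x);	/* expands to whatever the includer defined */
		(*x)++;
	}
	#endif

	/* main.c -- stands in for spinlock.h */
	#include <stdio.h>

	#define TRACE_OP(x)	printf("op on %p\n", (void *)(x))
	#include "impl.h"	/* must come after TRACE_OP is defined */

	int main(void)
	{
		int v = 41;
		do_op(&v);
		printf("v=%d\n", v);
		return 0;
	}
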
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b5..7a7e18fc2415 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 	__releases(lock);
 
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#endif
+
+#ifndef CONFIG_DEBUG_SPINLOCK
+#ifndef CONFIG_GENERIC_LOCKBREAK
+
+#ifdef __always_inline__spin_lock
+#define _spin_lock(lock) __spin_lock(lock)
+#endif
+
+#ifdef __always_inline__read_lock
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef __always_inline__write_lock
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_bh
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_lock_bh
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_lock_bh
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irq
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irq
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irq
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irqsave
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irqsave
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irqsave
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK */
+
+#ifdef __always_inline__spin_trylock
+#define _spin_trylock(lock) __spin_trylock(lock)
+#endif
+
+#ifdef __always_inline__read_trylock
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef __always_inline__write_trylock
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef __always_inline__spin_trylock_bh
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock
+#define _spin_unlock(lock) __spin_unlock(lock)
+#endif
+
+#ifdef __always_inline__read_unlock
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef __always_inline__write_unlock
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_bh
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_bh
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_bh
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irq
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_irq
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_irq
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irqrestore
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__read_unlock_irqrestore
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__write_unlock_irqrestore
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+static inline int __spin_trylock(spinlock_t *lock)
+{
+	preempt_disable();
+	if (_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+static inline int __read_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (_raw_read_trylock(lock)) {
+		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+static inline int __write_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (_raw_write_trylock(lock)) {
+		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __read_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	/*
+	 * On lockdep we dont want the hand-coded irq-enable of
+	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * that interrupts are not re-enabled during lock-acquire:
+	 */
+#ifdef CONFIG_LOCKDEP
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+#else
+	_raw_spin_lock_flags(lock, &flags);
+#endif
+	return flags;
+}
+
+static inline void __spin_lock_irq(spinlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __spin_lock_bh(spinlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+			     _raw_read_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __read_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline void __read_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+			     _raw_write_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __write_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __spin_lock(spinlock_t *lock)
+{
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __write_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+#endif /* CONFIG_PREEMPT */
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __write_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __read_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+					    unsigned long flags)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __spin_unlock_irq(spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __spin_unlock_bh(spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __read_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __read_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __write_unlock_irqrestore(rwlock_t *lock,
+					     unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __write_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __write_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline int __spin_trylock_bh(spinlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	if (_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+	return 0;
+}
+
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
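
The long run of #ifdef __always_inline__<op> guards above is an opt-in switch: an architecture that wants a given lock operation inlined defines the matching marker macro before this header is seen (the s390 commit in this series opts in this way, presumably from its arch headers), which routes _spin_lock() and friends to the always-available inline __spin_lock() bodies; without the marker they remain out-of-line functions in kernel/spinlock.c. A standalone C sketch of the opt-in pattern (all names invented for illustration):

	#include <stdio.h>

	/* "Arch" side: opt in to inlining by defining the marker. */
	#define __always_inline__my_op

	/* Generic side: the inline body is always available... */
	static inline int __my_op(int x)
	{
		return x + 1;
	}

	/* ...but the public name routes to it only when opted in. */
	#ifdef __always_inline__my_op
	#define _my_op(x)	__my_op(x)
	#else
	int _my_op(int x);	/* otherwise resolved out of line */
	#endif

	int main(void)
	{
		printf("%d\n", _my_op(41));	/* prints 42, fully inlined */
		return 0;
	}

The unlock operations are pre-opted-in at the top of the header whenever neither CONFIG_DEBUG_SPINLOCK nor CONFIG_PREEMPT is set, preserving the old behaviour of always inlining unlocks in the nondebug, non-preempt case.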