diff options
author    Heiko Carstens <heiko.carstens@de.ibm.com>    2009-08-31 08:43:36 -0400
committer Ingo Molnar <mingo@elte.hu>                   2009-08-31 12:08:50 -0400
commit    69d0ee7377eef808e34ba5542b554ec97244b871 (patch)
tree      f46c756b897cf51497fda2ad22f9f12a3512e23b
parent    0ee000e5e8fa2e5c760250be0d78d5906e3eb94b (diff)
locking: Move spinlock function bodies to header file
Move spinlock function bodies to header file by creating a
static inline version of each variant. Use the inline version
on the out-of-line code.
This shouldn't make any difference besides that the spinlock
code can now be used to generate inlined spinlock code.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124417.859022429@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   include/linux/spinlock.h           18
-rw-r--r--   include/linux/spinlock_api_smp.h  263
-rw-r--r--   kernel/spinlock.c                 174
3 files changed, 300 insertions(+), 155 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..da76a06556bd 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
143 | */ | 143 | */ |
144 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | 144 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) |
145 | 145 | ||
146 | /* | ||
147 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | ||
148 | */ | ||
149 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
150 | # include <linux/spinlock_api_smp.h> | ||
151 | #else | ||
152 | # include <linux/spinlock_api_up.h> | ||
153 | #endif | ||
154 | |||
155 | #ifdef CONFIG_DEBUG_SPINLOCK | 146 | #ifdef CONFIG_DEBUG_SPINLOCK |
156 | extern void _raw_spin_lock(spinlock_t *lock); | 147 | extern void _raw_spin_lock(spinlock_t *lock); |
157 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 148 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
@@ -380,4 +371,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
380 | */ | 371 | */ |
381 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | 372 | #define spin_can_lock(lock) (!spin_is_locked(lock)) |
382 | 373 | ||
374 | /* | ||
375 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | ||
376 | */ | ||
377 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
378 | # include <linux/spinlock_api_smp.h> | ||
379 | #else | ||
380 | # include <linux/spinlock_api_up.h> | ||
381 | #endif | ||
382 | |||
383 | #endif /* __LINUX_SPINLOCK_H */ | 383 | #endif /* __LINUX_SPINLOCK_H */ |
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b5..6b108f5fb149 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,267 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
61 | __releases(lock); | 61 | __releases(lock); |
62 | 62 | ||
63 | static inline int __spin_trylock(spinlock_t *lock) | ||
64 | { | ||
65 | preempt_disable(); | ||
66 | if (_raw_spin_trylock(lock)) { | ||
67 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
68 | return 1; | ||
69 | } | ||
70 | preempt_enable(); | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static inline int __read_trylock(rwlock_t *lock) | ||
75 | { | ||
76 | preempt_disable(); | ||
77 | if (_raw_read_trylock(lock)) { | ||
78 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
79 | return 1; | ||
80 | } | ||
81 | preempt_enable(); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static inline int __write_trylock(rwlock_t *lock) | ||
86 | { | ||
87 | preempt_disable(); | ||
88 | if (_raw_write_trylock(lock)) { | ||
89 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
90 | return 1; | ||
91 | } | ||
92 | preempt_enable(); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * If lockdep is enabled then we use the non-preemption spin-ops | ||
98 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | ||
99 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | ||
100 | */ | ||
101 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | ||
102 | |||
103 | static inline void __read_lock(rwlock_t *lock) | ||
104 | { | ||
105 | preempt_disable(); | ||
106 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
107 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
108 | } | ||
109 | |||
110 | static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) | ||
111 | { | ||
112 | unsigned long flags; | ||
113 | |||
114 | local_irq_save(flags); | ||
115 | preempt_disable(); | ||
116 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
117 | /* | ||
118 | * On lockdep we dont want the hand-coded irq-enable of | ||
119 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
120 | * that interrupts are not re-enabled during lock-acquire: | ||
121 | */ | ||
122 | #ifdef CONFIG_LOCKDEP | ||
123 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
124 | #else | ||
125 | _raw_spin_lock_flags(lock, &flags); | ||
126 | #endif | ||
127 | return flags; | ||
128 | } | ||
129 | |||
130 | static inline void __spin_lock_irq(spinlock_t *lock) | ||
131 | { | ||
132 | local_irq_disable(); | ||
133 | preempt_disable(); | ||
134 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
135 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
136 | } | ||
137 | |||
138 | static inline void __spin_lock_bh(spinlock_t *lock) | ||
139 | { | ||
140 | local_bh_disable(); | ||
141 | preempt_disable(); | ||
142 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
143 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
144 | } | ||
145 | |||
146 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | |||
150 | local_irq_save(flags); | ||
151 | preempt_disable(); | ||
152 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
153 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
154 | _raw_read_lock_flags, &flags); | ||
155 | return flags; | ||
156 | } | ||
157 | |||
158 | static inline void __read_lock_irq(rwlock_t *lock) | ||
159 | { | ||
160 | local_irq_disable(); | ||
161 | preempt_disable(); | ||
162 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
163 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
164 | } | ||
165 | |||
166 | static inline void __read_lock_bh(rwlock_t *lock) | ||
167 | { | ||
168 | local_bh_disable(); | ||
169 | preempt_disable(); | ||
170 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
171 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
172 | } | ||
173 | |||
174 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | |||
178 | local_irq_save(flags); | ||
179 | preempt_disable(); | ||
180 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
181 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
182 | _raw_write_lock_flags, &flags); | ||
183 | return flags; | ||
184 | } | ||
185 | |||
186 | static inline void __write_lock_irq(rwlock_t *lock) | ||
187 | { | ||
188 | local_irq_disable(); | ||
189 | preempt_disable(); | ||
190 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
191 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
192 | } | ||
193 | |||
194 | static inline void __write_lock_bh(rwlock_t *lock) | ||
195 | { | ||
196 | local_bh_disable(); | ||
197 | preempt_disable(); | ||
198 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
199 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
200 | } | ||
201 | |||
202 | static inline void __spin_lock(spinlock_t *lock) | ||
203 | { | ||
204 | preempt_disable(); | ||
205 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
206 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
207 | } | ||
208 | |||
209 | static inline void __write_lock(rwlock_t *lock) | ||
210 | { | ||
211 | preempt_disable(); | ||
212 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
213 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
214 | } | ||
215 | |||
216 | #endif /* CONFIG_PREEMPT */ | ||
217 | |||
218 | static inline void __spin_unlock(spinlock_t *lock) | ||
219 | { | ||
220 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
221 | _raw_spin_unlock(lock); | ||
222 | preempt_enable(); | ||
223 | } | ||
224 | |||
225 | static inline void __write_unlock(rwlock_t *lock) | ||
226 | { | ||
227 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
228 | _raw_write_unlock(lock); | ||
229 | preempt_enable(); | ||
230 | } | ||
231 | |||
232 | static inline void __read_unlock(rwlock_t *lock) | ||
233 | { | ||
234 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
235 | _raw_read_unlock(lock); | ||
236 | preempt_enable(); | ||
237 | } | ||
238 | |||
239 | static inline void __spin_unlock_irqrestore(spinlock_t *lock, | ||
240 | unsigned long flags) | ||
241 | { | ||
242 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
243 | _raw_spin_unlock(lock); | ||
244 | local_irq_restore(flags); | ||
245 | preempt_enable(); | ||
246 | } | ||
247 | |||
248 | static inline void __spin_unlock_irq(spinlock_t *lock) | ||
249 | { | ||
250 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
251 | _raw_spin_unlock(lock); | ||
252 | local_irq_enable(); | ||
253 | preempt_enable(); | ||
254 | } | ||
255 | |||
256 | static inline void __spin_unlock_bh(spinlock_t *lock) | ||
257 | { | ||
258 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
259 | _raw_spin_unlock(lock); | ||
260 | preempt_enable_no_resched(); | ||
261 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
262 | } | ||
263 | |||
264 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
265 | { | ||
266 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
267 | _raw_read_unlock(lock); | ||
268 | local_irq_restore(flags); | ||
269 | preempt_enable(); | ||
270 | } | ||
271 | |||
272 | static inline void __read_unlock_irq(rwlock_t *lock) | ||
273 | { | ||
274 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
275 | _raw_read_unlock(lock); | ||
276 | local_irq_enable(); | ||
277 | preempt_enable(); | ||
278 | } | ||
279 | |||
280 | static inline void __read_unlock_bh(rwlock_t *lock) | ||
281 | { | ||
282 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
283 | _raw_read_unlock(lock); | ||
284 | preempt_enable_no_resched(); | ||
285 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
286 | } | ||
287 | |||
288 | static inline void __write_unlock_irqrestore(rwlock_t *lock, | ||
289 | unsigned long flags) | ||
290 | { | ||
291 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
292 | _raw_write_unlock(lock); | ||
293 | local_irq_restore(flags); | ||
294 | preempt_enable(); | ||
295 | } | ||
296 | |||
297 | static inline void __write_unlock_irq(rwlock_t *lock) | ||
298 | { | ||
299 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
300 | _raw_write_unlock(lock); | ||
301 | local_irq_enable(); | ||
302 | preempt_enable(); | ||
303 | } | ||
304 | |||
305 | static inline void __write_unlock_bh(rwlock_t *lock) | ||
306 | { | ||
307 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
308 | _raw_write_unlock(lock); | ||
309 | preempt_enable_no_resched(); | ||
310 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
311 | } | ||
312 | |||
313 | static inline int __spin_trylock_bh(spinlock_t *lock) | ||
314 | { | ||
315 | local_bh_disable(); | ||
316 | preempt_disable(); | ||
317 | if (_raw_spin_trylock(lock)) { | ||
318 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
319 | return 1; | ||
320 | } | ||
321 | preempt_enable_no_resched(); | ||
322 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
323 | return 0; | ||
324 | } | ||
325 | |||
63 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | 326 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4ebd..2c000f5c070b 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -23,40 +23,19 @@
23 | 23 | ||
24 | int __lockfunc _spin_trylock(spinlock_t *lock) | 24 | int __lockfunc _spin_trylock(spinlock_t *lock) |
25 | { | 25 | { |
26 | preempt_disable(); | 26 | return __spin_trylock(lock); |
27 | if (_raw_spin_trylock(lock)) { | ||
28 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
29 | return 1; | ||
30 | } | ||
31 | |||
32 | preempt_enable(); | ||
33 | return 0; | ||
34 | } | 27 | } |
35 | EXPORT_SYMBOL(_spin_trylock); | 28 | EXPORT_SYMBOL(_spin_trylock); |
36 | 29 | ||
37 | int __lockfunc _read_trylock(rwlock_t *lock) | 30 | int __lockfunc _read_trylock(rwlock_t *lock) |
38 | { | 31 | { |
39 | preempt_disable(); | 32 | return __read_trylock(lock); |
40 | if (_raw_read_trylock(lock)) { | ||
41 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
42 | return 1; | ||
43 | } | ||
44 | |||
45 | preempt_enable(); | ||
46 | return 0; | ||
47 | } | 33 | } |
48 | EXPORT_SYMBOL(_read_trylock); | 34 | EXPORT_SYMBOL(_read_trylock); |
49 | 35 | ||
50 | int __lockfunc _write_trylock(rwlock_t *lock) | 36 | int __lockfunc _write_trylock(rwlock_t *lock) |
51 | { | 37 | { |
52 | preempt_disable(); | 38 | return __write_trylock(lock); |
53 | if (_raw_write_trylock(lock)) { | ||
54 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
55 | return 1; | ||
56 | } | ||
57 | |||
58 | preempt_enable(); | ||
59 | return 0; | ||
60 | } | 39 | } |
61 | EXPORT_SYMBOL(_write_trylock); | 40 | EXPORT_SYMBOL(_write_trylock); |
62 | 41 | ||
@@ -69,129 +48,74 @@ EXPORT_SYMBOL(_write_trylock);
69 | 48 | ||
70 | void __lockfunc _read_lock(rwlock_t *lock) | 49 | void __lockfunc _read_lock(rwlock_t *lock) |
71 | { | 50 | { |
72 | preempt_disable(); | 51 | __read_lock(lock); |
73 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
74 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
75 | } | 52 | } |
76 | EXPORT_SYMBOL(_read_lock); | 53 | EXPORT_SYMBOL(_read_lock); |
77 | 54 | ||
78 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | 55 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) |
79 | { | 56 | { |
80 | unsigned long flags; | 57 | return __spin_lock_irqsave(lock); |
81 | |||
82 | local_irq_save(flags); | ||
83 | preempt_disable(); | ||
84 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
85 | /* | ||
86 | * On lockdep we dont want the hand-coded irq-enable of | ||
87 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
88 | * that interrupts are not re-enabled during lock-acquire: | ||
89 | */ | ||
90 | #ifdef CONFIG_LOCKDEP | ||
91 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
92 | #else | ||
93 | _raw_spin_lock_flags(lock, &flags); | ||
94 | #endif | ||
95 | return flags; | ||
96 | } | 58 | } |
97 | EXPORT_SYMBOL(_spin_lock_irqsave); | 59 | EXPORT_SYMBOL(_spin_lock_irqsave); |
98 | 60 | ||
99 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | 61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) |
100 | { | 62 | { |
101 | local_irq_disable(); | 63 | __spin_lock_irq(lock); |
102 | preempt_disable(); | ||
103 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
104 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
105 | } | 64 | } |
106 | EXPORT_SYMBOL(_spin_lock_irq); | 65 | EXPORT_SYMBOL(_spin_lock_irq); |
107 | 66 | ||
108 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | 67 | void __lockfunc _spin_lock_bh(spinlock_t *lock) |
109 | { | 68 | { |
110 | local_bh_disable(); | 69 | __spin_lock_bh(lock); |
111 | preempt_disable(); | ||
112 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
113 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
114 | } | 70 | } |
115 | EXPORT_SYMBOL(_spin_lock_bh); | 71 | EXPORT_SYMBOL(_spin_lock_bh); |
116 | 72 | ||
117 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | 73 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) |
118 | { | 74 | { |
119 | unsigned long flags; | 75 | return __read_lock_irqsave(lock); |
120 | |||
121 | local_irq_save(flags); | ||
122 | preempt_disable(); | ||
123 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
124 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
125 | _raw_read_lock_flags, &flags); | ||
126 | return flags; | ||
127 | } | 76 | } |
128 | EXPORT_SYMBOL(_read_lock_irqsave); | 77 | EXPORT_SYMBOL(_read_lock_irqsave); |
129 | 78 | ||
130 | void __lockfunc _read_lock_irq(rwlock_t *lock) | 79 | void __lockfunc _read_lock_irq(rwlock_t *lock) |
131 | { | 80 | { |
132 | local_irq_disable(); | 81 | __read_lock_irq(lock); |
133 | preempt_disable(); | ||
134 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
135 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
136 | } | 82 | } |
137 | EXPORT_SYMBOL(_read_lock_irq); | 83 | EXPORT_SYMBOL(_read_lock_irq); |
138 | 84 | ||
139 | void __lockfunc _read_lock_bh(rwlock_t *lock) | 85 | void __lockfunc _read_lock_bh(rwlock_t *lock) |
140 | { | 86 | { |
141 | local_bh_disable(); | 87 | __read_lock_bh(lock); |
142 | preempt_disable(); | ||
143 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
144 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
145 | } | 88 | } |
146 | EXPORT_SYMBOL(_read_lock_bh); | 89 | EXPORT_SYMBOL(_read_lock_bh); |
147 | 90 | ||
148 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | 91 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) |
149 | { | 92 | { |
150 | unsigned long flags; | 93 | return __write_lock_irqsave(lock); |
151 | |||
152 | local_irq_save(flags); | ||
153 | preempt_disable(); | ||
154 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
155 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
156 | _raw_write_lock_flags, &flags); | ||
157 | return flags; | ||
158 | } | 94 | } |
159 | EXPORT_SYMBOL(_write_lock_irqsave); | 95 | EXPORT_SYMBOL(_write_lock_irqsave); |
160 | 96 | ||
161 | void __lockfunc _write_lock_irq(rwlock_t *lock) | 97 | void __lockfunc _write_lock_irq(rwlock_t *lock) |
162 | { | 98 | { |
163 | local_irq_disable(); | 99 | __write_lock_irq(lock); |
164 | preempt_disable(); | ||
165 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
166 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
167 | } | 100 | } |
168 | EXPORT_SYMBOL(_write_lock_irq); | 101 | EXPORT_SYMBOL(_write_lock_irq); |
169 | 102 | ||
170 | void __lockfunc _write_lock_bh(rwlock_t *lock) | 103 | void __lockfunc _write_lock_bh(rwlock_t *lock) |
171 | { | 104 | { |
172 | local_bh_disable(); | 105 | __write_lock_bh(lock); |
173 | preempt_disable(); | ||
174 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
175 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
176 | } | 106 | } |
177 | EXPORT_SYMBOL(_write_lock_bh); | 107 | EXPORT_SYMBOL(_write_lock_bh); |
178 | 108 | ||
179 | void __lockfunc _spin_lock(spinlock_t *lock) | 109 | void __lockfunc _spin_lock(spinlock_t *lock) |
180 | { | 110 | { |
181 | preempt_disable(); | 111 | __spin_lock(lock); |
182 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
183 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
184 | } | 112 | } |
185 | |||
186 | EXPORT_SYMBOL(_spin_lock); | 113 | EXPORT_SYMBOL(_spin_lock); |
187 | 114 | ||
188 | void __lockfunc _write_lock(rwlock_t *lock) | 115 | void __lockfunc _write_lock(rwlock_t *lock) |
189 | { | 116 | { |
190 | preempt_disable(); | 117 | __write_lock(lock); |
191 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
192 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
193 | } | 118 | } |
194 | |||
195 | EXPORT_SYMBOL(_write_lock); | 119 | EXPORT_SYMBOL(_write_lock); |
196 | 120 | ||
197 | #else /* CONFIG_PREEMPT: */ | 121 | #else /* CONFIG_PREEMPT: */ |
@@ -320,121 +244,79 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
320 | 244 | ||
321 | void __lockfunc _spin_unlock(spinlock_t *lock) | 245 | void __lockfunc _spin_unlock(spinlock_t *lock) |
322 | { | 246 | { |
323 | spin_release(&lock->dep_map, 1, _RET_IP_); | 247 | __spin_unlock(lock); |
324 | _raw_spin_unlock(lock); | ||
325 | preempt_enable(); | ||
326 | } | 248 | } |
327 | EXPORT_SYMBOL(_spin_unlock); | 249 | EXPORT_SYMBOL(_spin_unlock); |
328 | 250 | ||
329 | void __lockfunc _write_unlock(rwlock_t *lock) | 251 | void __lockfunc _write_unlock(rwlock_t *lock) |
330 | { | 252 | { |
331 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 253 | __write_unlock(lock); |
332 | _raw_write_unlock(lock); | ||
333 | preempt_enable(); | ||
334 | } | 254 | } |
335 | EXPORT_SYMBOL(_write_unlock); | 255 | EXPORT_SYMBOL(_write_unlock); |
336 | 256 | ||
337 | void __lockfunc _read_unlock(rwlock_t *lock) | 257 | void __lockfunc _read_unlock(rwlock_t *lock) |
338 | { | 258 | { |
339 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 259 | __read_unlock(lock); |
340 | _raw_read_unlock(lock); | ||
341 | preempt_enable(); | ||
342 | } | 260 | } |
343 | EXPORT_SYMBOL(_read_unlock); | 261 | EXPORT_SYMBOL(_read_unlock); |
344 | 262 | ||
345 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 263 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
346 | { | 264 | { |
347 | spin_release(&lock->dep_map, 1, _RET_IP_); | 265 | __spin_unlock_irqrestore(lock, flags); |
348 | _raw_spin_unlock(lock); | ||
349 | local_irq_restore(flags); | ||
350 | preempt_enable(); | ||
351 | } | 266 | } |
352 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 267 | EXPORT_SYMBOL(_spin_unlock_irqrestore); |
353 | 268 | ||
354 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) | 269 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) |
355 | { | 270 | { |
356 | spin_release(&lock->dep_map, 1, _RET_IP_); | 271 | __spin_unlock_irq(lock); |
357 | _raw_spin_unlock(lock); | ||
358 | local_irq_enable(); | ||
359 | preempt_enable(); | ||
360 | } | 272 | } |
361 | EXPORT_SYMBOL(_spin_unlock_irq); | 273 | EXPORT_SYMBOL(_spin_unlock_irq); |
362 | 274 | ||
363 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) | 275 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) |
364 | { | 276 | { |
365 | spin_release(&lock->dep_map, 1, _RET_IP_); | 277 | __spin_unlock_bh(lock); |
366 | _raw_spin_unlock(lock); | ||
367 | preempt_enable_no_resched(); | ||
368 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
369 | } | 278 | } |
370 | EXPORT_SYMBOL(_spin_unlock_bh); | 279 | EXPORT_SYMBOL(_spin_unlock_bh); |
371 | 280 | ||
372 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 281 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
373 | { | 282 | { |
374 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 283 | __read_unlock_irqrestore(lock, flags); |
375 | _raw_read_unlock(lock); | ||
376 | local_irq_restore(flags); | ||
377 | preempt_enable(); | ||
378 | } | 284 | } |
379 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 285 | EXPORT_SYMBOL(_read_unlock_irqrestore); |
380 | 286 | ||
381 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 287 | void __lockfunc _read_unlock_irq(rwlock_t *lock) |
382 | { | 288 | { |
383 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 289 | __read_unlock_irq(lock); |
384 | _raw_read_unlock(lock); | ||
385 | local_irq_enable(); | ||
386 | preempt_enable(); | ||
387 | } | 290 | } |
388 | EXPORT_SYMBOL(_read_unlock_irq); | 291 | EXPORT_SYMBOL(_read_unlock_irq); |
389 | 292 | ||
390 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 293 | void __lockfunc _read_unlock_bh(rwlock_t *lock) |
391 | { | 294 | { |
392 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 295 | __read_unlock_bh(lock); |
393 | _raw_read_unlock(lock); | ||
394 | preempt_enable_no_resched(); | ||
395 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
396 | } | 296 | } |
397 | EXPORT_SYMBOL(_read_unlock_bh); | 297 | EXPORT_SYMBOL(_read_unlock_bh); |
398 | 298 | ||
399 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 299 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
400 | { | 300 | { |
401 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 301 | __write_unlock_irqrestore(lock, flags); |
402 | _raw_write_unlock(lock); | ||
403 | local_irq_restore(flags); | ||
404 | preempt_enable(); | ||
405 | } | 302 | } |
406 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 303 | EXPORT_SYMBOL(_write_unlock_irqrestore); |
407 | 304 | ||
408 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 305 | void __lockfunc _write_unlock_irq(rwlock_t *lock) |
409 | { | 306 | { |
410 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 307 | __write_unlock_irq(lock); |
411 | _raw_write_unlock(lock); | ||
412 | local_irq_enable(); | ||
413 | preempt_enable(); | ||
414 | } | 308 | } |
415 | EXPORT_SYMBOL(_write_unlock_irq); | 309 | EXPORT_SYMBOL(_write_unlock_irq); |
416 | 310 | ||
417 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 311 | void __lockfunc _write_unlock_bh(rwlock_t *lock) |
418 | { | 312 | { |
419 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 313 | __write_unlock_bh(lock); |
420 | _raw_write_unlock(lock); | ||
421 | preempt_enable_no_resched(); | ||
422 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
423 | } | 314 | } |
424 | EXPORT_SYMBOL(_write_unlock_bh); | 315 | EXPORT_SYMBOL(_write_unlock_bh); |
425 | 316 | ||
426 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | 317 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) |
427 | { | 318 | { |
428 | local_bh_disable(); | 319 | return __spin_trylock_bh(lock); |
429 | preempt_disable(); | ||
430 | if (_raw_spin_trylock(lock)) { | ||
431 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
432 | return 1; | ||
433 | } | ||
434 | |||
435 | preempt_enable_no_resched(); | ||
436 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
437 | return 0; | ||
438 | } | 320 | } |
439 | EXPORT_SYMBOL(_spin_trylock_bh); | 321 | EXPORT_SYMBOL(_spin_trylock_bh); |
440 | 322 | ||