Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--  kernel/spinlock.c | 448
1 file changed, 223 insertions(+), 225 deletions(-)
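The patch below renames the out-of-line lock functions from the _spin_*/_read_*/_write_* namespace to _raw_spin_*/_raw_read_*/_raw_write_* (the spin variants now taking raw_spinlock_t), and replaces the old per-symbol #ifndef _spin_lock overrides with CONFIG_INLINE_* options. A minimal sketch of that selection pattern, using hypothetical names (my_lock_t, __my_lock, CONFIG_INLINE_MY_LOCK) in place of the real ones from include/linux/spinlock_api_smp.h:

/* Sketch only: hypothetical names illustrating the #ifndef CONFIG_INLINE_*
 * pattern used throughout the new kernel/spinlock.c below. */

/* A header always provides the inline implementation... */
static inline void __my_lock(my_lock_t *lock) { /* acquire the lock */ }

#ifdef CONFIG_INLINE_MY_LOCK
/* ...and callers expand it directly at every call site... */
#define _my_lock(lock)	__my_lock(lock)
#else
/* ...otherwise a single shared out-of-line copy is emitted, exactly as
 * the _raw_* functions below wrap their __raw_* counterparts. */
void __lockfunc _my_lock(my_lock_t *lock);
#endif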
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..be6517fb9c14 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,193 +21,72 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
-#ifndef _spin_trylock
-int __lockfunc _spin_trylock(spinlock_t *lock)
-{
-	return __spin_trylock(lock);
-}
-EXPORT_SYMBOL(_spin_trylock);
-#endif
-
-#ifndef _read_trylock
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
-	return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-#endif
-
-#ifndef _write_trylock
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
-	return __write_trylock(lock);
-}
-EXPORT_SYMBOL(_write_trylock);
-#endif
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-
-#ifndef _read_lock
-void __lockfunc _read_lock(rwlock_t *lock)
-{
-	__read_lock(lock);
-}
-EXPORT_SYMBOL(_read_lock);
-#endif
-
-#ifndef _spin_lock_irqsave
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-{
-	return __spin_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irqsave);
-#endif
-
-#ifndef _spin_lock_irq
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
-{
-	__spin_lock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irq);
-#endif
-
-#ifndef _spin_lock_bh
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
-{
-	__spin_lock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_lock_bh);
-#endif
-
-#ifndef _read_lock_irqsave
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
-	return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-#endif
-
-#ifndef _read_lock_irq
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
-	__read_lock_irq(lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-#endif
-
-#ifndef _read_lock_bh
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
-	__read_lock_bh(lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-#endif
-
-#ifndef _write_lock_irqsave
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
-	return __write_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-#endif
-
-#ifndef _write_lock_irq
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
-	__write_lock_irq(lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
-#endif
-
-#ifndef _write_lock_bh
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
-	__write_lock_bh(lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-#endif
-
-#ifndef _spin_lock
-void __lockfunc _spin_lock(spinlock_t *lock)
-{
-	__spin_lock(lock);
-}
-EXPORT_SYMBOL(_spin_lock);
-#endif
-
-#ifndef _write_lock
-void __lockfunc _write_lock(rwlock_t *lock)
-{
-	__write_lock(lock);
-}
-EXPORT_SYMBOL(_write_lock);
-#endif
-
-#else /* CONFIG_PREEMPT: */
-
 /*
+ * The __lock_function inlines are taken from
+ * include/linux/spinlock_api_smp.h
+ */
+#else
+#define raw_read_can_lock(l)	read_can_lock(l)
+#define raw_write_can_lock(l)	write_can_lock(l)
+/*
+ * We build the __lock_function inlines here. They are too large for
+ * inlining all over the place, but here is only one user per function
+ * which embeds them into the calling _lock_function below.
+ *
  * This could be a long-held lock. We both prepare to spin for a long
  * time (making _this_ CPU preemptable if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
  */
-
 #define BUILD_LOCK_OPS(op, locktype)					\
-void __lockfunc _##op##_lock(locktype##_t *lock)			\
+void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
 {									\
 	for (;;) {							\
 		preempt_disable();					\
-		if (likely(_raw_##op##_trylock(lock)))			\
+		if (likely(do_raw_##op##_trylock(lock)))		\
 			break;						\
 		preempt_enable();					\
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
-			_raw_##op##_relax(&lock->raw_lock);		\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock);						\
-									\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
+unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
 {									\
 	unsigned long flags;						\
 									\
 	for (;;) {							\
 		preempt_disable();					\
 		local_irq_save(flags);					\
-		if (likely(_raw_##op##_trylock(lock)))			\
+		if (likely(do_raw_##op##_trylock(lock)))		\
 			break;						\
 		local_irq_restore(flags);				\
 		preempt_enable();					\
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
-			_raw_##op##_relax(&lock->raw_lock);		\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 	return flags;							\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_irqsave);					\
-									\
-void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
+void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
 {									\
-	_##op##_lock_irqsave(lock);					\
+	_raw_##op##_lock_irqsave(lock);					\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_irq);					\
-									\
-void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
+void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
 {									\
 	unsigned long flags;						\
 									\
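BUILD_LOCK_OPS stamps out these preemption-friendly spinning loops by token-pasting op and locktype. For instance, the invocation BUILD_LOCK_OPS(spin, raw_spinlock) in the next hunk expands the first definition above roughly to (whitespace aside, a mechanical expansion of the macro as shown):

void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_spin_trylock(lock)))
			break;			/* acquired */
		preempt_enable();		/* stay preemptible while spinning */

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;	/* ask the holder to release ASAP */
		while (!raw_spin_can_lock(lock) && (lock)->break_lock)
			arch_spin_relax(&lock->raw_lock);
	}
	(lock)->break_lock = 0;
}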
@@ -216,164 +95,283 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock)	\
 	/* irq-disabling. We use the generic preemption-aware */	\
 	/* function: */							\
 	/**/								\
-	flags = _##op##_lock_irqsave(lock);				\
+	flags = _raw_##op##_lock_irqsave(lock);				\
 	local_bh_disable();						\
 	local_irq_restore(flags);					\
 }									\
-									\
-EXPORT_SYMBOL(_##op##_lock_bh)
 
 /*
  * Build preemption-friendly versions of the following
  * lock-spinning functions:
  *
- *         _[spin|read|write]_lock()
- *         _[spin|read|write]_lock_irq()
- *         _[spin|read|write]_lock_irqsave()
- *         _[spin|read|write]_lock_bh()
+ *         __[spin|read|write]_lock()
+ *         __[spin|read|write]_lock_irq()
+ *         __[spin|read|write]_lock_irqsave()
+ *         __[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 
-#endif /* CONFIG_PREEMPT */
+#endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __raw_spin_trylock(lock);
+}
+EXPORT_SYMBOL(_raw_spin_trylock);
+#endif
 
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
 {
-	preempt_disable();
-	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	return __raw_spin_trylock_bh(lock);
 }
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_raw_spin_trylock_bh);
+#endif
 
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
 {
-	unsigned long flags;
+	__raw_spin_lock(lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock);
+#endif
 
-	local_irq_save(flags);
-	preempt_disable();
-	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
-				_raw_spin_lock_flags, &flags);
-	return flags;
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+{
+	return __raw_spin_lock_irqsave(lock);
 }
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_raw_spin_lock_irqsave);
+#endif
 
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
-				     struct lockdep_map *nest_lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
 {
-	preempt_disable();
-	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	__raw_spin_lock_irq(lock);
 }
-EXPORT_SYMBOL(_spin_lock_nest_lock);
+EXPORT_SYMBOL(_raw_spin_lock_irq);
+#endif
 
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
+{
+	__raw_spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh);
 #endif
 
-#ifndef _spin_unlock
-void __lockfunc _spin_unlock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__spin_unlock(lock);
+	__raw_spin_unlock(lock);
 }
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock);
 #endif
 
-#ifndef _write_unlock
-void __lockfunc _write_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 {
-	__write_unlock(lock);
+	__raw_spin_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
 #endif
 
-#ifndef _read_unlock
-void __lockfunc _read_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
-	__read_unlock(lock);
+	__raw_spin_unlock_irq(lock);
 }
-EXPORT_SYMBOL(_read_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock_irq);
 #endif
 
-#ifndef _spin_unlock_irqrestore
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
-	__spin_unlock_irqrestore(lock, flags);
+	__raw_spin_unlock_bh(lock);
 }
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
-#ifndef _spin_unlock_irq
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
-	__spin_unlock_irq(lock);
+	return __raw_read_trylock(lock);
 }
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_raw_read_trylock);
 #endif
 
-#ifndef _spin_unlock_bh
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _raw_read_lock(rwlock_t *lock)
 {
-	__spin_unlock_bh(lock);
+	__raw_read_lock(lock);
 }
-EXPORT_SYMBOL(_spin_unlock_bh);
+EXPORT_SYMBOL(_raw_read_lock);
 #endif
 
-#ifndef _read_unlock_irqrestore
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
 {
-	__read_unlock_irqrestore(lock, flags);
+	return __raw_read_lock_irqsave(lock);
 }
-EXPORT_SYMBOL(_read_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_read_lock_irqsave);
 #endif
 
-#ifndef _read_unlock_irq
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
 {
-	__read_unlock_irq(lock);
+	__raw_read_lock_irq(lock);
 }
-EXPORT_SYMBOL(_read_unlock_irq);
+EXPORT_SYMBOL(_raw_read_lock_irq);
 #endif
 
-#ifndef _read_unlock_bh
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
 {
-	__read_unlock_bh(lock);
+	__raw_read_lock_bh(lock);
 }
-EXPORT_SYMBOL(_read_unlock_bh);
+EXPORT_SYMBOL(_raw_read_lock_bh);
 #endif
 
-#ifndef _write_unlock_irqrestore
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_READ_UNLOCK
+void __lockfunc _raw_read_unlock(rwlock_t *lock)
 {
-	__write_unlock_irqrestore(lock, flags);
+	__raw_read_unlock(lock);
 }
-EXPORT_SYMBOL(_write_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_read_unlock);
 #endif
 
-#ifndef _write_unlock_irq
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-	__write_unlock_irq(lock);
+	__raw_read_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(_write_unlock_irq);
+EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
 #endif
 
-#ifndef _write_unlock_bh
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
 {
-	__write_unlock_bh(lock);
+	__raw_read_unlock_irq(lock);
 }
-EXPORT_SYMBOL(_write_unlock_bh);
+EXPORT_SYMBOL(_raw_read_unlock_irq);
 #endif
 
-#ifndef _spin_trylock_bh
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
 {
-	return __spin_trylock_bh(lock);
+	__raw_read_unlock_bh(lock);
 }
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_raw_read_unlock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _raw_write_trylock(rwlock_t *lock)
+{
+	return __raw_write_trylock(lock);
+}
+EXPORT_SYMBOL(_raw_write_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _raw_write_lock(rwlock_t *lock)
+{
+	__raw_write_lock(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+{
+	return __raw_write_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
+{
+	__raw_write_lock_irq(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
+{
+	__raw_write_lock_bh(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
+void __lockfunc _raw_write_unlock(rwlock_t *lock)
+{
+	__raw_write_unlock(lock);
+}
+EXPORT_SYMBOL(_raw_write_unlock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+	__raw_write_unlock_irqrestore(lock, flags);
+}
+EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
+{
+	__raw_write_unlock_irq(lock);
+}
+EXPORT_SYMBOL(_raw_write_unlock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+{
+	__raw_write_unlock_bh(lock);
+}
+EXPORT_SYMBOL(_raw_write_unlock_bh);
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+{
+	preempt_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_nested);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
+						   int subclass)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+				do_raw_spin_lock_flags, &flags);
+	return flags;
+}
+EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
+
+void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
+				     struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
+
 #endif
 
 notrace int in_lock_functions(unsigned long addr)
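The lockdep-only _nested and _nest_lock variants added above carry a subclass (or an outer lockdep_map) so the validator can tell legitimate nesting of two locks from the same lock class apart from a real recursive deadlock. A usage sketch through the public wrappers; the src/dst structures are hypothetical, while spin_lock_nested() and SINGLE_DEPTH_NESTING are the existing kernel API:

/* Hypothetical: two objects of the same lock class, locked in a fixed
 * order. Annotating the inner acquisition with a subclass keeps lockdep
 * from reporting it as recursive locking of a single class. */
spin_lock(&src->lock);
spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
/* ... move an item from src to dst ... */
spin_unlock(&dst->lock);
spin_unlock(&src->lock);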