Diffstat (limited to 'kernel/locking/spinlock.c')
 -rw-r--r--   kernel/locking/spinlock.c   399
 1 files changed, 399 insertions, 0 deletions
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
new file mode 100644
index 000000000000..4b082b5cac9e
--- /dev/null
+++ b/kernel/locking/spinlock.c
@@ -0,0 +1,399 @@
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
#define raw_read_can_lock(l)	read_can_lock(l)
#define raw_write_can_lock(l)	write_can_lock(l)

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
        for (;;) { \
                preempt_disable(); \
                if (likely(do_raw_##op##_trylock(lock))) \
                        break; \
                preempt_enable(); \
 \
                if (!(lock)->break_lock) \
                        (lock)->break_lock = 1; \
                while (!raw_##op##_can_lock(lock) && (lock)->break_lock) \
                        arch_##op##_relax(&lock->raw_lock); \
        } \
        (lock)->break_lock = 0; \
} \
 \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
        unsigned long flags; \
 \
        for (;;) { \
                preempt_disable(); \
                local_irq_save(flags); \
                if (likely(do_raw_##op##_trylock(lock))) \
                        break; \
                local_irq_restore(flags); \
                preempt_enable(); \
 \
                if (!(lock)->break_lock) \
                        (lock)->break_lock = 1; \
                while (!raw_##op##_can_lock(lock) && (lock)->break_lock) \
                        arch_##op##_relax(&lock->raw_lock); \
        } \
        (lock)->break_lock = 0; \
        return flags; \
} \
 \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
        _raw_##op##_lock_irqsave(lock); \
} \
 \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
        unsigned long flags; \
 \
        /*                                                    */ \
        /* Careful: we must exclude softirqs too, hence the   */ \
        /* irq-disabling. We use the generic preemption-aware */ \
        /* function:                                          */ \
        /**/ \
        flags = _raw_##op##_lock_irqsave(lock); \
        local_bh_disable(); \
        local_irq_restore(flags); \
} \

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif
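
/*
 * For illustration only: when CONFIG_GENERIC_LOCKBREAK is set and
 * CONFIG_DEBUG_LOCK_ALLOC is not (i.e. the #else branch above is taken),
 * BUILD_LOCK_OPS(spin, raw_spinlock) expands __raw_spin_lock() to roughly
 * the following -- a sketch with the ## token pasting applied by hand,
 * not the exact preprocessor output:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!raw_spin_can_lock(lock) && (lock)->break_lock)
 *				arch_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */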

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
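
/*
 * A sketch of how the CONFIG_INLINE_* knobs are consumed (simplified
 * from include/linux/spinlock_api_smp.h): when an architecture selects
 * inlining for an operation, the _raw_* entry point is mapped straight
 * onto the __raw_* inline and the out-of-line copy in this file is
 * compiled out by the #ifndef above, e.g.:
 *
 *	#ifdef CONFIG_INLINE_SPIN_TRYLOCK
 *	#define _raw_spin_trylock(lock)	__raw_spin_trylock(lock)
 *	#endif
 */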

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                        int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
                             do_raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                         struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
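
/*
 * Usage sketch (hypothetical caller): the nested variants above are the
 * out-of-line backends behind spin_lock_nested() and friends, which let
 * lockdep tell legitimate same-class nesting apart from real recursion,
 * e.g. taking two locks of the same lock class in a fixed order:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */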

#endif

notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
               && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
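
/*
 * Usage sketch, modelled loosely on an architecture's profile_pc()
 * (which the header comment at the top of this file refers to).
 * caller_addr() is a hypothetical stand-in for however the architecture
 * recovers the caller from the interrupted frame or link register:
 *
 *	unsigned long profile_pc(struct pt_regs *regs)
 *	{
 *		unsigned long pc = instruction_pointer(regs);
 *
 *		if (in_lock_functions(pc))
 *			return caller_addr(regs);
 *		return pc;
 *	}
 *
 * A profiling tick that lands in the lock-spinning code is thereby
 * charged to the lock's caller rather than to the lock functions
 * themselves.
 */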