diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /kernel/spinlock.c |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r-- | kernel/spinlock.c | 371 |
1 file changed, 371 insertions, 0 deletions
diff --git a/kernel/spinlock.c b/kernel/spinlock.c new file mode 100644 index 000000000000..e15ed17863f1 --- /dev/null +++ b/kernel/spinlock.c | |||
@@ -0,0 +1,371 @@ | |||
1 | /* | ||
2 | * Copyright (2004) Linus Torvalds | ||
3 | * | ||
4 | * Author: Zwane Mwaikambo <zwane@fsmlabs.com> | ||
5 | * | ||
6 | * Copyright (2004) Ingo Molnar | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/linkage.h> | ||
11 | #include <linux/preempt.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/module.h> | ||
15 | |||
/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 *
 * NOTE(review): as written this never fails -- it unconditionally
 * acquires the read lock and reports success, so it is a "trylock"
 * in name only.  Architectures are expected to provide a real
 * non-blocking implementation instead (see comment above).
 */
int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
{
	_raw_read_lock(lock);	/* always "succeeds" by taking the lock */
	return 1;
}
EXPORT_SYMBOL(generic_raw_read_trylock);
26 | |||
27 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
28 | { | ||
29 | preempt_disable(); | ||
30 | if (_raw_spin_trylock(lock)) | ||
31 | return 1; | ||
32 | |||
33 | preempt_enable(); | ||
34 | return 0; | ||
35 | } | ||
36 | EXPORT_SYMBOL(_spin_trylock); | ||
37 | |||
38 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
39 | { | ||
40 | preempt_disable(); | ||
41 | if (_raw_read_trylock(lock)) | ||
42 | return 1; | ||
43 | |||
44 | preempt_enable(); | ||
45 | return 0; | ||
46 | } | ||
47 | EXPORT_SYMBOL(_read_trylock); | ||
48 | |||
49 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
50 | { | ||
51 | preempt_disable(); | ||
52 | if (_raw_write_trylock(lock)) | ||
53 | return 1; | ||
54 | |||
55 | preempt_enable(); | ||
56 | return 0; | ||
57 | } | ||
58 | EXPORT_SYMBOL(_write_trylock); | ||
59 | |||
60 | #ifndef CONFIG_PREEMPT | ||
61 | |||
/*
 * Acquire @lock for reading with preemption disabled
 * (non-preemptible build: the raw lock spins without yielding).
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
68 | |||
/*
 * Acquire @lock with local interrupts disabled.  Returns the saved
 * interrupt state for a later _spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);		/* save and disable local IRQs */
	preempt_disable();
	/*
	 * The saved flags are passed down -- presumably so the arch
	 * implementation can re-enable IRQs while spinning; confirm
	 * against the per-arch _raw_spin_lock_flags().
	 */
	_raw_spin_lock_flags(lock, flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
79 | |||
/*
 * Acquire @lock with local interrupts disabled; the previous IRQ
 * state is not saved (pair with _spin_unlock_irq()).
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
87 | |||
/*
 * Acquire @lock with softirq processing disabled
 * (pair with _spin_unlock_bh()).
 */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
95 | |||
/*
 * Acquire @lock for reading with local interrupts disabled.  Returns
 * the saved interrupt state for _read_unlock_irqrestore().
 *
 * NOTE: unlike _spin_lock_irqsave(), this uses the plain raw lock
 * rather than a *_lock_flags() variant.
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);		/* save and disable local IRQs */
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
106 | |||
/*
 * Acquire @lock for reading with local interrupts disabled; the
 * previous IRQ state is not saved (pair with _read_unlock_irq()).
 */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
114 | |||
/*
 * Acquire @lock for reading with softirq processing disabled
 * (pair with _read_unlock_bh()).
 */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
122 | |||
/*
 * Acquire @lock for writing with local interrupts disabled.  Returns
 * the saved interrupt state for _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);		/* save and disable local IRQs */
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
133 | |||
/*
 * Acquire @lock for writing with local interrupts disabled; the
 * previous IRQ state is not saved (pair with _write_unlock_irq()).
 */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
141 | |||
/*
 * Acquire @lock for writing with softirq processing disabled
 * (pair with _write_unlock_bh()).
 */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
149 | |||
/*
 * Acquire @lock with preemption disabled (non-preemptible build).
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
157 | |||
/*
 * Acquire @lock for writing with preemption disabled
 * (non-preemptible build).
 */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
165 | |||
166 | #else /* CONFIG_PREEMPT: */ | ||
167 | |||
/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 *
 * BUILD_LOCK_OPS(op, locktype) expands to the preemptible-kernel
 * versions of _<op>_lock(), _<op>_lock_irqsave(), _<op>_lock_irq()
 * and _<op>_lock_bh(), all built around a trylock-and-back-off loop
 * instead of a blind raw spin.
 */

#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
	preempt_disable(); \
	for (;;) { \
		if (likely(_raw_##op##_trylock(lock))) \
			break; \
		/* Contended: allow preemption while we busy-wait, */ \
		/* and ask the holder to drop the lock ASAP. */ \
		preempt_enable(); \
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while (!op##_can_lock(lock) && (lock)->break_lock) \
			cpu_relax(); \
		preempt_disable(); \
	} \
	(lock)->break_lock = 0; \
} \
\
EXPORT_SYMBOL(_##op##_lock); \
\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
	unsigned long flags; \
\
	preempt_disable(); \
	for (;;) { \
		/* IRQs are kept off only across the trylock itself; */ \
		/* they are restored while we spin waiting. */ \
		local_irq_save(flags); \
		if (likely(_raw_##op##_trylock(lock))) \
			break; \
		local_irq_restore(flags); \
\
		preempt_enable(); \
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while (!op##_can_lock(lock) && (lock)->break_lock) \
			cpu_relax(); \
		preempt_disable(); \
	} \
	(lock)->break_lock = 0; \
	return flags; \
} \
\
EXPORT_SYMBOL(_##op##_lock_irqsave); \
\
void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
{ \
	/* The saved flags are discarded: IRQs just end up disabled. */ \
	_##op##_lock_irqsave(lock); \
} \
\
EXPORT_SYMBOL(_##op##_lock_irq); \
\
void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
	unsigned long flags; \
\
	/* */ \
	/* Careful: we must exclude softirqs too, hence the */ \
	/* irq-disabling. We use the generic preemption-aware */ \
	/* function: */ \
	/**/ \
	flags = _##op##_lock_irqsave(lock); \
	local_bh_disable(); \
	local_irq_restore(flags); \
} \
\
EXPORT_SYMBOL(_##op##_lock_bh)
241 | |||
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 *
 * (The macro body ends without a ';' -- each invocation below
 * supplies the terminating semicolon.)
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
254 | |||
255 | #endif /* CONFIG_PREEMPT */ | ||
256 | |||
/*
 * Release @lock and re-enable preemption (undoes _spin_lock()).
 */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
263 | |||
/*
 * Release the write lock and re-enable preemption
 * (undoes _write_lock()).
 */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
270 | |||
/*
 * Release the read lock and re-enable preemption
 * (undoes _read_lock()).
 */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
277 | |||
/*
 * Release @lock and restore the interrupt state saved by
 * _spin_lock_irqsave(), then re-enable preemption.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
285 | |||
/*
 * Release @lock, unconditionally enable local interrupts and
 * re-enable preemption (undoes _spin_lock_irq()).
 */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
293 | |||
/*
 * Release @lock, then re-enable preemption and softirq processing
 * (reverse order of _spin_lock_bh()).
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
301 | |||
/*
 * Release the read lock and restore the interrupt state saved by
 * _read_lock_irqsave(), then re-enable preemption.
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
309 | |||
/*
 * Release the read lock, unconditionally enable local interrupts and
 * re-enable preemption (undoes _read_lock_irq()).
 */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
317 | |||
/*
 * Release the read lock, then re-enable preemption and softirq
 * processing (reverse order of _read_lock_bh()).
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
325 | |||
/*
 * Release the write lock and restore the interrupt state saved by
 * _write_lock_irqsave(), then re-enable preemption.
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
333 | |||
/*
 * Release the write lock, unconditionally enable local interrupts and
 * re-enable preemption (undoes _write_lock_irq()).
 */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
341 | |||
/*
 * Release the write lock, then re-enable preemption and softirq
 * processing (reverse order of _write_lock_bh()).
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
349 | |||
350 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | ||
351 | { | ||
352 | local_bh_disable(); | ||
353 | preempt_disable(); | ||
354 | if (_raw_spin_trylock(lock)) | ||
355 | return 1; | ||
356 | |||
357 | preempt_enable(); | ||
358 | local_bh_enable(); | ||
359 | return 0; | ||
360 | } | ||
361 | EXPORT_SYMBOL(_spin_trylock_bh); | ||
362 | |||
/*
 * Report whether @addr falls inside the __lockfunc text section,
 * i.e. whether it is the address of one of the lock functions in
 * this file.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];
	unsigned long start = (unsigned long)__lock_text_start;
	unsigned long end = (unsigned long)__lock_text_end;

	return addr >= start && addr < end;
}
EXPORT_SYMBOL(in_lock_functions);