diff options
author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2009-08-31 08:43:37 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 12:08:50 -0400 |
commit | 892a7c67c12da63fa4b51728bbe5b982356a090a (patch) | |
tree | ba6cb9cf1be394428d9ef2596b0575e28ab0b19a | |
parent | 69d0ee7377eef808e34ba5542b554ec97244b871 (diff) |
locking: Allow arch-inlined spinlocks
This allows an architecture to specify per lock variant if the
locking code should be kept out-of-line or inlined.
If an architecture wants out-of-line locking code, no change is
needed. To force inlining of e.g. spin_lock() the line:
#define __always_inline__spin_lock
needs to be added to arch/<...>/include/asm/spinlock.h
If CONFIG_DEBUG_SPINLOCK or CONFIG_GENERIC_LOCKBREAK are
defined, the per-architecture defines are (partly) ignored and
out-of-line spinlock code will still be generated.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.375299024@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | include/linux/spinlock_api_smp.h | 119 | ||||
-rw-r--r-- | kernel/spinlock.c | 56 |
2 files changed, 175 insertions, 0 deletions
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 6b108f5fb149..1a411e3fab95 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -60,6 +60,125 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
61 | __releases(lock); | 61 | __releases(lock); |
62 | 62 | ||
63 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
64 | #ifndef CONFIG_GENERIC_LOCKBREAK | ||
65 | |||
66 | #ifdef __always_inline__spin_lock | ||
67 | #define _spin_lock(lock) __spin_lock(lock) | ||
68 | #endif | ||
69 | |||
70 | #ifdef __always_inline__read_lock | ||
71 | #define _read_lock(lock) __read_lock(lock) | ||
72 | #endif | ||
73 | |||
74 | #ifdef __always_inline__write_lock | ||
75 | #define _write_lock(lock) __write_lock(lock) | ||
76 | #endif | ||
77 | |||
78 | #ifdef __always_inline__spin_lock_bh | ||
79 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | ||
80 | #endif | ||
81 | |||
82 | #ifdef __always_inline__read_lock_bh | ||
83 | #define _read_lock_bh(lock) __read_lock_bh(lock) | ||
84 | #endif | ||
85 | |||
86 | #ifdef __always_inline__write_lock_bh | ||
87 | #define _write_lock_bh(lock) __write_lock_bh(lock) | ||
88 | #endif | ||
89 | |||
90 | #ifdef __always_inline__spin_lock_irq | ||
91 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | ||
92 | #endif | ||
93 | |||
94 | #ifdef __always_inline__read_lock_irq | ||
95 | #define _read_lock_irq(lock) __read_lock_irq(lock) | ||
96 | #endif | ||
97 | |||
98 | #ifdef __always_inline__write_lock_irq | ||
99 | #define _write_lock_irq(lock) __write_lock_irq(lock) | ||
100 | #endif | ||
101 | |||
102 | #ifdef __always_inline__spin_lock_irqsave | ||
103 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | ||
104 | #endif | ||
105 | |||
106 | #ifdef __always_inline__read_lock_irqsave | ||
107 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | ||
108 | #endif | ||
109 | |||
110 | #ifdef __always_inline__write_lock_irqsave | ||
111 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | ||
112 | #endif | ||
113 | |||
114 | #endif /* !CONFIG_GENERIC_LOCKBREAK */ | ||
115 | |||
116 | #ifdef __always_inline__spin_trylock | ||
117 | #define _spin_trylock(lock) __spin_trylock(lock) | ||
118 | #endif | ||
119 | |||
120 | #ifdef __always_inline__read_trylock | ||
121 | #define _read_trylock(lock) __read_trylock(lock) | ||
122 | #endif | ||
123 | |||
124 | #ifdef __always_inline__write_trylock | ||
125 | #define _write_trylock(lock) __write_trylock(lock) | ||
126 | #endif | ||
127 | |||
128 | #ifdef __always_inline__spin_trylock_bh | ||
129 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | ||
130 | #endif | ||
131 | |||
132 | #ifdef __always_inline__spin_unlock | ||
133 | #define _spin_unlock(lock) __spin_unlock(lock) | ||
134 | #endif | ||
135 | |||
136 | #ifdef __always_inline__read_unlock | ||
137 | #define _read_unlock(lock) __read_unlock(lock) | ||
138 | #endif | ||
139 | |||
140 | #ifdef __always_inline__write_unlock | ||
141 | #define _write_unlock(lock) __write_unlock(lock) | ||
142 | #endif | ||
143 | |||
144 | #ifdef __always_inline__spin_unlock_bh | ||
145 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | ||
146 | #endif | ||
147 | |||
148 | #ifdef __always_inline__read_unlock_bh | ||
149 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | ||
150 | #endif | ||
151 | |||
152 | #ifdef __always_inline__write_unlock_bh | ||
153 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | ||
154 | #endif | ||
155 | |||
156 | #ifdef __always_inline__spin_unlock_irq | ||
157 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | ||
158 | #endif | ||
159 | |||
160 | #ifdef __always_inline__read_unlock_irq | ||
161 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | ||
162 | #endif | ||
163 | |||
164 | #ifdef __always_inline__write_unlock_irq | ||
165 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | ||
166 | #endif | ||
167 | |||
168 | #ifdef __always_inline__spin_unlock_irqrestore | ||
169 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | ||
170 | #endif | ||
171 | |||
172 | #ifdef __always_inline__read_unlock_irqrestore | ||
173 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | ||
174 | #endif | ||
175 | |||
176 | #ifdef __always_inline__write_unlock_irqrestore | ||
177 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | ||
178 | #endif | ||
179 | |||
180 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
181 | |||
63 | static inline int __spin_trylock(spinlock_t *lock) | 182 | static inline int __spin_trylock(spinlock_t *lock) |
64 | { | 183 | { |
65 | preempt_disable(); | 184 | preempt_disable(); |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 2c000f5c070b..5ddab730cb2f 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -21,23 +21,29 @@ | |||
21 | #include <linux/debug_locks.h> | 21 | #include <linux/debug_locks.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
24 | #ifndef _spin_trylock | ||
24 | int __lockfunc _spin_trylock(spinlock_t *lock) | 25 | int __lockfunc _spin_trylock(spinlock_t *lock) |
25 | { | 26 | { |
26 | return __spin_trylock(lock); | 27 | return __spin_trylock(lock); |
27 | } | 28 | } |
28 | EXPORT_SYMBOL(_spin_trylock); | 29 | EXPORT_SYMBOL(_spin_trylock); |
30 | #endif | ||
29 | 31 | ||
32 | #ifndef _read_trylock | ||
30 | int __lockfunc _read_trylock(rwlock_t *lock) | 33 | int __lockfunc _read_trylock(rwlock_t *lock) |
31 | { | 34 | { |
32 | return __read_trylock(lock); | 35 | return __read_trylock(lock); |
33 | } | 36 | } |
34 | EXPORT_SYMBOL(_read_trylock); | 37 | EXPORT_SYMBOL(_read_trylock); |
38 | #endif | ||
35 | 39 | ||
40 | #ifndef _write_trylock | ||
36 | int __lockfunc _write_trylock(rwlock_t *lock) | 41 | int __lockfunc _write_trylock(rwlock_t *lock) |
37 | { | 42 | { |
38 | return __write_trylock(lock); | 43 | return __write_trylock(lock); |
39 | } | 44 | } |
40 | EXPORT_SYMBOL(_write_trylock); | 45 | EXPORT_SYMBOL(_write_trylock); |
46 | #endif | ||
41 | 47 | ||
42 | /* | 48 | /* |
43 | * If lockdep is enabled then we use the non-preemption spin-ops | 49 | * If lockdep is enabled then we use the non-preemption spin-ops |
@@ -46,77 +52,101 @@ EXPORT_SYMBOL(_write_trylock); | |||
46 | */ | 52 | */ |
47 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 53 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
48 | 54 | ||
55 | #ifndef _read_lock | ||
49 | void __lockfunc _read_lock(rwlock_t *lock) | 56 | void __lockfunc _read_lock(rwlock_t *lock) |
50 | { | 57 | { |
51 | __read_lock(lock); | 58 | __read_lock(lock); |
52 | } | 59 | } |
53 | EXPORT_SYMBOL(_read_lock); | 60 | EXPORT_SYMBOL(_read_lock); |
61 | #endif | ||
54 | 62 | ||
63 | #ifndef _spin_lock_irqsave | ||
55 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | 64 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) |
56 | { | 65 | { |
57 | return __spin_lock_irqsave(lock); | 66 | return __spin_lock_irqsave(lock); |
58 | } | 67 | } |
59 | EXPORT_SYMBOL(_spin_lock_irqsave); | 68 | EXPORT_SYMBOL(_spin_lock_irqsave); |
69 | #endif | ||
60 | 70 | ||
71 | #ifndef _spin_lock_irq | ||
61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | 72 | void __lockfunc _spin_lock_irq(spinlock_t *lock) |
62 | { | 73 | { |
63 | __spin_lock_irq(lock); | 74 | __spin_lock_irq(lock); |
64 | } | 75 | } |
65 | EXPORT_SYMBOL(_spin_lock_irq); | 76 | EXPORT_SYMBOL(_spin_lock_irq); |
77 | #endif | ||
66 | 78 | ||
79 | #ifndef _spin_lock_bh | ||
67 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | 80 | void __lockfunc _spin_lock_bh(spinlock_t *lock) |
68 | { | 81 | { |
69 | __spin_lock_bh(lock); | 82 | __spin_lock_bh(lock); |
70 | } | 83 | } |
71 | EXPORT_SYMBOL(_spin_lock_bh); | 84 | EXPORT_SYMBOL(_spin_lock_bh); |
85 | #endif | ||
72 | 86 | ||
87 | #ifndef _read_lock_irqsave | ||
73 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | 88 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) |
74 | { | 89 | { |
75 | return __read_lock_irqsave(lock); | 90 | return __read_lock_irqsave(lock); |
76 | } | 91 | } |
77 | EXPORT_SYMBOL(_read_lock_irqsave); | 92 | EXPORT_SYMBOL(_read_lock_irqsave); |
93 | #endif | ||
78 | 94 | ||
95 | #ifndef _read_lock_irq | ||
79 | void __lockfunc _read_lock_irq(rwlock_t *lock) | 96 | void __lockfunc _read_lock_irq(rwlock_t *lock) |
80 | { | 97 | { |
81 | __read_lock_irq(lock); | 98 | __read_lock_irq(lock); |
82 | } | 99 | } |
83 | EXPORT_SYMBOL(_read_lock_irq); | 100 | EXPORT_SYMBOL(_read_lock_irq); |
101 | #endif | ||
84 | 102 | ||
103 | #ifndef _read_lock_bh | ||
85 | void __lockfunc _read_lock_bh(rwlock_t *lock) | 104 | void __lockfunc _read_lock_bh(rwlock_t *lock) |
86 | { | 105 | { |
87 | __read_lock_bh(lock); | 106 | __read_lock_bh(lock); |
88 | } | 107 | } |
89 | EXPORT_SYMBOL(_read_lock_bh); | 108 | EXPORT_SYMBOL(_read_lock_bh); |
109 | #endif | ||
90 | 110 | ||
111 | #ifndef _write_lock_irqsave | ||
91 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | 112 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) |
92 | { | 113 | { |
93 | return __write_lock_irqsave(lock); | 114 | return __write_lock_irqsave(lock); |
94 | } | 115 | } |
95 | EXPORT_SYMBOL(_write_lock_irqsave); | 116 | EXPORT_SYMBOL(_write_lock_irqsave); |
117 | #endif | ||
96 | 118 | ||
119 | #ifndef _write_lock_irq | ||
97 | void __lockfunc _write_lock_irq(rwlock_t *lock) | 120 | void __lockfunc _write_lock_irq(rwlock_t *lock) |
98 | { | 121 | { |
99 | __write_lock_irq(lock); | 122 | __write_lock_irq(lock); |
100 | } | 123 | } |
101 | EXPORT_SYMBOL(_write_lock_irq); | 124 | EXPORT_SYMBOL(_write_lock_irq); |
125 | #endif | ||
102 | 126 | ||
127 | #ifndef _write_lock_bh | ||
103 | void __lockfunc _write_lock_bh(rwlock_t *lock) | 128 | void __lockfunc _write_lock_bh(rwlock_t *lock) |
104 | { | 129 | { |
105 | __write_lock_bh(lock); | 130 | __write_lock_bh(lock); |
106 | } | 131 | } |
107 | EXPORT_SYMBOL(_write_lock_bh); | 132 | EXPORT_SYMBOL(_write_lock_bh); |
133 | #endif | ||
108 | 134 | ||
135 | #ifndef _spin_lock | ||
109 | void __lockfunc _spin_lock(spinlock_t *lock) | 136 | void __lockfunc _spin_lock(spinlock_t *lock) |
110 | { | 137 | { |
111 | __spin_lock(lock); | 138 | __spin_lock(lock); |
112 | } | 139 | } |
113 | EXPORT_SYMBOL(_spin_lock); | 140 | EXPORT_SYMBOL(_spin_lock); |
141 | #endif | ||
114 | 142 | ||
143 | #ifndef _write_lock | ||
115 | void __lockfunc _write_lock(rwlock_t *lock) | 144 | void __lockfunc _write_lock(rwlock_t *lock) |
116 | { | 145 | { |
117 | __write_lock(lock); | 146 | __write_lock(lock); |
118 | } | 147 | } |
119 | EXPORT_SYMBOL(_write_lock); | 148 | EXPORT_SYMBOL(_write_lock); |
149 | #endif | ||
120 | 150 | ||
121 | #else /* CONFIG_PREEMPT: */ | 151 | #else /* CONFIG_PREEMPT: */ |
122 | 152 | ||
@@ -242,83 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock); | |||
242 | 272 | ||
243 | #endif | 273 | #endif |
244 | 274 | ||
275 | #ifndef _spin_unlock | ||
245 | void __lockfunc _spin_unlock(spinlock_t *lock) | 276 | void __lockfunc _spin_unlock(spinlock_t *lock) |
246 | { | 277 | { |
247 | __spin_unlock(lock); | 278 | __spin_unlock(lock); |
248 | } | 279 | } |
249 | EXPORT_SYMBOL(_spin_unlock); | 280 | EXPORT_SYMBOL(_spin_unlock); |
281 | #endif | ||
250 | 282 | ||
283 | #ifndef _write_unlock | ||
251 | void __lockfunc _write_unlock(rwlock_t *lock) | 284 | void __lockfunc _write_unlock(rwlock_t *lock) |
252 | { | 285 | { |
253 | __write_unlock(lock); | 286 | __write_unlock(lock); |
254 | } | 287 | } |
255 | EXPORT_SYMBOL(_write_unlock); | 288 | EXPORT_SYMBOL(_write_unlock); |
289 | #endif | ||
256 | 290 | ||
291 | #ifndef _read_unlock | ||
257 | void __lockfunc _read_unlock(rwlock_t *lock) | 292 | void __lockfunc _read_unlock(rwlock_t *lock) |
258 | { | 293 | { |
259 | __read_unlock(lock); | 294 | __read_unlock(lock); |
260 | } | 295 | } |
261 | EXPORT_SYMBOL(_read_unlock); | 296 | EXPORT_SYMBOL(_read_unlock); |
297 | #endif | ||
262 | 298 | ||
299 | #ifndef _spin_unlock_irqrestore | ||
263 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 300 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
264 | { | 301 | { |
265 | __spin_unlock_irqrestore(lock, flags); | 302 | __spin_unlock_irqrestore(lock, flags); |
266 | } | 303 | } |
267 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 304 | EXPORT_SYMBOL(_spin_unlock_irqrestore); |
305 | #endif | ||
268 | 306 | ||
307 | #ifndef _spin_unlock_irq | ||
269 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) | 308 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) |
270 | { | 309 | { |
271 | __spin_unlock_irq(lock); | 310 | __spin_unlock_irq(lock); |
272 | } | 311 | } |
273 | EXPORT_SYMBOL(_spin_unlock_irq); | 312 | EXPORT_SYMBOL(_spin_unlock_irq); |
313 | #endif | ||
274 | 314 | ||
315 | #ifndef _spin_unlock_bh | ||
275 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) | 316 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) |
276 | { | 317 | { |
277 | __spin_unlock_bh(lock); | 318 | __spin_unlock_bh(lock); |
278 | } | 319 | } |
279 | EXPORT_SYMBOL(_spin_unlock_bh); | 320 | EXPORT_SYMBOL(_spin_unlock_bh); |
321 | #endif | ||
280 | 322 | ||
323 | #ifndef _read_unlock_irqrestore | ||
281 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 324 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
282 | { | 325 | { |
283 | __read_unlock_irqrestore(lock, flags); | 326 | __read_unlock_irqrestore(lock, flags); |
284 | } | 327 | } |
285 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 328 | EXPORT_SYMBOL(_read_unlock_irqrestore); |
329 | #endif | ||
286 | 330 | ||
331 | #ifndef _read_unlock_irq | ||
287 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 332 | void __lockfunc _read_unlock_irq(rwlock_t *lock) |
288 | { | 333 | { |
289 | __read_unlock_irq(lock); | 334 | __read_unlock_irq(lock); |
290 | } | 335 | } |
291 | EXPORT_SYMBOL(_read_unlock_irq); | 336 | EXPORT_SYMBOL(_read_unlock_irq); |
337 | #endif | ||
292 | 338 | ||
339 | #ifndef _read_unlock_bh | ||
293 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 340 | void __lockfunc _read_unlock_bh(rwlock_t *lock) |
294 | { | 341 | { |
295 | __read_unlock_bh(lock); | 342 | __read_unlock_bh(lock); |
296 | } | 343 | } |
297 | EXPORT_SYMBOL(_read_unlock_bh); | 344 | EXPORT_SYMBOL(_read_unlock_bh); |
345 | #endif | ||
298 | 346 | ||
347 | #ifndef _write_unlock_irqrestore | ||
299 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 348 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
300 | { | 349 | { |
301 | __write_unlock_irqrestore(lock, flags); | 350 | __write_unlock_irqrestore(lock, flags); |
302 | } | 351 | } |
303 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 352 | EXPORT_SYMBOL(_write_unlock_irqrestore); |
353 | #endif | ||
304 | 354 | ||
355 | #ifndef _write_unlock_irq | ||
305 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 356 | void __lockfunc _write_unlock_irq(rwlock_t *lock) |
306 | { | 357 | { |
307 | __write_unlock_irq(lock); | 358 | __write_unlock_irq(lock); |
308 | } | 359 | } |
309 | EXPORT_SYMBOL(_write_unlock_irq); | 360 | EXPORT_SYMBOL(_write_unlock_irq); |
361 | #endif | ||
310 | 362 | ||
363 | #ifndef _write_unlock_bh | ||
311 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 364 | void __lockfunc _write_unlock_bh(rwlock_t *lock) |
312 | { | 365 | { |
313 | __write_unlock_bh(lock); | 366 | __write_unlock_bh(lock); |
314 | } | 367 | } |
315 | EXPORT_SYMBOL(_write_unlock_bh); | 368 | EXPORT_SYMBOL(_write_unlock_bh); |
369 | #endif | ||
316 | 370 | ||
371 | #ifndef _spin_trylock_bh | ||
317 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | 372 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) |
318 | { | 373 | { |
319 | return __spin_trylock_bh(lock); | 374 | return __spin_trylock_bh(lock); |
320 | } | 375 | } |
321 | EXPORT_SYMBOL(_spin_trylock_bh); | 376 | EXPORT_SYMBOL(_spin_trylock_bh); |
377 | #endif | ||
322 | 378 | ||
323 | notrace int in_lock_functions(unsigned long addr) | 379 | notrace int in_lock_functions(unsigned long addr) |
324 | { | 380 | { |