author	Heiko Carstens <heiko.carstens@de.ibm.com>	2009-08-31 08:43:37 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-31 12:08:50 -0400
commit	892a7c67c12da63fa4b51728bbe5b982356a090a (patch)
tree	ba6cb9cf1be394428d9ef2596b0575e28ab0b19a	/kernel/spinlock.c
parent	69d0ee7377eef808e34ba5542b554ec97244b871 (diff)
locking: Allow arch-inlined spinlocks
This allows an architecture to specify, per lock variant, whether the locking code should be kept out-of-line or inlined.

If an architecture wants out-of-line locking code, no change is needed. To force inlining of e.g. spin_lock(), the line:

#define __always_inline__spin_lock

needs to be added to arch/<...>/include/asm/spinlock.h.

If CONFIG_DEBUG_SPINLOCK or CONFIG_GENERIC_LOCKBREAK is defined, the per-architecture defines are (partly) ignored and out-of-line spinlock code is still generated.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.375299024@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
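To make the usage concrete, here is a minimal sketch (not part of the patch shown below; the architecture name "foo" is hypothetical) of what an architecture would add to its spinlock header to get spin_lock() and spin_unlock() inlined:

/* hypothetical excerpt from arch/foo/include/asm/spinlock.h */
#define __always_inline__spin_lock
#define __always_inline__spin_unlock

With such defines in place, the generic spinlock headers (outside this diffstat-limited view) are expected to provide _spin_lock()/_spin_unlock() as inline variants, which in turn makes the new "#ifndef _spin_lock" style guards below skip the out-of-line definitions in kernel/spinlock.c.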
Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--	kernel/spinlock.c	56
1 file changed, 56 insertions, 0 deletions
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c000f5c070b..5ddab730cb2f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,23 +21,29 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
+#ifndef _spin_trylock
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
 	return __spin_trylock(lock);
 }
 EXPORT_SYMBOL(_spin_trylock);
+#endif
 
+#ifndef _read_trylock
 int __lockfunc _read_trylock(rwlock_t *lock)
 {
 	return __read_trylock(lock);
 }
 EXPORT_SYMBOL(_read_trylock);
+#endif
 
+#ifndef _write_trylock
 int __lockfunc _write_trylock(rwlock_t *lock)
 {
 	return __write_trylock(lock);
 }
 EXPORT_SYMBOL(_write_trylock);
+#endif
 
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
@@ -46,77 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
+#ifndef _read_lock
 void __lockfunc _read_lock(rwlock_t *lock)
 {
 	__read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);
+#endif
 
+#ifndef _spin_lock_irqsave
 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 {
 	return __spin_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
 
+#ifndef _spin_lock_irq
 void __lockfunc _spin_lock_irq(spinlock_t *lock)
 {
 	__spin_lock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
+#endif
 
+#ifndef _spin_lock_bh
 void __lockfunc _spin_lock_bh(spinlock_t *lock)
 {
 	__spin_lock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
+#endif
 
+#ifndef _read_lock_irqsave
 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 {
 	return __read_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
 
+#ifndef _read_lock_irq
 void __lockfunc _read_lock_irq(rwlock_t *lock)
 {
 	__read_lock_irq(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
+#endif
 
+#ifndef _read_lock_bh
 void __lockfunc _read_lock_bh(rwlock_t *lock)
 {
 	__read_lock_bh(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
+#endif
 
+#ifndef _write_lock_irqsave
 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 {
 	return __write_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
 
+#ifndef _write_lock_irq
 void __lockfunc _write_lock_irq(rwlock_t *lock)
 {
 	__write_lock_irq(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
+#endif
 
+#ifndef _write_lock_bh
 void __lockfunc _write_lock_bh(rwlock_t *lock)
 {
 	__write_lock_bh(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
+#endif
 
+#ifndef _spin_lock
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
 	__spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock);
+#endif
 
+#ifndef _write_lock
 void __lockfunc _write_lock(rwlock_t *lock)
 {
 	__write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock);
+#endif
 
 #else /* CONFIG_PREEMPT: */
 
@@ -242,83 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif
 
+#ifndef _spin_unlock
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
 	__spin_unlock(lock);
 }
 EXPORT_SYMBOL(_spin_unlock);
+#endif
 
+#ifndef _write_unlock
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
 	__write_unlock(lock);
 }
 EXPORT_SYMBOL(_write_unlock);
+#endif
 
+#ifndef _read_unlock
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
 	__read_unlock(lock);
 }
 EXPORT_SYMBOL(_read_unlock);
+#endif
 
+#ifndef _spin_unlock_irqrestore
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	__spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_spin_unlock_irqrestore);
+#endif
 
+#ifndef _spin_unlock_irq
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
 	__spin_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_irq);
+#endif
 
+#ifndef _spin_unlock_bh
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	__spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
 
+#ifndef _read_unlock_irqrestore
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__read_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_read_unlock_irqrestore);
+#endif
 
+#ifndef _read_unlock_irq
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
 	__read_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_read_unlock_irq);
+#endif
 
+#ifndef _read_unlock_bh
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	__read_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_read_unlock_bh);
+#endif
 
+#ifndef _write_unlock_irqrestore
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__write_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_write_unlock_irqrestore);
+#endif
 
+#ifndef _write_unlock_irq
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
 	__write_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_write_unlock_irq);
+#endif
 
+#ifndef _write_unlock_bh
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	__write_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_write_unlock_bh);
+#endif
 
+#ifndef _spin_trylock_bh
 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
 	return __spin_trylock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
 
 notrace int in_lock_functions(unsigned long addr)
 {