author     Heiko Carstens <heiko.carstens@de.ibm.com>  2009-08-31 08:43:37 -0400
committer  Ingo Molnar <mingo@elte.hu>                 2009-08-31 12:08:50 -0400
commit     892a7c67c12da63fa4b51728bbe5b982356a090a
tree       ba6cb9cf1be394428d9ef2596b0575e28ab0b19a /include/linux/spinlock_api_smp.h
parent     69d0ee7377eef808e34ba5542b554ec97244b871
locking: Allow arch-inlined spinlocks
This allows an architecture to specify, per lock variant, whether the
locking code should be kept out-of-line or inlined.
If an architecture wants out-of-line locking code, no change is
needed. To force inlining of e.g. spin_lock(), the line
#define __always_inline__spin_lock
needs to be added to arch/<...>/include/asm/spinlock.h.
If CONFIG_DEBUG_SPINLOCK or CONFIG_GENERIC_LOCKBREAK is defined,
the per-architecture defines are (partly) ignored and out-of-line
spinlock code is still generated.
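For illustration, a minimal sketch of what such an opt-in could look
like in an architecture's spinlock header; the particular variants
chosen below are hypothetical, and each architecture picks its own set:

    /* arch/<...>/include/asm/spinlock.h (illustrative sketch) */
    /* Ask the generic locking code to expand these operations at
     * the call site instead of calling the out-of-line functions. */
    #define __always_inline__spin_lock
    #define __always_inline__spin_unlock
    #define __always_inline__spin_lock_irqsave
    #define __always_inline__spin_unlock_irqrestore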
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.375299024@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/spinlock_api_smp.h')
-rw-r--r--  include/linux/spinlock_api_smp.h | 119
1 file changed, 119 insertions(+), 0 deletions(-)
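The added block below works by plain macro aliasing: when an
architecture defines __always_inline__<op>, the API name _<op>() is
redefined to the inline __<op>() implementation that already lives in
this header (moved there by the parent commit), so calls compile to the
inline body; otherwise the name keeps resolving to the out-of-line
function. A self-contained sketch of the same pattern, using
hypothetical demo names rather than the kernel's:

    #include <stdio.h>

    /* Inline implementation, always visible in the header. */
    static inline void __demo_lock(int *lock)
    {
            *lock = 1; /* stand-in for the real acquire sequence */
    }

    /* An "architecture" opts in by defining this macro. */
    #define __always_inline__demo_lock

    #ifdef __always_inline__demo_lock
    #define _demo_lock(lock) __demo_lock(lock) /* inlined at the call site */
    #else
    void _demo_lock(int *lock); /* out-of-line definition elsewhere */
    #endif

    int main(void)
    {
            int lock = 0;
            _demo_lock(&lock); /* expands to __demo_lock(&lock) */
            printf("lock = %d\n", lock);
            return 0;
    }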
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b108f5fb149..1a411e3fab95 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,6 +60,125 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 	__releases(lock);
 
+#ifndef CONFIG_DEBUG_SPINLOCK
+#ifndef CONFIG_GENERIC_LOCKBREAK
+
+#ifdef __always_inline__spin_lock
+#define _spin_lock(lock) __spin_lock(lock)
+#endif
+
+#ifdef __always_inline__read_lock
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef __always_inline__write_lock
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_bh
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_lock_bh
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_lock_bh
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irq
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irq
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irq
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irqsave
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irqsave
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irqsave
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK */
+
+#ifdef __always_inline__spin_trylock
+#define _spin_trylock(lock) __spin_trylock(lock)
+#endif
+
+#ifdef __always_inline__read_trylock
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef __always_inline__write_trylock
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef __always_inline__spin_trylock_bh
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock
+#define _spin_unlock(lock) __spin_unlock(lock)
+#endif
+
+#ifdef __always_inline__read_unlock
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef __always_inline__write_unlock
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_bh
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_bh
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_bh
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irq
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_irq
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_irq
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irqrestore
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__read_unlock_irqrestore
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__write_unlock_irqrestore
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
 static inline int __spin_trylock(spinlock_t *lock)
 {
 	preempt_disable();