diff options
author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2009-08-31 08:43:36 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 12:08:50 -0400 |
commit | 69d0ee7377eef808e34ba5542b554ec97244b871 (patch) | |
tree | f46c756b897cf51497fda2ad22f9f12a3512e23b /include/linux/spinlock_api_smp.h | |
parent | 0ee000e5e8fa2e5c760250be0d78d5906e3eb94b (diff) |
locking: Move spinlock function bodies to header file
Move spinlock function bodies to header file by creating a
static inline version of each variant. Use the inline version
on the out-of-line code.
This shouldn't make any difference besides that the spinlock
code can now be used to generate inlined spinlock code.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124417.859022429@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/spinlock_api_smp.h')
-rw-r--r-- | include/linux/spinlock_api_smp.h | 263 |
1 files changed, 263 insertions, 0 deletions
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index d79845d034b5..6b108f5fb149 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -60,4 +60,267 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
61 | __releases(lock); | 61 | __releases(lock); |
62 | 62 | ||
63 | static inline int __spin_trylock(spinlock_t *lock) | ||
64 | { | ||
65 | preempt_disable(); | ||
66 | if (_raw_spin_trylock(lock)) { | ||
67 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
68 | return 1; | ||
69 | } | ||
70 | preempt_enable(); | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static inline int __read_trylock(rwlock_t *lock) | ||
75 | { | ||
76 | preempt_disable(); | ||
77 | if (_raw_read_trylock(lock)) { | ||
78 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
79 | return 1; | ||
80 | } | ||
81 | preempt_enable(); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static inline int __write_trylock(rwlock_t *lock) | ||
86 | { | ||
87 | preempt_disable(); | ||
88 | if (_raw_write_trylock(lock)) { | ||
89 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
90 | return 1; | ||
91 | } | ||
92 | preempt_enable(); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

/*
 * Take @lock for reading: disable preemption, tell lockdep about the
 * acquisition, then acquire via LOCK_CONTENDED (which uses the given
 * trylock fast path before falling back to the spinning slow path).
 */
static inline void __read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
109 | |||
/*
 * Disable local interrupts (saving their previous state), disable
 * preemption, then take @lock.  Returns the saved irq flags so the
 * caller can pass them to __spin_unlock_irqrestore() later.
 */
static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we dont want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
129 | |||
/*
 * Disable local interrupts and preemption, then take @lock.
 * Unlike the irqsave variant, the previous irq state is not saved.
 */
static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
137 | |||
/*
 * Disable softirq (bottom-half) processing and preemption, then take
 * @lock.  Pairs with __spin_unlock_bh().
 */
static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
145 | |||
/*
 * Disable local interrupts (saving their state) and preemption, then
 * take @lock for reading.  LOCK_CONTENDED_FLAGS lets the arch-specific
 * _raw_read_lock_flags() variant be used where available.  Returns the
 * saved irq flags for __read_unlock_irqrestore().
 */
static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}
157 | |||
/*
 * Disable local interrupts and preemption, then take @lock for
 * reading.  The previous irq state is not saved.
 */
static inline void __read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
165 | |||
/*
 * Disable softirq (bottom-half) processing and preemption, then take
 * @lock for reading.  Pairs with __read_unlock_bh().
 */
static inline void __read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
173 | |||
/*
 * Disable local interrupts (saving their state) and preemption, then
 * take @lock for writing.  LOCK_CONTENDED_FLAGS lets the arch-specific
 * _raw_write_lock_flags() variant be used where available.  Returns the
 * saved irq flags for __write_unlock_irqrestore().
 */
static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}
185 | |||
/*
 * Disable local interrupts and preemption, then take @lock for
 * writing.  The previous irq state is not saved.
 */
static inline void __write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
193 | |||
/*
 * Disable softirq (bottom-half) processing and preemption, then take
 * @lock for writing.  Pairs with __write_unlock_bh().
 */
static inline void __write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
201 | |||
/*
 * Take @lock: disable preemption, tell lockdep, then acquire via the
 * trylock fast path / spinning slow path.
 */
static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
208 | |||
/*
 * Take @lock for writing: disable preemption, tell lockdep, then
 * acquire via the trylock fast path / spinning slow path.
 */
static inline void __write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
217 | |||
/*
 * Release @lock: drop the lockdep state, release the low-level lock,
 * then re-enable preemption (pairs with the lock-side preempt_disable()).
 */
static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
224 | |||
/*
 * Release the write side of @lock, then re-enable preemption
 * (pairs with the lock-side preempt_disable()).
 */
static inline void __write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
231 | |||
/*
 * Release the read side of @lock, then re-enable preemption
 * (pairs with the lock-side preempt_disable()).
 */
static inline void __read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
238 | |||
/*
 * Release @lock and restore the interrupt state that was saved by
 * __spin_lock_irqsave() in @flags, then re-enable preemption.
 */
static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
247 | |||
/*
 * Release @lock, unconditionally re-enable local interrupts, then
 * re-enable preemption.  Pairs with __spin_lock_irq().
 */
static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
255 | |||
/*
 * Release @lock and re-enable softirq processing.  Pairs with
 * __spin_lock_bh().  preempt_enable_no_resched() is used because
 * local_bh_enable_ip() handles the re-enable; the caller's return
 * address is passed so the bh-enable event is attributed to it.
 */
static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
263 | |||
/*
 * Release the read side of @lock and restore the interrupt state saved
 * by __read_lock_irqsave() in @flags, then re-enable preemption.
 */
static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
271 | |||
/*
 * Release the read side of @lock, unconditionally re-enable local
 * interrupts, then re-enable preemption.  Pairs with __read_lock_irq().
 */
static inline void __read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
279 | |||
/*
 * Release the read side of @lock and re-enable softirq processing.
 * Pairs with __read_lock_bh(); see __spin_unlock_bh() for why
 * preempt_enable_no_resched() + local_bh_enable_ip() is used here.
 */
static inline void __read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
287 | |||
/*
 * Release the write side of @lock and restore the interrupt state
 * saved by __write_lock_irqsave() in @flags, then re-enable preemption.
 */
static inline void __write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
296 | |||
/*
 * Release the write side of @lock, unconditionally re-enable local
 * interrupts, then re-enable preemption.  Pairs with __write_lock_irq().
 */
static inline void __write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
304 | |||
/*
 * Release the write side of @lock and re-enable softirq processing.
 * Pairs with __write_lock_bh(); see __spin_unlock_bh() for why
 * preempt_enable_no_resched() + local_bh_enable_ip() is used here.
 */
static inline void __write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
312 | |||
313 | static inline int __spin_trylock_bh(spinlock_t *lock) | ||
314 | { | ||
315 | local_bh_disable(); | ||
316 | preempt_disable(); | ||
317 | if (_raw_spin_trylock(lock)) { | ||
318 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
319 | return 1; | ||
320 | } | ||
321 | preempt_enable_no_resched(); | ||
322 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
323 | return 0; | ||
324 | } | ||
325 | |||
63 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | 326 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |