author		Heiko Carstens <heiko.carstens@de.ibm.com>	2009-08-31 08:43:38 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-08-31 12:08:51 -0400
commit		bb7bed082500179519c7caf0678ba3bed9752658 (patch)
tree		59daa305235ec13b70f3f10a8f407df0e8516c62 /include/linux
parent		892a7c67c12da63fa4b51728bbe5b982356a090a (diff)
locking: Simplify spinlock inlining
For !DEBUG_SPINLOCK && !PREEMPT && SMP builds, the spin_unlock()
functions were always inlined via special defines that called
the __raw* functions directly.
The out-of-line variants for these functions would be generated
anyway.
Use the new per-variant lock/unlock mechanism to force inlining
of the unlock functions, as before (the mechanism is sketched
after the tags below). This is not a functional change; it just
removes one additional way of forcing inlining.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.848735034@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
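
For context, the per-variant mechanism referenced above (introduced by
parent commit 892a7c67) works roughly as follows: spinlock_api_smp.h maps
each out-of-line _*_unlock() symbol onto an always-inlined __*_unlock()
helper whenever the corresponding __always_inline__* macro is defined, and
kernel/spinlock.c emits the out-of-line body only when no such mapping
exists. The sketch below is an abridged illustration of that pattern, not
the verbatim kernel source:

/* spinlock_api_smp.h (sketch): substitute the inlined variant on request */
#ifdef __always_inline__spin_unlock
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

/* kernel/spinlock.c (sketch): build the out-of-line variant only when
 * it has not been replaced by the inlined one above */
#ifndef _spin_unlock
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	__spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
#endif

With !DEBUG_SPINLOCK && !PREEMPT, the defines added by this patch make
every unlock call site expand straight to the __*_unlock() inline, which
is the behaviour the removed #else branch in spinlock.h used to provide
by hand.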
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/spinlock.h		46
-rw-r--r--	include/linux/spinlock_api_smp.h	12
2 files changed, 18 insertions(+), 40 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index da76a06556bd..f0ca7a7a1757 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -259,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 
 #define spin_lock_irq(lock)		_spin_lock_irq(lock)
 #define spin_lock_bh(lock)		_spin_lock_bh(lock)
-
 #define read_lock_irq(lock)		_read_lock_irq(lock)
 #define read_lock_bh(lock)		_read_lock_bh(lock)
-
 #define write_lock_irq(lock)		_write_lock_irq(lock)
 #define write_lock_bh(lock)		_write_lock_bh(lock)
-
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
-	!defined(CONFIG_SMP)
-# define spin_unlock(lock)		_spin_unlock(lock)
-# define read_unlock(lock)		_read_unlock(lock)
-# define write_unlock(lock)		_write_unlock(lock)
-# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-# define read_unlock_irq(lock)		_read_unlock_irq(lock)
-# define write_unlock_irq(lock)	_write_unlock_irq(lock)
-#else
-# define spin_unlock(lock) \
-	do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
-	do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
-	do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
-do { \
-	__raw_spin_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define read_unlock_irq(lock) \
-do { \
-	__raw_read_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
-	__raw_write_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-#endif
+#define spin_unlock(lock)		_spin_unlock(lock)
+#define read_unlock(lock)		_read_unlock(lock)
+#define write_unlock(lock)		_write_unlock(lock)
+#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+#define read_unlock_irq(lock)		_read_unlock_irq(lock)
+#define write_unlock_irq(lock)		_write_unlock_irq(lock)
 
 #define spin_unlock_irqrestore(lock, flags) \
 	do { \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 1a411e3fab95..7a7e18fc2415 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,6 +60,18 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
 
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#endif
+
 #ifndef CONFIG_DEBUG_SPINLOCK
 #ifndef CONFIG_GENERIC_LOCKBREAK
 