author		Robin Holt <holt@sgi.com>	2009-04-02 19:59:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-02 22:05:10 -0400
commit		e8c158bb313c1df421eab7dc4299cd39cbbf5895
tree		8a5f1d01e58d0e358b2b0c9407fc494912e83c27
parent		41d577aa35aa0504fe28b76a948908bdb7fbec81
Factor out #ifdefs from kernel/spinlock.c to LOCK_CONTENDED_FLAGS
SGI has observed that on large systems, interrupts are not serviced for long
periods while a CPU waits for an rwlock. The following patch series
re-enables irqs while waiting for the lock, mirroring the code that already
exists for spinlocks.
I have only implemented the ia64 version, because the patch adds some
overhead to the fast path. I assume there is currently no demand for this on
other architectures, because those systems are not as large. Of course, the
possibility of implementing raw_{read|write}_lock_flags for any architecture
is still there.
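For illustration, the arch hook works along these lines: the generic code
saves and disables interrupts, then calls into an arch helper that may
briefly re-enable them while spinning. The following is only a rough sketch
of such a helper, not the actual ia64 code; it uses the pre-existing
__raw_read_trylock()/__raw_read_can_lock() arch primitives and the
irqs_disabled_flags() helper, and the function name is made up:

	/*
	 * Rough sketch only -- not the real ia64 implementation.
	 * Re-enable interrupts while spinning, but only if they were
	 * enabled before the caller's local_irq_save(flags):
	 */
	static inline void sketch_raw_read_lock_flags(raw_rwlock_t *lock,
						      unsigned long flags)
	{
		while (!__raw_read_trylock(lock)) {
			if (!irqs_disabled_flags(flags))
				local_irq_enable();	/* let pending irqs run */
			while (!__raw_read_can_lock(lock))
				cpu_relax();		/* wait for the writer */
			local_irq_disable();		/* retry with irqs off */
		}
	}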
This patch:
The new macro LOCK_CONTENDED_FLAGS expands to the correct implementation
depending on the config options, so that IRQs are re-enabled when possible,
but remain disabled if CONFIG_LOCKDEP is set.
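Concretely, the call site added below in kernel/spinlock.c:

	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
			     _raw_spin_lock_flags, &flags);

expands to LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock) when
CONFIG_LOCKDEP is set (interrupts stay disabled throughout), and to
_raw_spin_lock_flags(lock, &flags) otherwise, where the architecture code
may re-enable interrupts while waiting.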
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
Signed-off-by: Robin Holt <holt@sgi.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/lockdep.h | 17 +++++++++++++++++
 kernel/spinlock.c       | 12 ++----------
 2 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5a58ea3e91e9..da5a5a1f4cd2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -364,6 +364,23 @@ do { \
 
 #endif /* CONFIG_LOCK_STAT */
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * On lockdep we dont want the hand-coded irq-enable of
+ * _raw_*_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+	LOCK_CONTENDED((_lock), (try), (lock))
+
+#else /* CONFIG_LOCKDEP */
+
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+	lockfl((_lock), (flags))
+
+#endif /* CONFIG_LOCKDEP */
+
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void early_init_irq_lock_class(void);
 #else
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 29ab20749dd3..7283c6dc2d59 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -299,16 +299,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	/*
-	 * On lockdep we dont want the hand-coded irq-enable of
-	 * _raw_spin_lock_flags() code, because lockdep assumes
-	 * that interrupts are not re-enabled during lock-acquire:
-	 */
-#ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
-	_raw_spin_lock_flags(lock, &flags);
-#endif
+	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
+				_raw_spin_lock_flags, &flags);
 	return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
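The same macro lets the rwlock irqsave paths gain the irq re-enabling
behaviour later in the series. A sketch of what such a conversion could look
like, assuming an architecture provides _raw_read_lock_flags() (illustrative
only; this hunk is not part of this patch):

	unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
	{
		unsigned long flags;

		local_irq_save(flags);
		preempt_disable();
		rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		/* may re-enable irqs while waiting, unless lockdep is on */
		LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
				     _raw_read_lock_flags, &flags);
		return flags;
	}

On lockdep kernels this degenerates to the old LOCK_CONTENDED() behaviour,
so lockdep's assumption that interrupts stay off during lock-acquire still
holds.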