path: root/kernel/locking
author:    Waiman Long <longman@redhat.com>  2018-10-02 16:19:17 -0400
committer: Ingo Molnar <mingo@kernel.org>  2018-10-03 02:46:02 -0400
commit:    8ee10862476ef8b9e81e5b521205fd5c620b4ffb (patch)
tree:      1a84b111fdc5b1ce298da14c4b951be6e377f534 /kernel/locking
parent:    44318d5b07be7d7cfe718aa22ea3b2577361a0b5 (diff)
locking/lockdep: Eliminate redundant IRQs check in __lock_acquire()
The static __lock_acquire() function has only two callers:

 1) lock_acquire()
 2) reacquire_held_locks()

In lock_acquire(), raw_local_irq_save() is called beforehand, so IRQs must already be disabled when __lock_acquire() runs and the check:

  DEBUG_LOCKS_WARN_ON(!irqs_disabled())

is redundant on that path. Move the check to reacquire_held_locks() to eliminate the redundant code from the lock_acquire() path.

Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1538511560-10090-3-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
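For context, the caller-side IRQ handling the message refers to looks roughly like the sketch below. This is an abridged, non-authoritative rendering of the lock_acquire() wrapper in kernel/locking/lockdep.c from that era (recursion guard and tracepoint omitted): raw_local_irq_save() has already turned interrupts off before __lock_acquire() is entered, which is what makes the in-function assertion redundant on this path.

/*
 * Abridged sketch of the lock_acquire() wrapper (recursion guard and
 * tracepoint omitted): interrupts are disabled via raw_local_irq_save()
 * before __lock_acquire() is called, so the irqs_disabled() assertion
 * inside __lock_acquire() could never fire on this path.
 */
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                  int trylock, int read, int check,
                  struct lockdep_map *nest_lock, unsigned long ip)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* IRQs are off from here on */
        check_flags(flags);

        __lock_acquire(lock, subclass, trylock, read, check,
                       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);

        raw_local_irq_restore(flags);
}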
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/lockdep.c | 15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index fa82d55279fe..a5d7db558928 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3193,6 +3193,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
+ *
+ * The callers must make sure that IRQs are disabled before calling it,
+ * otherwise we could get an interrupt which would want to take locks,
+ * which would end up in lockdep again.
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                           int trylock, int read, int check, int hardirqs_off,
@@ -3210,14 +3214,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (unlikely(!debug_locks))
                return 0;

-       /*
-        * Lockdep should run with IRQs disabled, otherwise we could
-        * get an interrupt which would want to take locks, which would
-        * end up in lockdep and have you got a head-ache already?
-        */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-               return 0;
-
        if (!prove_locking || lock->key == &__lockdep_no_validate__)
                check = 0;

@@ -3474,6 +3470,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 {
        struct held_lock *hlock;

+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return 0;
+
        for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
                if (!__lock_acquire(hlock->instance,
                                hlock_class(hlock)->subclass,