aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking
diff options
context:
space:
mode:
authorJ. R. Okajima <hooanon05g@gmail.com>2017-02-02 11:38:15 -0500
committerIngo Molnar <mingo@kernel.org>2017-03-16 04:57:06 -0400
commit41c2c5b86a5e1a691ddacfc03b631b87a0b19043 (patch)
treebdc9116635fd94cbb9f3299ec493dbacac2a0871 /kernel/locking
parentf2c716e4dd2489200589f90be1c4b82787bcc1e9 (diff)
locking/lockdep: Factor out the find_held_lock() helper function
A simple consolidation to factor out repeated patterns. The behaviour should not change. Signed-off-by: J. R. Okajima <hooanon05g@gmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1486053497-9948-1-git-send-email-hooanon05g@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--kernel/locking/lockdep.c114
1 files changed, 54 insertions, 60 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a95e5d1f4a9c..0d28b8259b9a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3437,13 +3437,49 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3437 return 0; 3437 return 0;
3438} 3438}
3439 3439
/*
 * find_held_lock() - locate the entry for @lock on curr's held-lock stack.
 *
 * Scans curr->held_locks from the top of the stack (depth - 1) downward,
 * stopping if the walk would cross an irq-context boundary.
 *
 * Returns the matching held_lock and stores its stack index in *idx on
 * success; returns NULL if no match is found within the same irq context
 * (*idx is then left at the last index examined).
 */
3440/* @depth must not be zero */
3441static struct held_lock *find_held_lock(struct task_struct *curr,
3442 struct lockdep_map *lock,
3443 unsigned int depth, int *idx)
3444{
3445 struct held_lock *ret, *hlock, *prev_hlock;
3446 int i;
3447
 /* Fast path: the lock looked up is the most recently acquired one. */
3448 i = depth - 1;
3449 hlock = curr->held_locks + i;
3450 ret = hlock;
3451 if (match_held_lock(hlock, lock))
3452 goto out;
3453
 /* Walk down the stack; prev_hlock trails one entry above hlock. */
3454 ret = NULL;
3455 for (i--, prev_hlock = hlock--;
3456 i >= 0;
3457 i--, prev_hlock = hlock--) {
3458 /*
3459 * We must not cross into another context:
3460 */
3461 if (prev_hlock->irq_context != hlock->irq_context) {
3462 ret = NULL;
3463 break;
3464 }
3465 if (match_held_lock(hlock, lock)) {
3466 ret = hlock;
3467 break;
3468 }
3469 }
3470
3471out:
3472 *idx = i;
3473 return ret;
3474}
3475
3440static int 3476static int
3441__lock_set_class(struct lockdep_map *lock, const char *name, 3477__lock_set_class(struct lockdep_map *lock, const char *name,
3442 struct lock_class_key *key, unsigned int subclass, 3478 struct lock_class_key *key, unsigned int subclass,
3443 unsigned long ip) 3479 unsigned long ip)
3444{ 3480{
3445 struct task_struct *curr = current; 3481 struct task_struct *curr = current;
3446 struct held_lock *hlock, *prev_hlock; 3482 struct held_lock *hlock;
3447 struct lock_class *class; 3483 struct lock_class *class;
3448 unsigned int depth; 3484 unsigned int depth;
3449 int i; 3485 int i;
@@ -3456,21 +3492,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
3456 if (DEBUG_LOCKS_WARN_ON(!depth)) 3492 if (DEBUG_LOCKS_WARN_ON(!depth))
3457 return 0; 3493 return 0;
3458 3494
3459 prev_hlock = NULL; 3495 hlock = find_held_lock(curr, lock, depth, &i);
3460 for (i = depth-1; i >= 0; i--) { 3496 if (!hlock)
3461 hlock = curr->held_locks + i; 3497 return print_unlock_imbalance_bug(curr, lock, ip);
3462 /*
3463 * We must not cross into another context:
3464 */
3465 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3466 break;
3467 if (match_held_lock(hlock, lock))
3468 goto found_it;
3469 prev_hlock = hlock;
3470 }
3471 return print_unlock_imbalance_bug(curr, lock, ip);
3472 3498
3473found_it:
3474 lockdep_init_map(lock, name, key, 0); 3499 lockdep_init_map(lock, name, key, 0);
3475 class = register_lock_class(lock, subclass, 0); 3500 class = register_lock_class(lock, subclass, 0);
3476 hlock->class_idx = class - lock_classes + 1; 3501 hlock->class_idx = class - lock_classes + 1;
@@ -3508,7 +3533,7 @@ static int
3508__lock_release(struct lockdep_map *lock, int nested, unsigned long ip) 3533__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3509{ 3534{
3510 struct task_struct *curr = current; 3535 struct task_struct *curr = current;
3511 struct held_lock *hlock, *prev_hlock; 3536 struct held_lock *hlock;
3512 unsigned int depth; 3537 unsigned int depth;
3513 int i; 3538 int i;
3514 3539
@@ -3527,21 +3552,10 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3527 * Check whether the lock exists in the current stack 3552 * Check whether the lock exists in the current stack
3528 * of held locks: 3553 * of held locks:
3529 */ 3554 */
3530 prev_hlock = NULL; 3555 hlock = find_held_lock(curr, lock, depth, &i);
3531 for (i = depth-1; i >= 0; i--) { 3556 if (!hlock)
3532 hlock = curr->held_locks + i; 3557 return print_unlock_imbalance_bug(curr, lock, ip);
3533 /*
3534 * We must not cross into another context:
3535 */
3536 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3537 break;
3538 if (match_held_lock(hlock, lock))
3539 goto found_it;
3540 prev_hlock = hlock;
3541 }
3542 return print_unlock_imbalance_bug(curr, lock, ip);
3543 3558
3544found_it:
3545 if (hlock->instance == lock) 3559 if (hlock->instance == lock)
3546 lock_release_holdtime(hlock); 3560 lock_release_holdtime(hlock);
3547 3561
@@ -3903,7 +3917,7 @@ static void
3903__lock_contended(struct lockdep_map *lock, unsigned long ip) 3917__lock_contended(struct lockdep_map *lock, unsigned long ip)
3904{ 3918{
3905 struct task_struct *curr = current; 3919 struct task_struct *curr = current;
3906 struct held_lock *hlock, *prev_hlock; 3920 struct held_lock *hlock;
3907 struct lock_class_stats *stats; 3921 struct lock_class_stats *stats;
3908 unsigned int depth; 3922 unsigned int depth;
3909 int i, contention_point, contending_point; 3923 int i, contention_point, contending_point;
@@ -3916,22 +3930,12 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3916 if (DEBUG_LOCKS_WARN_ON(!depth)) 3930 if (DEBUG_LOCKS_WARN_ON(!depth))
3917 return; 3931 return;
3918 3932
3919 prev_hlock = NULL; 3933 hlock = find_held_lock(curr, lock, depth, &i);
3920 for (i = depth-1; i >= 0; i--) { 3934 if (!hlock) {
3921 hlock = curr->held_locks + i; 3935 print_lock_contention_bug(curr, lock, ip);
3922 /* 3936 return;
3923 * We must not cross into another context:
3924 */
3925 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3926 break;
3927 if (match_held_lock(hlock, lock))
3928 goto found_it;
3929 prev_hlock = hlock;
3930 } 3937 }
3931 print_lock_contention_bug(curr, lock, ip);
3932 return;
3933 3938
3934found_it:
3935 if (hlock->instance != lock) 3939 if (hlock->instance != lock)
3936 return; 3940 return;
3937 3941
@@ -3955,7 +3959,7 @@ static void
3955__lock_acquired(struct lockdep_map *lock, unsigned long ip) 3959__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3956{ 3960{
3957 struct task_struct *curr = current; 3961 struct task_struct *curr = current;
3958 struct held_lock *hlock, *prev_hlock; 3962 struct held_lock *hlock;
3959 struct lock_class_stats *stats; 3963 struct lock_class_stats *stats;
3960 unsigned int depth; 3964 unsigned int depth;
3961 u64 now, waittime = 0; 3965 u64 now, waittime = 0;
@@ -3969,22 +3973,12 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3969 if (DEBUG_LOCKS_WARN_ON(!depth)) 3973 if (DEBUG_LOCKS_WARN_ON(!depth))
3970 return; 3974 return;
3971 3975
3972 prev_hlock = NULL; 3976 hlock = find_held_lock(curr, lock, depth, &i);
3973 for (i = depth-1; i >= 0; i--) { 3977 if (!hlock) {
3974 hlock = curr->held_locks + i; 3978 print_lock_contention_bug(curr, lock, _RET_IP_);
3975 /* 3979 return;
3976 * We must not cross into another context:
3977 */
3978 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3979 break;
3980 if (match_held_lock(hlock, lock))
3981 goto found_it;
3982 prev_hlock = hlock;
3983 } 3980 }
3984 print_lock_contention_bug(curr, lock, _RET_IP_);
3985 return;
3986 3981
3987found_it:
3988 if (hlock->instance != lock) 3982 if (hlock->instance != lock)
3989 return; 3983 return;
3990 3984