author     Linus Torvalds <torvalds@linux-foundation.org>   2015-06-24 18:09:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-06-24 18:09:40 -0400
commit     98ec21a01896751b673b6c731ca8881daa8b2c6d (patch)
tree       9d6d780675436efc894878475284c70f766126dd /kernel/locking/lockdep.c
parent     a262948335bc5359b82f0ed5ef35f6e82ca44d16 (diff)
parent     cbce1a686700595de65ee363b9b3283ae85d8fc5 (diff)
Merge branch 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Thomas Gleixner:
 "This series of scheduler updates depends on sched/core and timers/core
  branches, which are already in your tree:

   - Scheduler balancing overhaul to plug a hard to trigger race which
     causes an oops in the balancer (Peter Zijlstra)

   - Lockdep updates which are related to the balancing updates (Peter
     Zijlstra)"

* 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched,lockdep: Employ lock pinning
  lockdep: Implement lock pinning
  lockdep: Simplify lock_release()
  sched: Streamline the task migration locking a little
  sched: Move code around
  sched,dl: Fix sched class hopping CBS hole
  sched, dl: Convert switched_{from, to}_dl() / prio_changed_dl() to balance callbacks
  sched,dl: Remove return value from pull_dl_task()
  sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
  sched,rt: Remove return value from pull_rt_task()
  sched: Allow balance callbacks for check_class_changed()
  sched: Use replace normalize_task() with __sched_setscheduler()
  sched: Replace post_schedule with a balance callback list
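The two lockdep commits in this pull ("lockdep: Implement lock pinning" and "sched,lockdep: Employ lock pinning") add a per-held-lock pin_count so a subsystem can assert that a lock it holds is not dropped behind its back. Below is a minimal sketch of how a caller might use the new hooks. The lockdep_pin_lock()/lockdep_unpin_lock() wrappers and the demo code itself are assumptions based on the rest of the series (they are not part of the lockdep.c diff shown on this page), and the checks only do anything with lockdep enabled.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical demo lock; rq->lock in the scheduler is the real user. */
static DEFINE_RAW_SPINLOCK(demo_lock);

/* helper() runs with demo_lock held and must not release it. */
static void demo_critical_section(void (*helper)(void))
{
        raw_spin_lock(&demo_lock);

        /*
         * Pin the held lock: __lock_release() now warns with
         * "releasing a pinned lock" if helper() (or anything it calls)
         * drops demo_lock while the pin is outstanding.
         */
        lockdep_pin_lock(&demo_lock);   /* assumed wrapper around lock_pin_lock() */
        helper();
        lockdep_unpin_lock(&demo_lock); /* assumed wrapper around lock_unpin_lock() */

        raw_spin_unlock(&demo_lock);
}

Since pin_count is a counter, nested pin/unpin pairs are fine; unpinning a lock that was never pinned, or pinning/unpinning a lock that is not held, trips the WARNs added in __lock_pin_lock()/__lock_unpin_lock() below.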
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c  177
1 file changed, 87 insertions, 90 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 456614136f1a..8acfbf773e06 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3157,6 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         hlock->waittime_stamp = 0;
         hlock->holdtime_stamp = lockstat_clock();
 #endif
+        hlock->pin_count = 0;
 
         if (check && !mark_irqflags(curr, hlock))
                 return 0;
@@ -3260,26 +3261,6 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
         return 0;
 }
 
-/*
- * Common debugging checks for both nested and non-nested unlock:
- */
-static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
-                        unsigned long ip)
-{
-        if (unlikely(!debug_locks))
-                return 0;
-        /*
-         * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
-         */
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return 0;
-
-        if (curr->lockdep_depth <= 0)
-                return print_unlock_imbalance_bug(curr, lock, ip);
-
-        return 1;
-}
-
 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 {
         if (hlock->instance == lock)
@@ -3376,31 +3357,35 @@ found_it:
 }
 
 /*
- * Remove the lock to the list of currently held locks in a
- * potentially non-nested (out of order) manner. This is a
- * relatively rare operation, as all the unlock APIs default
- * to nested mode (which uses lock_release()):
+ * Remove the lock to the list of currently held locks - this gets
+ * called on mutex_unlock()/spin_unlock*() (or on a failed
+ * mutex_lock_interruptible()).
+ *
+ * @nested is an hysterical artifact, needs a tree wide cleanup.
  */
 static int
-lock_release_non_nested(struct task_struct *curr,
-                        struct lockdep_map *lock, unsigned long ip)
+__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
+        struct task_struct *curr = current;
         struct held_lock *hlock, *prev_hlock;
         unsigned int depth;
         int i;
 
-        /*
-         * Check whether the lock exists in the current stack
-         * of held locks:
-         */
+        if (unlikely(!debug_locks))
+                return 0;
+
         depth = curr->lockdep_depth;
         /*
         * So we're all set to release this lock.. wait what lock? We don't
         * own any locks, you've been drinking again?
         */
-        if (DEBUG_LOCKS_WARN_ON(!depth))
-                return 0;
+        if (DEBUG_LOCKS_WARN_ON(depth <= 0))
+                return print_unlock_imbalance_bug(curr, lock, ip);
 
+        /*
+         * Check whether the lock exists in the current stack
+         * of held locks:
+         */
         prev_hlock = NULL;
         for (i = depth-1; i >= 0; i--) {
                 hlock = curr->held_locks + i;
@@ -3419,6 +3404,8 @@ found_it:
         if (hlock->instance == lock)
                 lock_release_holdtime(hlock);
 
+        WARN(hlock->pin_count, "releasing a pinned lock\n");
+
         if (hlock->references) {
                 hlock->references--;
                 if (hlock->references) {
@@ -3456,91 +3443,66 @@ found_it:
          */
         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
                 return 0;
+
         return 1;
 }
 
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static int lock_release_nested(struct task_struct *curr,
-                               struct lockdep_map *lock, unsigned long ip)
+static int __lock_is_held(struct lockdep_map *lock)
 {
-        struct held_lock *hlock;
-        unsigned int depth;
-
-        /*
-         * Pop off the top of the lock stack:
-         */
-        depth = curr->lockdep_depth - 1;
-        hlock = curr->held_locks + depth;
-
-        /*
-         * Is the unlock non-nested:
-         */
-        if (hlock->instance != lock || hlock->references)
-                return lock_release_non_nested(curr, lock, ip);
-        curr->lockdep_depth--;
-
-        /*
-         * No more locks, but somehow we've got hash left over, who left it?
-         */
-        if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
-                return 0;
+        struct task_struct *curr = current;
+        int i;
 
-        curr->curr_chain_key = hlock->prev_chain_key;
+        for (i = 0; i < curr->lockdep_depth; i++) {
+                struct held_lock *hlock = curr->held_locks + i;
 
-        lock_release_holdtime(hlock);
+                if (match_held_lock(hlock, lock))
+                        return 1;
+        }
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-        hlock->prev_chain_key = 0;
-        hlock->class_idx = 0;
-        hlock->acquire_ip = 0;
-        hlock->irq_context = 0;
-#endif
-        return 1;
+        return 0;
 }
 
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static void
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+static void __lock_pin_lock(struct lockdep_map *lock)
 {
         struct task_struct *curr = current;
+        int i;
 
-        if (!check_unlock(curr, lock, ip))
+        if (unlikely(!debug_locks))
                 return;
 
-        if (nested) {
-                if (!lock_release_nested(curr, lock, ip))
-                        return;
-        } else {
-                if (!lock_release_non_nested(curr, lock, ip))
+        for (i = 0; i < curr->lockdep_depth; i++) {
+                struct held_lock *hlock = curr->held_locks + i;
+
+                if (match_held_lock(hlock, lock)) {
+                        hlock->pin_count++;
                         return;
+                }
         }
 
-        check_chain_key(curr);
+        WARN(1, "pinning an unheld lock\n");
 }
 
-static int __lock_is_held(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock)
 {
         struct task_struct *curr = current;
         int i;
 
+        if (unlikely(!debug_locks))
+                return;
+
         for (i = 0; i < curr->lockdep_depth; i++) {
                 struct held_lock *hlock = curr->held_locks + i;
 
-                if (match_held_lock(hlock, lock))
-                        return 1;
+                if (match_held_lock(hlock, lock)) {
+                        if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
+                                return;
+
+                        hlock->pin_count--;
+                        return;
+                }
         }
 
-        return 0;
+        WARN(1, "unpinning an unheld lock\n");
 }
 
 /*
@@ -3639,7 +3601,8 @@ void lock_release(struct lockdep_map *lock, int nested,
         check_flags(flags);
         current->lockdep_recursion = 1;
         trace_lock_release(lock, ip);
-        __lock_release(lock, nested, ip);
+        if (__lock_release(lock, nested, ip))
+                check_chain_key(current);
         current->lockdep_recursion = 0;
         raw_local_irq_restore(flags);
 }
@@ -3665,6 +3628,40 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
+void lock_pin_lock(struct lockdep_map *lock)
+{
+        unsigned long flags;
+
+        if (unlikely(current->lockdep_recursion))
+                return;
+
+        raw_local_irq_save(flags);
+        check_flags(flags);
+
+        current->lockdep_recursion = 1;
+        __lock_pin_lock(lock);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_pin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock)
+{
+        unsigned long flags;
+
+        if (unlikely(current->lockdep_recursion))
+                return;
+
+        raw_local_irq_save(flags);
+        check_flags(flags);
+
+        current->lockdep_recursion = 1;
+        __lock_unpin_lock(lock);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_unpin_lock);
+
 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
         current->lockdep_reclaim_gfp = gfp_mask;
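For completeness, a sketch of the failure mode the new WARN in __lock_release() is policing: unlocking while a pin is still outstanding. As above, lockdep_pin_lock() is an assumed wrapper from the same series, and this is a throwaway test-module illustration, not code from the patch.

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_RAW_SPINLOCK(pin_demo_lock);

static int __init pin_demo_init(void)
{
        raw_spin_lock(&pin_demo_lock);
        lockdep_pin_lock(&pin_demo_lock);  /* assumed wrapper around lock_pin_lock() */

        /* Forgot to unpin: the unlock below hits WARN(hlock->pin_count, ...). */
        raw_spin_unlock(&pin_demo_lock);   /* warns "releasing a pinned lock" */

        return 0;
}
module_init(pin_demo_init);
MODULE_LICENSE("GPL");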