author:    Peter Zijlstra <peterz@infradead.org>	2015-06-11 08:46:53 -0400
committer: Thomas Gleixner <tglx@linutronix.de>	2015-06-18 18:25:27 -0400
commit:    a24fc60d63da2b0b31bf7c876d12a51ed4b778bd (patch)
tree:      8f18a0b885a75b45b22b6f0b6b94abc3b8fab01d /kernel/locking/lockdep.c
parent:    e0f56fd7066f35ae3765d080e036fa676a9d4128 (diff)
lockdep: Implement lock pinning
Add a lockdep annotation that WARNs if you 'accidentally' unlock a lock.

This is especially helpful for code with callbacks, where the upper layer assumes a lock remains taken, but a lower layer thinks it can maybe drop and reacquire the lock. By unwittingly breaking up the lock, races can be introduced.

Lock pinning is a lockdep annotation that helps with this: once you lockdep_pin_lock() a held lock, any unlock without a matching lockdep_unpin_lock() will produce a WARN. Think of this as a relative of lockdep_assert_held(), except you don't only assert that it is held now, you ensure it stays held until you release your assertion.

RFC: a possible alternative API would be something like:

	int cookie = lockdep_pin_lock(&foo);
	...
	lockdep_unpin_lock(&foo, cookie);

where we pick a random number for the pin_count; this makes it impossible to sneak a lock break in without also passing the right cookie along. I've not done this because it ends up generating code for !LOCKDEP, especially if you need to pass the cookie around for some reason.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124743.906731065@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
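As a usage illustration (a hypothetical caller: the rq pointer and do_callback() are made-up names, not part of this patch; lockdep_pin_lock()/lockdep_unpin_lock() are assumed to be the header wrappers around the lock_pin_lock()/lock_unpin_lock() functions added below), pinning across a callback might look like:

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);	/* from here, any unlock WARNs */

	/*
	 * The callback must not drop rq->lock behind our back; if it
	 * does, the release path fires "releasing a pinned lock".
	 */
	do_callback(rq);

	lockdep_unpin_lock(&rq->lock);	/* assertion released */
	raw_spin_unlock(&rq->lock);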
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--	kernel/locking/lockdep.c	80
1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a266d5165b63..18f9f434d17e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3157,6 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = lockstat_clock();
 #endif
+	hlock->pin_count = 0;
 
 	if (check && !mark_irqflags(curr, hlock))
 		return 0;
@@ -3403,6 +3404,8 @@ found_it:
 	if (hlock->instance == lock)
 		lock_release_holdtime(hlock);
 
+	WARN(hlock->pin_count, "releasing a pinned lock\n");
+
 	if (hlock->references) {
 		hlock->references--;
 		if (hlock->references) {
@@ -3459,6 +3462,49 @@ static int __lock_is_held(struct lockdep_map *lock)
 	return 0;
 }
 
+static void __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			hlock->pin_count++;
+			return;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+}
+
+static void __lock_unpin_lock(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
+				return;
+
+			hlock->pin_count--;
+			return;
+		}
+	}
+
+	WARN(1, "unpinning an unheld lock\n");
+}
+
 /*
  * Check whether we follow the irq-flags state precisely:
  */
@@ -3582,6 +3628,40 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
+void lock_pin_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_pin_lock(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_pin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_unpin_lock(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_unpin_lock);
+
 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
 	current->lockdep_reclaim_gfp = gfp_mask;