author		Peter Zijlstra <peterz@infradead.org>		2015-06-11 08:46:52 -0400
committer	Thomas Gleixner <tglx@linutronix.de>		2015-06-18 18:25:27 -0400
commit		e0f56fd7066f35ae3765d080e036fa676a9d4128 (patch)
tree		f74e53c81869fd416d24cadea1ff899273cf75cc /kernel/locking/lockdep.c
parent		5e16bbc2fb4053755705da5dd3557bbc0e5ccef6 (diff)
lockdep: Simplify lock_release()
lock_release() takes this nested argument that's mostly pointless
these days, remove the implementation but leave the argument a
rudiment for now.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124743.840411606@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
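
Illustration (not part of the patch): with the nested fast path gone, a
perfectly nested unlock and an out-of-order one are validated by the same
__lock_release() stack walk, and the @nested flag that callers still pass
is simply ignored. A minimal sketch, assuming the usual mutex API and two
hypothetical locks a and b:

	mutex_lock(&a);
	mutex_lock(&b);
	/*
	 * Out of order: @a is not the top of the held-lock stack.
	 * Previously this fell back from lock_release_nested() to
	 * lock_release_non_nested(); after this patch every unlock,
	 * in-order or not, takes the one remaining release path.
	 */
	mutex_unlock(&a);
	mutex_unlock(&b);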
Diffstat (limited to 'kernel/locking/lockdep.c')

-rw-r--r--	kernel/locking/lockdep.c	| 119
1 file changed, 18 insertions(+), 101 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0831e1b99f4..a266d5165b63 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3260,26 +3260,6 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-/*
- * Common debugging checks for both nested and non-nested unlock:
- */
-static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
-			unsigned long ip)
-{
-	if (unlikely(!debug_locks))
-		return 0;
-	/*
-	 * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
-	if (curr->lockdep_depth <= 0)
-		return print_unlock_imbalance_bug(curr, lock, ip);
-
-	return 1;
-}
-
 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
@@ -3376,31 +3356,35 @@ found_it:
 }
 
 /*
- * Remove the lock to the list of currently held locks in a
- * potentially non-nested (out of order) manner. This is a
- * relatively rare operation, as all the unlock APIs default
- * to nested mode (which uses lock_release()):
+ * Remove the lock to the list of currently held locks - this gets
+ * called on mutex_unlock()/spin_unlock*() (or on a failed
+ * mutex_lock_interruptible()).
+ *
+ * @nested is an hysterical artifact, needs a tree wide cleanup.
  */
 static int
-lock_release_non_nested(struct task_struct *curr,
-			struct lockdep_map *lock, unsigned long ip)
+__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
+	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
 	unsigned int depth;
 	int i;
 
-	/*
-	 * Check whether the lock exists in the current stack
-	 * of held locks:
-	 */
+	if (unlikely(!debug_locks))
+		return 0;
+
 	depth = curr->lockdep_depth;
 	/*
 	 * So we're all set to release this lock.. wait what lock? We don't
 	 * own any locks, you've been drinking again?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth))
-		return 0;
+	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
+		return print_unlock_imbalance_bug(curr, lock, ip);
 
+	/*
+	 * Check whether the lock exists in the current stack
+	 * of held locks:
+	 */
 	prev_hlock = NULL;
 	for (i = depth-1; i >= 0; i--) {
 		hlock = curr->held_locks + i;
@@ -3456,78 +3440,10 @@ found_it:
 	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
 		return 0;
-	return 1;
-}
-
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static int lock_release_nested(struct task_struct *curr,
-			       struct lockdep_map *lock, unsigned long ip)
-{
-	struct held_lock *hlock;
-	unsigned int depth;
-
-	/*
-	 * Pop off the top of the lock stack:
-	 */
-	depth = curr->lockdep_depth - 1;
-	hlock = curr->held_locks + depth;
-
-	/*
-	 * Is the unlock non-nested:
-	 */
-	if (hlock->instance != lock || hlock->references)
-		return lock_release_non_nested(curr, lock, ip);
-	curr->lockdep_depth--;
-
-	/*
-	 * No more locks, but somehow we've got hash left over, who left it?
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
-		return 0;
-
-	curr->curr_chain_key = hlock->prev_chain_key;
-
-	lock_release_holdtime(hlock);
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-	hlock->prev_chain_key = 0;
-	hlock->class_idx = 0;
-	hlock->acquire_ip = 0;
-	hlock->irq_context = 0;
-#endif
 	return 1;
 }
 
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static void
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
-{
-	struct task_struct *curr = current;
-
-	if (!check_unlock(curr, lock, ip))
-		return;
-
-	if (nested) {
-		if (!lock_release_nested(curr, lock, ip))
-			return;
-	} else {
-		if (!lock_release_non_nested(curr, lock, ip))
-			return;
-	}
-
-	check_chain_key(curr);
-}
-
 static int __lock_is_held(struct lockdep_map *lock)
 {
 	struct task_struct *curr = current;
@@ -3639,7 +3555,8 @@ void lock_release(struct lockdep_map *lock, int nested,
 	check_flags(flags);
 	current->lockdep_recursion = 1;
 	trace_lock_release(lock, ip);
-	__lock_release(lock, nested, ip);
+	if (__lock_release(lock, nested, ip))
+		check_chain_key(current);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
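
For reference, the resulting lock_release() control flow, assembled from the
final hunk. This is a sketch: the locals and the early prologue are assumed
from the surrounding code (the diff only shows the lines around the
__lock_release() call), so treat everything outside the if() as illustrative.

	void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
	{
		unsigned long flags;

		raw_local_irq_save(flags);		/* assumed; pairs with the restore below */
		check_flags(flags);
		current->lockdep_recursion = 1;
		trace_lock_release(lock, ip);
		if (__lock_release(lock, nested, ip))	/* 0 when lockdep is off or on imbalance */
			check_chain_key(current);	/* validation moved out of __lock_release() */
		current->lockdep_recursion = 0;
		raw_local_irq_restore(flags);
	}

The point of returning int is that chain-key validation only runs after a
release that actually succeeded; a bailed-out release (lockdep disabled, or
an unlock imbalance that was already reported) skips check_chain_key()
instead of checking a held-lock stack known to be inconsistent.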