author | Davidlohr Bueso <davidlohr@hp.com> | 2014-07-30 16:41:50 -0400
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2014-08-13 04:31:58 -0400
commit | 242489cfe97d44290e7f88b12591fab6c0819045 (patch)
tree | 34d9b2939f8060fc322d13d03515db00485203bc /kernel/locking/mutex.c
parent | 2e39465abc4b7856a0ea6fcf4f6b4668bb5db877 (diff)
locking/mutexes: Standardize arguments in lock/unlock slowpaths
Just as on the locking end, when unlocking go ahead and obtain the
proper data structure (the struct mutex) immediately after the
preceding arch-level (asm) call exits and there are (probably) pending
waiters. This simplifies some of the layering; a minimal sketch of the
container_of idiom involved follows the sign-off block below.
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: jason.low2@hp.com
Cc: aswin@hp.com
Cc: mingo@kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1406752916-3341-1-git-send-email-davidlohr@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
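
For readers unfamiliar with the idiom being standardized here, below is a minimal userspace sketch of the container_of pattern: the ABI-facing wrapper keeps the counter-pointer signature and recovers the enclosing structure exactly once, so the common slowpath can take the proper type directly. This is an illustration only, assuming plain C; the fake_mutex type and unlock_* names are invented for the example and are not the kernel's.

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel's container_of(): given a pointer
 * to a member, compute the address of the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_mutex {
        int count;              /* stands in for the mutex's atomic_t count */
        const char *name;
};

/* Common slowpath: after the patch, it takes the full structure. */
static void unlock_common_slowpath(struct fake_mutex *lock, int nested)
{
        printf("unlocking %s (nested=%d)\n", lock->name, nested);
}

/* Entry point keeps the counter-pointer signature expected by the
 * fastpath; the conversion to the enclosing struct happens here,
 * exactly once, right at the boundary. */
static void unlock_slowpath(int *lock_count)
{
        struct fake_mutex *lock =
                container_of(lock_count, struct fake_mutex, count);

        unlock_common_slowpath(lock, 1);
}

int main(void)
{
        struct fake_mutex m = { .count = 0, .name = "demo" };

        unlock_slowpath(&m.count);
        return 0;
}
```

The design choice mirrors the patch: the pointer-to-member conversion stays at the one boundary that must keep the legacy atomic_t * signature, and everything beneath it operates on the real mutex type.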
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r-- | kernel/locking/mutex.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ae712b25e492..ad0e3335c481 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -679,9 +679,8 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
  * Release the lock, slowpath:
  */
 static inline void
-__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
+__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
 {
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
 	/*
@@ -716,7 +715,9 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 __visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
-	__mutex_unlock_common_slowpath(lock_count, 1);
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_unlock_common_slowpath(lock, 1);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
```