path: root/kernel/locking/mutex.c
author	Davidlohr Bueso <davidlohr@hp.com>	2014-07-30 16:41:51 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-08-13 04:31:59 -0400
commit	42fa566bd74aa7b95413fb00611ec983b488222d (patch)
tree	ed8785c7eb273aca842fc78972e7904a247ae28e	/kernel/locking/mutex.c
parent	242489cfe97d44290e7f88b12591fab6c0819045 (diff)
locking/mutexes: Document quick lock release when unlocking
When unlocking, we always want to reach the slowpath with the lock's counter indicating it is unlocked -- as returned by the asm fastpath call or by explicitly setting it. While doing so, at least in theory, we can optimize and allow faster lock stealing.

Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: jason.low2@hp.com
Cc: aswin@hp.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1406752916-3341-2-git-send-email-davidlohr@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/mutex.c')
-rw-r--r--	kernel/locking/mutex.c	11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ad0e3335c481..93bec48f09ed 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -684,9 +684,16 @@ __mutex_unlock_common_slowpath(struct mutex *lock, int nested)
 	unsigned long flags;
 
 	/*
-	 * some architectures leave the lock unlocked in the fastpath failure
+	 * As a performance measurement, release the lock before doing other
+	 * wakeup related duties to follow. This allows other tasks to acquire
+	 * the lock sooner, while still handling cleanups in past unlock calls.
+	 * This can be done as we do not enforce strict equivalence between the
+	 * mutex counter and wait_list.
+	 *
+	 *
+	 * Some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
-	 * unlock it here
+	 * unlock it here - as the lock counter is currently 0 or negative.
 	 */
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
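
For illustration only, not part of the patch: a minimal userspace sketch of the idea the new comment documents, built around a hypothetical toy_mutex type with C11 atomics and pthreads. The unlock path publishes count = 1 before doing its wakeup-related cleanup, so a concurrent locker can steal the lock without waiting for that cleanup to finish, mirroring the early atomic_set(&lock->count, 1) in the slowpath above.

/* toy_mutex.c -- illustrative sketch only; this is NOT the kernel's mutex.
 * The counter protocol loosely mirrors the mutex fastpath:
 *   1 = unlocked, 0 = locked (waiters are simulated by spinning).
 * Build with: cc -pthread toy_mutex.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

struct toy_mutex {
	atomic_int count;	/* 1 = unlocked, 0 = locked */
};

static void toy_lock(struct toy_mutex *m)
{
	int expected = 1;

	/* Spin until we flip 1 -> 0; stands in for the fastpath/slowpath. */
	while (!atomic_compare_exchange_weak(&m->count, &expected, 0))
		expected = 1;
}

static void toy_unlock(struct toy_mutex *m)
{
	/* Release the lock *before* the wakeup-related duties below, so a
	 * concurrent toy_lock() can steal it while we finish cleaning up. */
	atomic_store(&m->count, 1);

	/* Simulated wakeup/bookkeeping work (taking wait_lock, debug hooks,
	 * wake_up_process() in the real slowpath). */
	usleep(1000);
}

static struct toy_mutex m = { .count = 1 };

static void *stealer(void *arg)
{
	(void)arg;
	toy_lock(&m);	/* can succeed while toy_unlock() is still running */
	printf("stole the lock before the unlocker finished its cleanup\n");
	toy_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;

	toy_lock(&m);
	pthread_create(&t, NULL, stealer, NULL);
	usleep(1000);		/* let the stealer start spinning */
	toy_unlock(&m);		/* count goes to 1 before the cleanup sleep */
	pthread_join(t, NULL);
	return 0;
}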