author    Jason Low <jason.low2@hp.com>     2014-01-28 14:13:14 -0500
committer Ingo Molnar <mingo@kernel.org>    2014-03-11 07:14:54 -0400
commit    1d8fe7dc8078b23e060ec62ccb4cdc1ac3c41bf8
tree      1640ad75e3eea39dee169b8c415d53cd599e830c /kernel/locking
parent    47667fa1502e4d759df87e9cc7fbc0f202483361
locking/mutexes: Unlock the mutex without the wait_lock
When running workloads with high mutex contention on an 8-socket machine,
mutex spinners would often spin for a long time with no lock owner.
The main reason this occurs is in __mutex_unlock_common_slowpath(): if
__mutex_slowpath_needs_to_unlock(), the owner must acquire mutex->wait_lock
before releasing the mutex (setting lock->count to 1). When the wait_lock is
contended, this delays the release of the mutex.
We should be able to release the mutex without holding the wait_lock.
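For illustration, here is a minimal user-space sketch of the before/after
unlock ordering described above. It is not the kernel code itself: the
struct toy_mutex, its fields, and the two function names are hypothetical,
with an atomic "count" standing in for mutex->count and a pthread mutex
standing in for mutex->wait_lock.

/*
 * Simplified model of the ordering change, assuming a toy mutex type
 * (illustrative names only, not kernel APIs).
 */
#include <pthread.h>
#include <stdatomic.h>

struct toy_mutex {
	atomic_int count;               /* 1: unlocked, 0: locked, <0: waiters */
	pthread_mutex_t wait_lock;      /* protects the (omitted) wait list */
};

/* Old ordering: wait_lock is held while the lock word is released,
 * so a contended wait_lock delays the release itself. */
static void unlock_slowpath_old(struct toy_mutex *lock)
{
	pthread_mutex_lock(&lock->wait_lock);
	atomic_store(&lock->count, 1);          /* release the mutex */
	/* ... wake up the first waiter ... */
	pthread_mutex_unlock(&lock->wait_lock);
}

/* New ordering: release the lock word first, then take wait_lock only to
 * walk the wait list, so spinners can acquire the mutex immediately. */
static void unlock_slowpath_new(struct toy_mutex *lock)
{
	atomic_store(&lock->count, 1);          /* release the mutex */
	pthread_mutex_lock(&lock->wait_lock);
	/* ... wake up the first waiter ... */
	pthread_mutex_unlock(&lock->wait_lock);
}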
Signed-off-by: Jason Low <jason.low2@hp.com>
Cc: chegu_vinod@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: Waiman.Long@hp.com
Cc: torvalds@linux-foundation.org
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1390936396-3962-4-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
 kernel/locking/mutex.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 82dad2ccd40b..dc3d6f2bbe2a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -671,10 +671,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
-	mutex_release(&lock->dep_map, nested, _RET_IP_);
-	debug_mutex_unlock(lock);
-
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
@@ -683,6 +679,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
+	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
+
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =