 kernel/locking/mutex.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 82dad2ccd40b..dc3d6f2bbe2a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -671,10 +671,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
-	mutex_release(&lock->dep_map, nested, _RET_IP_);
-	debug_mutex_unlock(lock);
-
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
@@ -683,6 +679,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
+	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
+
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
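
The point of the reordering above: the mutex count is released (atomic_set(&lock->count, 1)) before wait_lock is taken, so a concurrent acquirer can succeed through the fastpath while the unlocker is still doing wakeup bookkeeping under wait_lock. A minimal userspace analogue of that pattern follows, built on C11 atomics and pthreads; it is a sketch, not kernel code, and the names demo_mutex, demo_unlock_slowpath, and demo_trylock_fastpath are invented for illustration.

/*
 * Hypothetical userspace analogue of the reordering in this patch:
 * make the lock word available *before* taking the internal
 * wait-queue lock, so a concurrent fastpath acquirer (the cmpxchg
 * below) can proceed while the unlocker still holds wait_lock.
 */
#include <pthread.h>
#include <stdatomic.h>

struct demo_mutex {
	atomic_int count;          /* 1: unlocked, 0: locked */
	pthread_mutex_t wait_lock; /* protects waiter bookkeeping */
	int nr_waiters;            /* stand-in for lock->wait_list */
	pthread_cond_t wait_cv;    /* stand-in for waking a waiter */
};

static void demo_unlock_slowpath(struct demo_mutex *lock)
{
	/* Release the lock word first (mirrors atomic_set(&lock->count, 1)) */
	atomic_store(&lock->count, 1);

	/* Only now take the internal lock for wakeup bookkeeping */
	pthread_mutex_lock(&lock->wait_lock);
	if (lock->nr_waiters > 0)
		pthread_cond_signal(&lock->wait_cv); /* wake one waiter */
	pthread_mutex_unlock(&lock->wait_lock);
}

static int demo_trylock_fastpath(struct demo_mutex *lock)
{
	int unlocked = 1;

	/*
	 * Succeeds as soon as count reads 1, even while the previous
	 * owner is still inside wait_lock above.
	 */
	return atomic_compare_exchange_strong(&lock->count, &unlocked, 0);
}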