 kernel/locking/mutex.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ad0e3335c481..93bec48f09ed 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -684,9 +684,16 @@ __mutex_unlock_common_slowpath(struct mutex *lock, int nested)
 	unsigned long flags;
 
 	/*
-	 * some architectures leave the lock unlocked in the fastpath failure
+	 * As a performance measurement, release the lock before doing other
+	 * wakeup related duties to follow. This allows other tasks to acquire
+	 * the lock sooner, while still handling cleanups in past unlock calls.
+	 * This can be done as we do not enforce strict equivalence between the
+	 * mutex counter and wait_list.
+	 *
+	 *
+	 * Some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
-	 * unlock it here
+	 * unlock it here - as the lock counter is currently 0 or negative.
 	 */
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
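
The ordering the new comment documents can be illustrated with a small userspace sketch. The toy_mutex type and toy_mutex_unlock_slowpath helper below are hypothetical and are not the kernel's code; they only mirror the idea that the counter is released before the wait-queue lock is taken for the wakeup, so a concurrent locker can acquire the mutex through the fastpath while the wakeup is still in flight.

/*
 * Minimal userspace analogue of the ordering described above.
 * toy_mutex and toy_mutex_unlock_slowpath are hypothetical names,
 * not the kernel's API.
 */
#include <stdatomic.h>
#include <pthread.h>

struct toy_mutex {
	/* 1: unlocked, 0: locked, <0: locked with waiters */
	atomic_int	count;
	pthread_mutex_t	wait_lock;
	pthread_cond_t	wait_q;
};

static void toy_mutex_unlock_slowpath(struct toy_mutex *lock)
{
	/*
	 * Release the mutex before touching the wait queue: another
	 * thread may now take the fastpath while we still do the wakeup
	 * below.  This is only safe because the counter and the wait
	 * queue are not kept strictly in sync; a woken waiter rechecks
	 * the counter itself before proceeding.
	 */
	atomic_store(&lock->count, 1);

	pthread_mutex_lock(&lock->wait_lock);
	pthread_cond_signal(&lock->wait_q);	/* wake one waiter, if any */
	pthread_mutex_unlock(&lock->wait_lock);
}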