diff options
| author | Jason Low <jason.low2@hp.com> | 2014-06-11 14:37:21 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-07-05 05:25:41 -0400 |
| commit | 1e820c9608eace237e2c519d8fd9074aec479d81 (patch) | |
| tree | 5d48b95b5b18110164bb9133e027fcc8d6872fbb /kernel/locking | |
| parent | 0c3c0f0d6e56422cef60a33726d062e9923005c3 (diff) | |
locking/mutexes: Delete the MUTEX_SHOW_NO_WAITER macro
MUTEX_SHOW_NO_WAITER() is a macro which checks whether there are
"no waiters" on a mutex by checking if the lock count is non-negative.
Based on feedback from the discussion in the earlier version of this
patchset, the macro is not very readable.
Furthermore, checking lock->count isn't always the correct way to
determine if there are "no waiters" on a mutex. For example, a negative
count on a mutex really only means that there "potentially" are
waiters. Likewise, there can be waiters on the mutex even if the count is
non-negative. Thus, "MUTEX_SHOW_NO_WAITER" doesn't always do what the name
of the macro suggests.
So this patch deletes the MUTEX_SHOW_NO_WAITER() macro, directly
uses atomic_read() instead of the macro, and adds comments which
elaborate on how the extra atomic_read() checks can help reduce
unnecessary xchg() operations.
Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: akpm@linux-foundation.org
Cc: tim.c.chen@linux.intel.com
Cc: paulmck@linux.vnet.ibm.com
Cc: rostedt@goodmis.org
Cc: davidlohr@hp.com
Cc: scott.norton@hp.com
Cc: aswin@hp.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1402511843-4721-3-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/mutex.c | 18 |
1 files changed, 8 insertions, 10 deletions
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index dd26bf6dee0c..4bd95465af55 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
| @@ -46,12 +46,6 @@ | |||
| 46 | # include <asm/mutex.h> | 46 | # include <asm/mutex.h> |
| 47 | #endif | 47 | #endif |
| 48 | 48 | ||
| 49 | /* | ||
| 50 | * A negative mutex count indicates that waiters are sleeping waiting for the | ||
| 51 | * mutex. | ||
| 52 | */ | ||
| 53 | #define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0) | ||
| 54 | |||
| 55 | void | 49 | void |
| 56 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | 50 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) |
| 57 | { | 51 | { |
| @@ -483,8 +477,11 @@ slowpath: | |||
| 483 | #endif | 477 | #endif |
| 484 | spin_lock_mutex(&lock->wait_lock, flags); | 478 | spin_lock_mutex(&lock->wait_lock, flags); |
| 485 | 479 | ||
| 486 | /* once more, can we acquire the lock? */ | 480 | /* |
| 487 | if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1)) | 481 | * Once more, try to acquire the lock. Only try-lock the mutex if |
| 482 | * lock->count >= 0 to reduce unnecessary xchg operations. | ||
| 483 | */ | ||
| 484 | if (atomic_read(&lock->count) >= 0 && (atomic_xchg(&lock->count, 0) == 1)) | ||
| 488 | goto skip_wait; | 485 | goto skip_wait; |
| 489 | 486 | ||
| 490 | debug_mutex_lock_common(lock, &waiter); | 487 | debug_mutex_lock_common(lock, &waiter); |
| @@ -504,9 +501,10 @@ slowpath: | |||
| 504 | * it's unlocked. Later on, if we sleep, this is the | 501 | * it's unlocked. Later on, if we sleep, this is the |
| 505 | * operation that gives us the lock. We xchg it to -1, so | 502 | * operation that gives us the lock. We xchg it to -1, so |
| 506 | * that when we release the lock, we properly wake up the | 503 | * that when we release the lock, we properly wake up the |
| 507 | * other waiters: | 504 | * other waiters. We only attempt the xchg if the count is |
| 505 | * non-negative in order to avoid unnecessary xchg operations: | ||
| 508 | */ | 506 | */ |
| 509 | if (MUTEX_SHOW_NO_WAITER(lock) && | 507 | if (atomic_read(&lock->count) >= 0 && |
| 510 | (atomic_xchg(&lock->count, -1) == 1)) | 508 | (atomic_xchg(&lock->count, -1) == 1)) |
| 511 | break; | 509 | break; |
| 512 | 510 | ||
