author	Davidlohr Bueso <dave@stgolabs.net>	2015-09-30 16:03:12 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-10-06 11:28:20 -0400
commit	81a43adae3b943193fb3afd20a36a7482332f964 (patch)
tree	09a6a2090473927939fc8d916ec12e5fec93414c
parent	63ab7bd0d450b726b88fa4b932f151b98cee2557 (diff)
locking/mutex: Use acquire/release semantics
As of 654672d4ba1 ("locking/atomics: Add _{acquire|release|relaxed}()
variants of some atomic operations") and 6d79ef2d30e ("locking,
asm-generic: Add _{relaxed|acquire|release}() variants for
'atomic_long_t'"), weakly ordered archs can benefit from a more relaxed
use of barriers when locking and unlocking, instead of regular full
barrier semantics.

While currently only arm64 supports such optimizations, updating the
corresponding locking primitives lets other archs benefit immediately
as well, once the necessary machinery is implemented.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
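To make the ordering the patch relies on concrete, below is a minimal
user-space sketch in plain C11 <stdatomic.h>. This is not kernel code:
the helper names (fastpath_trylock/fastpath_unlock) and the main()
driver are illustrative, and only the 1/0/-1 count convention mirrors
the mutex fastpaths touched here. It shows the key point of the
conversion: the lock side needs acquire ordering, the unlock side needs
release ordering, and neither needs a full barrier.

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative only: 1 = unlocked, 0 = locked, -1 = contended. */
static atomic_int count = ATOMIC_VAR_INIT(1);

static bool fastpath_trylock(void)
{
	int old = 1;

	/*
	 * Acquire on success: accesses inside the critical section cannot
	 * be reordered before the lock acquisition. This is the C11
	 * analogue of the atomic_cmpxchg_acquire()/atomic_xchg_acquire()
	 * calls introduced by the patch.
	 */
	return atomic_compare_exchange_strong_explicit(&count, &old, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void fastpath_unlock(void)
{
	/*
	 * Release on unlock: everything written inside the critical
	 * section is visible to the next acquirer before the lock reads
	 * as free. Analogue of atomic_xchg_release() and
	 * atomic_inc_return_release().
	 */
	if (atomic_exchange_explicit(&count, 1, memory_order_release) != 0) {
		/* Contended: a real mutex would take the wakeup slowpath. */
	}
}

int main(void)
{
	if (fastpath_trylock()) {
		/* ... critical section ... */
		fastpath_unlock();
	}
	return 0;
}

On a strongly ordered arch such as x86 these orderings cost the same as
a full barrier, but on weakly ordered arm64 the acquire/release forms
map to cheaper LDAR/STLR-style instructions, which is the benefit the
commit message describes.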
-rw-r--r--	include/asm-generic/mutex-dec.h		8
-rw-r--r--	include/asm-generic/mutex-xchg.h	10
-rw-r--r--	kernel/locking/mutex.c			9
3 files changed, 14 insertions, 13 deletions
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53df..fd694cfd678a 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_dec_return(count) < 0))
+	if (unlikely(atomic_dec_return_acquire(count) < 0))
 		fail_fn(count);
 }
 
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-	if (unlikely(atomic_dec_return(count) < 0))
+	if (unlikely(atomic_dec_return_acquire(count) < 0))
 		return -1;
 	return 0;
 }
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_inc_return(count) <= 0))
+	if (unlikely(atomic_inc_return_release(count) <= 0))
 		fail_fn(count);
 }
 
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+	if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
 		return 1;
 	return 0;
 }
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785..a6b4a7bd6ac9 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 		 * to ensure that any waiting tasks are woken up by the
 		 * unlock slow path.
 		 */
-		if (likely(atomic_xchg(count, -1) != 1))
+		if (likely(atomic_xchg_acquire(count, -1) != 1))
 			fail_fn(count);
 }
 
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-	if (unlikely(atomic_xchg(count, 0) != 1))
+	if (unlikely(atomic_xchg_acquire(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
 			return -1;
 	return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_xchg(count, 1) != 0))
+	if (unlikely(atomic_xchg_release(count, 1) != 0))
 		fail_fn(count);
 }
 
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int prev = atomic_xchg(count, 0);
+	int prev = atomic_xchg_acquire(count, 0);
 
 	if (unlikely(prev < 0)) {
 		/*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		 *   owner's unlock path needlessly, but that's not a problem
 		 *   in practice. ]
 		 */
-		prev = atomic_xchg(count, prev);
+		prev = atomic_xchg_acquire(count, prev);
 		if (prev < 0)
 			prev = 0;
 	}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4cccea6b8934..0551c219c40e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -277,7 +277,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 static inline bool mutex_try_to_acquire(struct mutex *lock)
 {
 	return !mutex_is_locked(lock) &&
-		(atomic_cmpxchg(&lock->count, 1, 0) == 1);
+		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
 }
 
 /*
@@ -529,7 +529,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * Once more, try to acquire the lock. Only try-lock the mutex if
 	 * it is unlocked to reduce unnecessary xchg() operations.
 	 */
-	if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
+	if (!mutex_is_locked(lock) &&
+	    (atomic_xchg_acquire(&lock->count, 0) == 1))
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -553,7 +554,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * non-negative in order to avoid unnecessary xchg operations:
 		 */
 		if (atomic_read(&lock->count) >= 0 &&
-		    (atomic_xchg(&lock->count, -1) == 1))
+		    (atomic_xchg_acquire(&lock->count, -1) == 1))
 			break;
 
 		/*
@@ -867,7 +868,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	prev = atomic_xchg(&lock->count, -1);
+	prev = atomic_xchg_acquire(&lock->count, -1);
 	if (likely(prev == 1)) {
 		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);