about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Davidlohr Bueso <dave@stgolabs.net>	2015-09-14 03:37:24 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-09-18 03:27:29 -0400
commit	c55a6ffa6285e29f874ed403979472631ec70bff (patch)
tree	122677e6f6cb995ba2bf53591c9b24e8322e1ff0
parent	6e1e5196975fb7ecc501b3fe1075b77aea2b7839 (diff)
locking/osq: Relax atomic semantics
... by using acquire/release for ops around the lock->tail. As such,
weakly ordered archs can benefit from more relaxed use of barriers
when issuing atomics.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1442216244-4409-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/locking/osq_lock.c	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index dc85ee23a26f..d092a0c9c2d4 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -50,7 +50,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 
 	for (;;) {
 		if (atomic_read(&lock->tail) == curr &&
-		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
+		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -92,7 +92,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	node->next = NULL;
 	node->cpu = curr;
 
-	old = atomic_xchg(&lock->tail, curr);
+	/*
+	 * ACQUIRE semantics, pairs with corresponding RELEASE
+	 * in unlock() uncontended, or fastpath.
+	 */
+	old = atomic_xchg_acquire(&lock->tail, curr);
 	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
@@ -184,7 +188,8 @@ void osq_unlock(struct optimistic_spin_queue *lock)
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
+	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
+					  OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*