diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2016-06-08 03:12:30 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2016-06-08 08:44:00 -0400 |
| commit | 8d53fa19041ae65c484d81d75179b4a577e6d8e4 (patch) | |
| tree | 87f60f848df339868a1dde68ec7ae3250229fefa /kernel/locking | |
| parent | ae0b5c2f0334f35d2b2effb13aa418bc1e2039b7 (diff) | |
locking/qspinlock: Clarify xchg_tail() ordering
While going over the code I noticed that xchg_tail() is a RELEASE but
had no obvious pairing commented.
It pairs with a somewhat unique address dependency through
decode_tail().
So the store-release of xchg_tail() is paired with the address
dependency formed by the load in xchg_tail() followed by the
dereference of the pointer computed from that load.
The @old -> @prev transformation itself is pure, and therefore does
not depend on external state, so that is immaterial wrt. ordering.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <waiman.long@hpe.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/qspinlock.c | 15 |
1 files changed, 13 insertions, 2 deletions
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 5fc8c311b8fe..ee7deb08d43d 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c | |||
| @@ -90,7 +90,7 @@ static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]); | |||
| 90 | * therefore increment the cpu number by one. | 90 | * therefore increment the cpu number by one. |
| 91 | */ | 91 | */ |
| 92 | 92 | ||
| 93 | static inline u32 encode_tail(int cpu, int idx) | 93 | static inline __pure u32 encode_tail(int cpu, int idx) |
| 94 | { | 94 | { |
| 95 | u32 tail; | 95 | u32 tail; |
| 96 | 96 | ||
| @@ -103,7 +103,7 @@ static inline u32 encode_tail(int cpu, int idx) | |||
| 103 | return tail; | 103 | return tail; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | static inline struct mcs_spinlock *decode_tail(u32 tail) | 106 | static inline __pure struct mcs_spinlock *decode_tail(u32 tail) |
| 107 | { | 107 | { |
| 108 | int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; | 108 | int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; |
| 109 | int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; | 109 | int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; |
| @@ -455,6 +455,8 @@ queue: | |||
| 455 | * pending stuff. | 455 | * pending stuff. |
| 456 | * | 456 | * |
| 457 | * p,*,* -> n,*,* | 457 | * p,*,* -> n,*,* |
| 458 | * | ||
| 459 | * RELEASE, such that the stores to @node must be complete. | ||
| 458 | */ | 460 | */ |
| 459 | old = xchg_tail(lock, tail); | 461 | old = xchg_tail(lock, tail); |
| 460 | next = NULL; | 462 | next = NULL; |
| @@ -465,6 +467,15 @@ queue: | |||
| 465 | */ | 467 | */ |
| 466 | if (old & _Q_TAIL_MASK) { | 468 | if (old & _Q_TAIL_MASK) { |
| 467 | prev = decode_tail(old); | 469 | prev = decode_tail(old); |
| 470 | /* | ||
| 471 | * The above xchg_tail() is also a load of @lock which generates, | ||
| 472 | * through decode_tail(), a pointer. | ||
| 473 | * | ||
| 474 | * The address dependency matches the RELEASE of xchg_tail() | ||
| 475 | * such that the access to @prev must happen after. | ||
| 476 | */ | ||
| 477 | smp_read_barrier_depends(); | ||
| 478 | |||
| 468 | WRITE_ONCE(prev->next, node); | 479 | WRITE_ONCE(prev->next, node); |
| 469 | 480 | ||
| 470 | pv_wait_node(node, prev); | 481 | pv_wait_node(node, prev); |
