author		Chris Metcalf <cmetcalf@mellanox.com>	2016-04-26 09:54:56 -0400
committer	Chris Metcalf <cmetcalf@mellanox.com>	2016-04-26 09:54:56 -0400
commit		153847586b0aaa9482e42bc7e95b24adb87a1859 (patch)
tree		1bd99e332e83378d2067ede0d6d981d4a3e2fe78
parent		85f5251792abcd6dae897df8eb4ca0e890bc5882 (diff)
tile: clarify barrier semantics of atomic_add_return
A recent discussion on LKML made it clear that the one-line comment
previously in atomic_add_return() was not clear enough:

  https://lkml.kernel.org/r/571E87E2.3010306@mellanox.com

Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
-rw-r--r--	arch/tile/include/asm/atomic_64.h	17
1 file changed, 15 insertions(+), 2 deletions(-)
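Background on the contract being documented: the kernel requires every value-returning atomic operation, atomic_add_return() included, to act as a full memory barrier both before and after the operation (see Documentation/memory-barriers.txt). A minimal sketch of a caller that depends on this contract; the names refcnt, payload, published, and first_user() are hypothetical, invented for illustration:

  #include <linux/atomic.h>
  #include <linux/compiler.h>

  static atomic_t refcnt = ATOMIC_INIT(0);
  static int payload;		/* hypothetical shared data */
  static int published;		/* hypothetical flag */

  void first_user(void)
  {
  	payload = 42;		/* plain store */
  	/*
  	 * The implied full barrier before the atomic keeps the payload
  	 * store from sinking below it; the implied full barrier after
  	 * keeps the published store from rising above it.  Callers may
  	 * therefore rely on it as an implicit smp_mb() on both sides.
  	 */
  	if (atomic_add_return(1, &refcnt) == 1)
  		WRITE_ONCE(published, 1);
  }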
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 51cabc26e387..b0531a623653 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -37,12 +37,25 @@ static inline void atomic_add(int i, atomic_t *v)
 	__insn_fetchadd4((void *)&v->counter, i);
 }
 
+/*
+ * Note a subtlety of the locking here. We are required to provide a
+ * full memory barrier before and after the operation. However, we
+ * only provide an explicit mb before the operation. After the
+ * operation, we use barrier() to get a full mb for free, because:
+ *
+ * (1) The barrier directive to the compiler prohibits any instructions
+ * being statically hoisted before the barrier;
+ * (2) the microarchitecture will not issue any further instructions
+ * until the fetchadd result is available for the "+ i" add instruction;
+ * (3) the smp_mb before the fetchadd ensures that no other memory
+ * operations are in flight at this point.
+ */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int val;
 	smp_mb();  /* barrier for proper semantics */
 	val = __insn_fetchadd4((void *)&v->counter, i) + i;
-	barrier();  /* the "+ i" above will wait on memory */
+	barrier();  /* equivalent to smp_mb(); see block comment above */
 	return val;
 }
 
@@ -95,7 +108,7 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	int val;
 	smp_mb();  /* barrier for proper semantics */
 	val = __insn_fetchadd((void *)&v->counter, i) + i;
-	barrier();  /* the "+ i" above will wait on memory */
+	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
 	return val;
 }
 
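Taken together, the three points in the new comment say that tile's in-order microarchitecture makes the trailing barrier free: barrier() stops the compiler from hoisting later instructions above the fetchadd, and the hardware cannot issue anything further until the fetchadd result feeds the "+ i" add. A portable C11 sketch of the same contract (an illustrative analogue, not the kernel's generic implementation) would need real fences on both sides:

  #include <stdatomic.h>

  /* What atomic_add_return() must provide on a generic machine:
   * a full fence on each side of the read-modify-write. */
  static inline int add_return_sketch(int i, atomic_int *v)
  {
  	int val;

  	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() before */
  	val = atomic_fetch_add_explicit(v, i, memory_order_relaxed) + i;
  	atomic_thread_fence(memory_order_seq_cst);	/* tile: barrier() suffices */
  	return val;
  }

On tile, the second fence collapses to a compiler-only barrier() because the leading smp_mb() has already drained all earlier memory operations and the core stalls on the fetchadd result, which together give ordering equivalent to a full smp_mb().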