commit 4be7dd393515615430a4d07ca1ffceaf2a331620
tree   a69d51f7059eae1a168f58aca3f853ddbf755c54 /arch/hexagon
parent 0c074cbc33091dd69fe70ec27474d228c3184860
author    Peter Zijlstra <peterz@infradead.org>  2016-04-17 19:16:07 -0400
committer Ingo Molnar <mingo@kernel.org>         2016-06-16 04:48:25 -0400

locking/atomic, arch/hexagon: Implement atomic_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives; these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops, where the state prior to modification cannot be reconstructed
from the result.
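
To make the distinction concrete: a one-shot flag can be claimed
race-free with fetch-or, because the returned pre-OR value says whether
the bit was already set, whereas an or-return primitive only yields the
post-OR value, from which the prior bit state is unrecoverable. Below
is a minimal user-space sketch using C11 <stdatomic.h>, whose
atomic_fetch_or() has the same return-the-old-value semantics as the
primitives added here; the claim() helper and flag layout are
illustrative, not part of this patch:

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_uint flags;	/* bit 0: "already claimed" (illustrative) */

  /* Returns 1 only for the caller that flips bit 0 first.  fetch_or
   * hands back the value *before* the OR, so the old bit tells us
   * whether someone beat us to it; after the (irreversible) OR the
   * bit is always set and the prior state is gone. */
  static int claim(void)
  {
  	unsigned int old = atomic_fetch_or(&flags, 1u);
  	return (old & 1u) == 0;
  }

  int main(void)
  {
  	printf("first claim:  %d\n", claim());	/* 1: bit was clear */
  	printf("second claim: %d\n", claim());	/* 0: bit already set */
  	return 0;
  }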
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-hexagon@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/hexagon'):

 arch/hexagon/include/asm/atomic.h | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 55696c4100d4..07dbb3332b4a 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 	);								\
 }									\
 
-#define ATOMIC_OP_RETURN(op)							\
+#define ATOMIC_OP_RETURN(op)						\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	int output;							\
@@ -127,16 +127,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return output;							\
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)						\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int output, val;						\
+									\
+	__asm__ __volatile__ (						\
+		"1:	%0 = memw_locked(%2);\n"			\
+		"	%1 = "#op "(%0,%3);\n"				\
+		"	memw_locked(%2,P3)=%1;\n"			\
+		"	if !P3 jump 1b;\n"				\
+		: "=&r" (output), "=&r" (val)				\
+		: "r" (&v->counter), "r" (i)				\
+		: "memory", "p3"					\
+	);								\
+	return output;							\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
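
For reference, here is a sketch of what the preprocessor produces for
ATOMIC_FETCH_OP(add) via the ATOMIC_OPS(add) invocation above; the
comments are added here and are not in the source. It is the usual
load-locked/store-conditional retry loop: memw_locked(%2) loads the
word and takes the reservation, the predicated store
memw_locked(%2,P3)=%1 succeeds only if the reservation held, and the
pre-operation value is what gets returned:

  static inline int atomic_fetch_add(int i, atomic_t *v)
  {
  	int output, val;

  	__asm__ __volatile__ (
  		"1:	%0 = memw_locked(%2);\n"	/* old value -> output */
  		"	%1 = add(%0,%3);\n"		/* new value -> val */
  		"	memw_locked(%2,P3)=%1;\n"	/* try conditional store */
  		"	if !P3 jump 1b;\n"		/* raced: reload and retry */
  		: "=&r" (output), "=&r" (val)
  		: "r" (&v->counter), "r" (i)
  		: "memory", "p3"
  	);
  	return output;	/* value _before_ the add */
  }

The one non-mechanical line in the patch is
"#define atomic_fetch_or atomic_fetch_or": it marks atomic_fetch_or()
as architecture-provided, so that the #ifndef-guarded generic fallback
that <linux/atomic.h> carried at the time is not compiled in.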