author		Peter Zijlstra <peterz@infradead.org>	2014-03-23 11:57:20 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-08-14 06:48:04 -0400
commit		92ba1f530b4f90db78eb45f4b6598e75939146bd (patch)
tree		ee829d6e3a54262f4c75ea25f00977989651ad35
parent		aee9a55452f0371258e18b41649ce650ff344090 (diff)
locking,arch,arm64: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.
This also prepares for easy addition of new ops.
Requires the asm_op due to eor.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Gang <gang.chen@asianux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20140508135851.995123148@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/arm64/include/asm/atomic.h	197
1 file changed, 80 insertions(+), 117 deletions(-)
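To illustrate the folding pattern outside the kernel: the following is a minimal, standalone C sketch (not part of the patch) in which a plain C compound-assignment operator stands in for the arm64 ldxr/stxr loop, so it is not atomic. It only shows how one macro parameterised by the op generates the whole family, and how a later op would be a single extra ATOMIC_OPS line; names mirror the macros introduced in the diff below.

/*
 * Illustration only: the folding pattern, with "c_op" (a plain C
 * operator) standing in for the arm64 ldxr/<asm_op>/stxr loop.
 * NOT atomic -- it only demonstrates the macro structure.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define ATOMIC_OP(op, c_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)             \
{                                                               \
        v->counter c_op i;                                      \
}

#define ATOMIC_OP_RETURN(op, c_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)     \
{                                                               \
        v->counter c_op i;                                      \
        return v->counter;                                      \
}

#define ATOMIC_OPS(op, c_op)                                    \
        ATOMIC_OP(op, c_op)                                     \
        ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
/* a new op added later would be one line, e.g. ATOMIC_OPS(xor, ^=) */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
        atomic_t v = { .counter = 0 };

        atomic_add(5, &v);                        /* v.counter == 5 */
        printf("%d\n", atomic_sub_return(2, &v)); /* prints 3 */
        return 0;
}

The real patch keeps a per-op asm_op parameter for the same reason: ops whose instruction mnemonic differs from the C-level name (such as eor, mentioned in the commit message) can then be folded by the same macros.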
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e..b83c325e587f 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -43,69 +43,51 @@
  * store exclusive to ensure that these are atomic. We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        asm volatile("// atomic_add\n"
-"1:     ldxr    %w0, %2\n"
-"       add     %w0, %w0, %w3\n"
-"       stxr    %w1, %w0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        asm volatile("// atomic_add_return\n"
-"1:     ldxr    %w0, %2\n"
-"       add     %w0, %w0, %w3\n"
-"       stlxr   %w1, %w0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i)
-        : "memory");
-
-        smp_mb();
-        return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
 
-        asm volatile("// atomic_sub\n"
-"1:     ldxr    %w0, %2\n"
-"       sub     %w0, %w0, %w3\n"
-"       stxr    %w1, %w0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i));
+#define ATOMIC_OP(op, asm_op)                                           \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        unsigned long tmp;                                              \
+        int result;                                                     \
+                                                                        \
+        asm volatile("// atomic_" #op "\n"                              \
+"1:     ldxr    %w0, %2\n"                                              \
+"       " #asm_op "     %w0, %w0, %w3\n"                                \
+"       stxr    %w1, %w0, %2\n"                                         \
+"       cbnz    %w1, 1b"                                                \
+        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+        : "Ir" (i));                                                    \
+}                                                                       \
+
+#define ATOMIC_OP_RETURN(op, asm_op)                                    \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        unsigned long tmp;                                              \
+        int result;                                                     \
+                                                                        \
+        asm volatile("// atomic_" #op "_return\n"                       \
+"1:     ldxr    %w0, %2\n"                                              \
+"       " #asm_op "     %w0, %w0, %w3\n"                                \
+"       stlxr   %w1, %w0, %2\n"                                         \
+"       cbnz    %w1, 1b"                                                \
+        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+        : "Ir" (i)                                                      \
+        : "memory");                                                    \
+                                                                        \
+        smp_mb();                                                       \
+        return result;                                                  \
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
+#define ATOMIC_OPS(op, asm_op)                                          \
+        ATOMIC_OP(op, asm_op)                                           \
+        ATOMIC_OP_RETURN(op, asm_op)
 
-        asm volatile("// atomic_sub_return\n"
-"1:     ldxr    %w0, %2\n"
-"       sub     %w0, %w0, %w3\n"
-"       stlxr   %w1, %w0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i)
-        : "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
-        smp_mb();
-        return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
@@ -160,69 +142,50 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic64_read(v)        (*(volatile long *)&(v)->counter)
 #define atomic64_set(v,i)       (((v)->counter) = (i))
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
-        long result;
-        unsigned long tmp;
-
-        asm volatile("// atomic64_add\n"
-"1:     ldxr    %0, %2\n"
-"       add     %0, %0, %3\n"
-"       stxr    %w1, %0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i));
+#define ATOMIC64_OP(op, asm_op)                                         \
+static inline void atomic64_##op(long i, atomic64_t *v)                 \
+{                                                                       \
+        long result;                                                    \
+        unsigned long tmp;                                              \
+                                                                        \
+        asm volatile("// atomic64_" #op "\n"                            \
+"1:     ldxr    %0, %2\n"                                               \
+"       " #asm_op "     %0, %0, %3\n"                                   \
+"       stxr    %w1, %0, %2\n"                                          \
+"       cbnz    %w1, 1b"                                                \
+        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+        : "Ir" (i));                                                    \
+}                                                                       \
+
+#define ATOMIC64_OP_RETURN(op, asm_op)                                  \
+static inline long atomic64_##op##_return(long i, atomic64_t *v)        \
+{                                                                       \
+        long result;                                                    \
+        unsigned long tmp;                                              \
+                                                                        \
+        asm volatile("// atomic64_" #op "_return\n"                     \
+"1:     ldxr    %0, %2\n"                                               \
+"       " #asm_op "     %0, %0, %3\n"                                   \
+"       stlxr   %w1, %0, %2\n"                                          \
+"       cbnz    %w1, 1b"                                                \
+        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+        : "Ir" (i)                                                      \
+        : "memory");                                                    \
+                                                                        \
+        smp_mb();                                                       \
+        return result;                                                  \
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
-        long result;
-        unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op)                                        \
+        ATOMIC64_OP(op, asm_op)                                         \
+        ATOMIC64_OP_RETURN(op, asm_op)
 
-        asm volatile("// atomic64_add_return\n"
-"1:     ldxr    %0, %2\n"
-"       add     %0, %0, %3\n"
-"       stlxr   %w1, %0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i)
-        : "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
 
-        smp_mb();
-        return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
-        long result;
-        unsigned long tmp;
-
-        asm volatile("// atomic64_sub\n"
-"1:     ldxr    %0, %2\n"
-"       sub     %0, %0, %3\n"
-"       stxr    %w1, %0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
-        long result;
-        unsigned long tmp;
-
-        asm volatile("// atomic64_sub_return\n"
-"1:     ldxr    %0, %2\n"
-"       sub     %0, %0, %3\n"
-"       stlxr   %w1, %0, %2\n"
-"       cbnz    %w1, 1b"
-        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-        : "Ir" (i)
-        : "memory");
-
-        smp_mb();
-        return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 {