author	Vineet Gupta <vgupta@synopsys.com>	2015-03-31 13:08:21 -0400
committer	Vineet Gupta <vgupta@synopsys.com>	2015-06-24 20:30:18 -0400
commit	04e2eee4b02edcafce96c9c37b31b1a3318291a4 (patch)
tree	5446970f7a4252bbbe36ae501dd5266854c13c73
parent	b8a033023994c4e59697bb3b16b441b38f258390 (diff)
ARC: Reduce bitops lines of code using macros
No semantic changes!
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
-rw-r--r--	arch/arc/include/asm/bitops.h	477
1 file changed, 144 insertions(+), 333 deletions(-)
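The whole patch is one mechanical transformation: the three hand-written variants of each routine (set/clear/change, which differ only in one C operator and one ARC instruction) are replaced by generator macros, and a single BIT_OPS() invocation per operation stamps out all four function families. Before reading the diff itself, here is a minimal user-space sketch of that token-pasting pattern. It is illustrative only, not the kernel code: the macro names mirror the patch, but the simplified bodies below drop the atomicity and barriers the real versions provide.

#include <stdio.h>

/* One generator per family: ## pastes the op name into the function
 * name; c_op is the C operator that varies between set/clear/change. */
#define BIT_OP(op, c_op)						\
static inline void op##_bit(unsigned long nr, unsigned long *m)	\
{									\
	m += nr >> 5;		/* step to the word holding the bit */	\
	*m = *m c_op (1UL << (nr & 0x1f));				\
}

#define TEST_N_BIT_OP(op, c_op)						\
static inline int test_and_##op##_bit(unsigned long nr, unsigned long *m)\
{									\
	unsigned long old;						\
									\
	m += nr >> 5;							\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

/* One line per bit operation generates the whole family */
#define BIT_OPS(op, c_op)	BIT_OP(op, c_op) TEST_N_BIT_OP(op, c_op)

BIT_OPS(set, |)		/* emits set_bit(), test_and_set_bit() */
BIT_OPS(clear, & ~)	/* emits clear_bit(), test_and_clear_bit() */
BIT_OPS(change, ^)	/* emits change_bit(), test_and_change_bit() */

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	set_bit(33, map);				/* word 1, bit 1 */
	printf("%d\n", test_and_change_bit(33, map));	/* 1: bit was set */
	printf("%lx %lx\n", map[0], map[1]);		/* 0 0 */
	return 0;
}

The real patch follows exactly this shape, with a third macro argument (asm_op) selecting the bset/bclr/bxor instruction inside the inline assembly.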
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index dd03fd931bb7..99fe118d3730 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -18,83 +18,50 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
 
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
 #if defined(CONFIG_ARC_HAS_LLSC)
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int temp;
-
-	m += nr >> 5;
-
-	/*
-	 * ARC ISA micro-optimization:
-	 *
-	 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction
-	 *  (mem pointer still needs adjustment to point to next word)
-	 *
-	 * Hence the masking to clamp @nr arg can be elided in general.
-	 *
-	 * However if @nr is a constant (above assumed it in a register),
-	 * and greater than 31, gcc can optimize away (x << 33) to 0,
-	 * as overflow, given the 32-bit ISA. Thus masking needs to be done
-	 * for constant @nr, but no code is generated due to const prop.
-	 */
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bset    %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bclr    %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
 
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bxor    %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned int temp;						\
+									\
+	m += nr >> 5;							\
+									\
+	/*								\
+	 * ARC ISA micro-optimization:					\
+	 *								\
+	 * Instructions dealing with bitpos only consider lower 5 bits	\
+	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
+	 *  (mem pointer still needs adjustment to point to next word)	\
+	 *								\
+	 * Hence the masking to clamp @nr arg can be elided in general.	\
+	 *								\
+	 * However if @nr is a constant (above assumed in a register),	\
+	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
+	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
+	 * done for const @nr, but no code is generated due to gcc	\
+	 * const prop.							\
+	 */								\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock       %0, [%1]	\n"				\
+	"	" #asm_op " %0, %0, %2	\n"				\
+	"	scond       %0, [%1]	\n"				\
+	"	bnz         1b		\n"				\
+	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
+	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
+	  "ir"(nr)							\
+	: "cc");							\
 }
 
 /*
@@ -108,91 +75,38 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
  * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
  * and the old value of bit is returned
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	/*
-	 * Explicit full memory barrier needed before/after as
-	 * LLOCK/SCOND themselves don't provide any such semantics
-	 */
-	smp_mb();
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%2]	\n"
-	"	bset    %1, %0, %3	\n"
-	"	scond   %1, [%2]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(old), "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-
-	smp_mb();
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int old, temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%2]	\n"
-	"	bclr    %1, %0, %3	\n"
-	"	scond   %1, [%2]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(old), "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-
-	smp_mb();
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int old, temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%2]	\n"
-	"	bxor    %1, %0, %3	\n"
-	"	scond   %1, [%2]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(old), "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-
-	smp_mb();
-
-	return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old, temp;					\
+									\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock       %0, [%2]	\n"				\
+	"	" #asm_op " %1, %0, %3	\n"				\
+	"	scond       %1, [%2]	\n"				\
+	"	bnz         1b		\n"				\
+	: "=&r"(old), "=&r"(temp)					\
+	: "r"(m), "ir"(nr)						\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return (old & (1 << nr)) != 0;					\
 }
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
-#include <asm/smp.h>
-
 /*
  * Non hardware assisted Atomic-R-M-W
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
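The barrier comment above is the subtle part of this hunk: LLOCK/SCOND give atomicity but no ordering, so the test_and_*() routines must bracket the retry loop with smp_mb() themselves. As a point of comparison (an editorial sketch, not part of the patch), the same "full barrier on both sides" contract can be written with the GCC builtin atomics, where __ATOMIC_SEQ_CST bundles the ordering into the read-modify-write:

/* Sketch: a test_and_set_bit()-alike with sequentially consistent
 * ordering. The SEQ_CST fetch-or plays the role of the
 * smp_mb(); llock/bset/scond; smp_mb() sequence the ARC code
 * constructs by hand. */
static inline int test_and_set_bit_seqcst(unsigned long nr, unsigned long *m)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long old;

	m += nr >> 5;
	old = __atomic_fetch_or(m, mask, __ATOMIC_SEQ_CST);
	return (old & mask) != 0;
}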
@@ -209,111 +123,43 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * at compile time)
  */
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	temp = *m;
-	*m = temp | (1UL << nr);
-
-	bitops_unlock(flags);
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	temp = *m;
-	*m = temp & ~(1UL << nr);
-
-	bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	temp = *m;
-	*m = temp ^ (1UL << nr);
-
-	bitops_unlock(flags);
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long temp, flags;					\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	/*								\
+	 * spin lock/unlock provide the needed smp_mb() before/after	\
+	 */								\
+	bitops_lock(flags);						\
+									\
+	temp = *m;							\
+	*m = temp c_op (1UL << nr);					\
+									\
+	bitops_unlock(flags);						\
 }
 
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	/*
-	 * spin lock/unlock provide the needed smp_mb() before/after
-	 */
-	bitops_lock(flags);
-
-	old = *m;
-	*m = old | (1 << nr);
-
-	bitops_unlock(flags);
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	old = *m;
-	*m = old & ~(1 << nr);
-
-	bitops_unlock(flags);
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	old = *m;
-	*m = old ^ (1 << nr);
-
-	bitops_unlock(flags);
-
-	return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old, flags;					\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	bitops_lock(flags);						\
+									\
+	old = *m;							\
+	*m = old c_op (1 << nr);					\
+									\
+	bitops_unlock(flags);						\
+									\
+	return (old & (1 << nr)) != 0;					\
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
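For the !LLSC configuration everything rides on bitops_lock()/bitops_unlock() from asm/smp.h, which is why the first hunk moved that include to the top of the file. Roughly, and this is an assumption about that header rather than anything shown in this diff, the pair reduces to IRQ disabling on UP and a shared spinlock on SMP, with the lock/unlock acting as the implied full barriers:

/* Approximate shape of the helpers in arch/arc/include/asm/smp.h
 * (assumed/simplified here; not part of this patch): */
#ifdef CONFIG_SMP
extern arch_spinlock_t smp_bitops_lock;

#define bitops_lock(flags)			\
do {						\
	local_irq_save(flags);			\
	arch_spin_lock(&smp_bitops_lock);	\
} while (0)

#define bitops_unlock(flags)			\
do {						\
	arch_spin_unlock(&smp_bitops_lock);	\
	local_irq_restore(flags);		\
} while (0)
#else
#define bitops_lock(flags)	local_irq_save(flags)
#define bitops_unlock(flags)	local_irq_restore(flags)
#endif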
@@ -322,86 +168,51 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * Non atomic variants
  **************************************/
 
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	temp = *m;
-	*m = temp | (1UL << nr);
-}
-
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	temp = *m;
-	*m = temp & ~(1UL << nr);
-}
-
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	temp = *m;
-	*m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	old = *m;
-	*m = old | (1 << nr);
-
-	return (old & (1 << nr)) != 0;
+#define __BIT_OP(op, c_op, asm_op)					\
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
+{									\
+	unsigned long temp;						\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	temp = *m;							\
+	*m = temp c_op (1UL << nr);					\
 }
 
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	old = *m;
-	*m = old & ~(1 << nr);
-
-	return (old & (1 << nr)) != 0;
+#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old;						\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	old = *m;							\
+	*m = old c_op (1 << nr);					\
+									\
+	return (old & (1 << nr)) != 0;					\
 }
 
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	old = *m;
-	*m = old ^ (1 << nr);
-
-	return (old & (1 << nr)) != 0;
-}
+#define BIT_OPS(op, c_op, asm_op)					\
+									\
+	/* set_bit(), clear_bit(), change_bit() */			\
+	BIT_OP(op, c_op, asm_op)					\
+									\
+	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+	TEST_N_BIT_OP(op, c_op, asm_op)					\
+									\
+	/* __set_bit(), __clear_bit(), __change_bit() */		\
+	__BIT_OP(op, c_op, asm_op)					\
+									\
+	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+	__TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
 
 /*
  * This routine doesn't need to be atomic.
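A closing note on the one non-obvious line every generated body shares, if (__builtin_constant_p(nr)) nr &= 0x1f;. As the comment in the first hunk explains, the ARC shifter ignores all but the low 5 bits of a runtime shift count, so no mask is needed there; an out-of-range constant shift, however, is undefined C that gcc may fold to 0, so constants get masked, and constant propagation makes that mask free. A small stand-alone illustration (host C, not ARC-specific):

#include <stdio.h>

int main(void)
{
	unsigned int nr = 33;

	/* Constant count: (1u << 33) is undefined on a 32-bit type and
	 * gcc may fold it to 0; masking first is well defined and folds
	 * to (1u << 1) at compile time, i.e. the mask costs nothing. */
	printf("%u\n", 1u << (33 & 0x1f));	/* 2 */

	/* Runtime count: compiles to one shift instruction; ARC's ASL
	 * would wrap the count mod 32 even without the explicit mask. */
	printf("%u\n", 1u << (nr & 0x1f));	/* 2 */
	return 0;
}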