author    Will Deacon <will.deacon@arm.com>	2010-01-20 13:05:07 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>	2010-02-15 16:39:50 -0500
commit    24b44a66fa240f6fc63343623ca730d39754047e (patch)
tree      c271eea37b9559a06b11eb87743856be9a254663 /arch/arm
parent    a9221de66d2d94e6e34c3f56bbdd744935020737 (diff)
ARM: 5889/1: Add atomic64 routines for ARMv6k and above.
In preparation for perf-events support, ARM needs to support atomic64_t
operations. v6k and above support the ldrexd and strexd instructions to
do just that.

This patch adds atomic64 support to the ARM architecture. v6k and above
make use of new instructions whilst older cores fall back on the generic
solution using spinlocks. If and when v7-M cores are supported by Linux,
they will need to fall back on the spinlock implementation too.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
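For context, a minimal usage sketch of the interface this patch introduces; the bytes_rx counter and the rx_account()/rx_total() helpers are hypothetical, only the atomic64_* calls and ATOMIC64_INIT come from the patch below.

/* Hypothetical driver statistics counter using the new atomic64 API. */
#include <asm/atomic.h>

static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static void rx_account(u64 len)
{
	/* Lock-free ldrexd/strexd loop on v6k+; generic fallback elsewhere. */
	atomic64_add(len, &bytes_rx);
}

static u64 rx_total(void)
{
	return atomic64_read(&bytes_rx);
}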
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig               |   1
-rw-r--r--  arch/arm/include/asm/atomic.h  | 228
2 files changed, 229 insertions, 0 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b224216c11db..762ae536f909 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
 	select HAVE_IDE
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
+	select GENERIC_ATOMIC64 if (!CPU_32v6K)
 	select HAVE_OPROFILE
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%1]\n"
+"	strexd	%0, %2, %H2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%2]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %3\n"
+		"teqeq		%H1, %H3\n"
+		"strexdeq	%0, %4, %H4, [%2]"
+		: "=&r" (res), "=&r" (oldval)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%1, %3, %H3, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	teq	%0, %4\n"
+"	teqeq	%H0, %H4\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %5\n"
+"	adc	%H0, %H0, %H5\n"
+"	strexd	%2, %0, %H0, [%3]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
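On cores without ldrexd/strexd (and, as the commit message notes, any future v7-M support), GENERIC_ATOMIC64 is selected and the generic spinlock-based implementation is used instead. A minimal sketch of that fallback approach, for illustration only; this is not the kernel's actual lib/atomic64.c, which distributes contention across a hashed set of locks:

/* Illustrative spinlock fallback: serialise each 64-bit operation when
 * the CPU has no 64-bit exclusive load/store pair. */
#include <linux/spinlock.h>
#include <linux/types.h>

typedef struct {
	u64 counter;
} atomic64_t;

static DEFINE_SPINLOCK(atomic64_lock);	/* single lock for simplicity */

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	unsigned long flags;
	u64 val;

	spin_lock_irqsave(&atomic64_lock, flags);
	val = v->counter + i;
	v->counter = val;
	spin_unlock_irqrestore(&atomic64_lock, flags);

	return val;
}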