author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2009-05-28 16:07:39 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-05-28 16:10:31 -0400
commit		ecd322c9b3e4ac70f9f108badde3eb6b99c7993d (patch)
tree		d3c19fab3aa17461480eaae5d535b8a9c99a82fc
parent		bac4e960b5ce2453d862beaf20e59aa68af3b43a (diff)
[ARM] Add cmpxchg support for ARMv6+ systems (v5)
Add cmpxchg/cmpxchg64 support for ARMv6K and ARMv7 systems
(original patch from Catalin Marinas <catalin.marinas@arm.com>)

The cmpxchg and cmpxchg64 functions can be implemented using the
LDREX*/STREX* instructions. Since operand lengths other than 32 bits are
required, the full implementations are only available if the ARMv6K
extensions are present (for the LDREXB, LDREXH and LDREXD instructions).
For ARMv6, only 32-bit cmpxchg is available.

Mathieu:
- Make cmpxchg_local always available, with the best implementation for
  all type sizes (1, 2, 4 bytes).
- Make cmpxchg64_local always available.
- Use the "Ir" constraint for the "old" operand, as atomic_cmpxchg in
  atomic.h does.

Changes since v3:
- Added "memory" clobbers (thanks to Nicolas Pitre).
- Removed __asmeq(), only needed for old compilers, very unlikely on ARMv6+.

Note: ARMv7-M should eventually be ifdefed out of cmpxchg64, but it is
not supported by the Linux kernel currently.

Put back ARM < v6 cmpxchg support.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Catalin Marinas <catalin.marinas@arm.com>
CC: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
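[Editor's note] As background for readers of this patch, here is a minimal,
hypothetical sketch (not part of the commit) of the usual retry idiom built
on the cmpxchg() macro the patch introduces; the function name counter_add()
is invented for illustration:

	/*
	 * Hypothetical illustration only: a lock-free add built on cmpxchg().
	 * cmpxchg(ptr, old, new) atomically stores new to *ptr iff *ptr == old,
	 * and always returns the value previously found at *ptr.
	 */
	static inline void counter_add(unsigned int *ctr, unsigned int val)
	{
		unsigned int old = *ctr;
		unsigned int seen;

		for (;;) {
			seen = cmpxchg(ctr, old, old + val);
			if (seen == old)
				break;	/* our update won the race */
			old = seen;	/* lost: retry against the fresh value */
		}
	}

The full-barrier cmpxchg() is what such cross-CPU usage needs; the _local
variants the patch also adds trade those barriers away when only the current
CPU is involved.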
-rw-r--r--	arch/arm/include/asm/system.h | 173 ++++++++++++++++++++++++++++++
1 file changed, 173 insertions(+), 0 deletions(-)
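[Editor's note] The 64-bit variant deserves a usage note: cmpxchg64() relies
on the LDREXD/STREXD doubleword exclusives (ARMv6K and later), which operate
on an even/odd register pair; that is why the implementation below pins its
operands to r0, r2 and r4. A minimal, hypothetical sketch of the resulting
call-site idiom (seq_advance() is an invented name; u64 is the usual kernel
typedef from <linux/types.h>):

	/*
	 * Hypothetical illustration only: bumping a 64-bit sequence value
	 * with cmpxchg64(). The loop retries until no other CPU has
	 * modified *seq between the read and the doubleword store.
	 */
	static inline u64 seq_advance(u64 *seq)
	{
		u64 old;

		do {
			old = *seq;
		} while (cmpxchg64(seq, old, old + 1) != old);

		return old + 1;
	}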
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 7fce8f3b391d..d65b2f5bf41f 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -319,6 +319,12 @@ extern void enable_hlt(void);
 
 #include <asm-generic/cmpxchg-local.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -332,6 +338,173 @@ extern void enable_hlt(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
+#else	/* __LINUX_ARM_ARCH__ >= 6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long oldval, res;
+
+	switch (size) {
+#ifdef CONFIG_CPU_32v6K
+	case 1:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexb	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexbeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	case 2:
+		do {
+			asm volatile("@ __cmpxchg2\n"
+			"	ldrexh	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexheq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+#endif /* CONFIG_CPU_32v6K */
+	case 4:
+		do {
+			asm volatile("@ __cmpxchg4\n"
+			"	ldrex	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	default:
+		__bad_cmpxchg(ptr, size);
+		oldval = 0;
+	}
+
+	return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+					 unsigned long new, int size)
+{
+	unsigned long ret;
+
+	smp_mb();
+	ret = __cmpxchg(ptr, old, new, size);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
+					  (unsigned long)(o),		\
+					  (unsigned long)(n),		\
+					  sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long ret;
+
+	switch (size) {
+#ifndef CONFIG_CPU_32v6K
+	case 1:
+	case 2:
+		ret = __cmpxchg_local_generic(ptr, old, new, size);
+		break;
+#endif	/* !CONFIG_CPU_32v6K */
+	default:
+		ret = __cmpxchg(ptr, old, new, size);
+	}
+
+	return ret;
+}
+
+#define cmpxchg_local(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
+
+#ifdef CONFIG_CPU_32v6K
+
+/*
+ * Note: ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it
+ * should not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register unsigned long long oldval asm("r0");
+	register unsigned long long __old asm("r2") = old;
+	register unsigned long long __new asm("r4") = new;
+	unsigned long res;
+
+	do {
+		asm volatile(
+		"	@ __cmpxchg8\n"
+		"	ldrexd	%1, %H1, [%2]\n"
+		"	mov	%0, #0\n"
+		"	teq	%1, %3\n"
+		"	teqeq	%H1, %H3\n"
+		"	strexdeq %0, %4, %H4, [%2]\n"
+			: "=&r" (res), "=&r" (oldval)
+			: "r" (ptr), "Ir" (__old), "r" (__new)
+			: "memory", "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg64(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					    (unsigned long long)(o),	\
+					    (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),			\
+					 (unsigned long long)(o),	\
+					 (unsigned long long)(n)))
+
+#else /* !CONFIG_CPU_32v6K */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* CONFIG_CPU_32v6K */
+
+#endif	/* __LINUX_ARM_ARCH__ >= 6 */
+
 #endif /* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
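
[Editor's note] For readers comparing these primitives with portable code:
the difference between the _mb wrappers and the _local variants above is
roughly the difference between a sequentially consistent and a relaxed
compare-exchange in C11. A hypothetical userspace analogy (not kernel code;
cas_mb() and cas_local() are invented names, and
atomic_compare_exchange_strong_explicit is standard C11 from <stdatomic.h>):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Analogue of cmpxchg(): full ordering on both sides of the CAS,
	 * as __cmpxchg_mb() achieves with its surrounding smp_mb() calls. */
	static bool cas_mb(atomic_uint *p, unsigned int old, unsigned int new)
	{
		return atomic_compare_exchange_strong_explicit(p, &old, new,
				memory_order_seq_cst, memory_order_seq_cst);
	}

	/* Analogue of cmpxchg_local(): still an atomic update, but with no
	 * ordering guarantees against other CPUs' accesses. */
	static bool cas_local(atomic_uint *p, unsigned int old, unsigned int new)
	{
		return atomic_compare_exchange_strong_explicit(p, &old, new,
				memory_order_relaxed, memory_order_relaxed);
	}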