author		Nicolas Pitre <nico@cam.org>			2005-12-19 17:20:51 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-12-19 17:20:51 -0500
commit		7c612bfd4ed3064fd48a4877a114c8186547367b (patch)
tree		bd307ea5bf9c0ad16ad1a590490d9095107d35fc /arch/arm/kernel/entry-armv.S
parent		567bd98017d9c9f2ac1c148ddc78c062e8abd398 (diff)
[ARM] 3210/1: add missing memory barrier helper for NPTL support
Patch from Nicolas Pitre

Strictly speaking, the NPTL kernel helpers are required for pre-ARMv6 only. They are available on ARMv6+ as well for obvious compatibility reasons. However, there are cases where extra memory barriers are needed when using an SMP ARMv6 machine but not on pre-ARMv6.

This patch adds a memory barrier kernel helper that glibc can use as needed for pre-ARMv6 binaries to be forward compatible with an SMP kernel on ARMv6, as well as the necessary dmb instructions to the cmpxchg helper.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Acked-by: Daniel Jacobowitz <dan@codesourcery.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
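To make the forward-compatibility point concrete, here is a minimal sketch of how a pre-ARMv6 binary might use the new helper at its fixed address 0xffff0fa0 (the call sequence shown in the patch computes 0xffff0fff - 95 = 0xffff0fa0 while priming lr with the return address). The typedef and address come from the patch itself; shared_data, data_ready and publish() are illustrative names only:

	typedef void (__kernel_dmb_t)(void);
	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

	int shared_data;
	int data_ready;

	/* Producer side of a publish/consume pattern: without the barrier,
	 * an observer on an SMP ARMv6 system could see data_ready == 1
	 * before the store to shared_data is visible. */
	void publish(int value)
	{
		shared_data = value;
		__kernel_dmb();		/* plain return on pre-ARMv6 / UP kernels,
					   a real dmb on SMP ARMv6 */
		data_ready = 1;
	}

The same binary runs unchanged on both kernel generations, which is exactly the forward compatibility the description above refers to.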
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--	arch/arm/kernel/entry-armv.S	49
1 files changed, 49 insertions, 0 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d9fb819bf7cc..2a8d27e18fa7 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -614,6 +614,47 @@ __kuser_helper_start:
 /*
  * Reference prototype:
  *
+ * void __kernel_memory_barrier(void)
+ *
+ * Input:
+ *
+ * lr = return address
+ *
+ * Output:
+ *
+ * none
+ *
+ * Clobbered:
+ *
+ * the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ * typedef void (__kernel_dmb_t)(void);
+ * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+ *
+ * Apply any needed memory barrier to preserve consistency with data modified
+ * manually and __kuser_cmpxchg usage.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_dmb() \
+ *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
+ *                 : : : "lr","cc" )
+ */
+
+__kuser_memory_barrier:			@ 0xffff0fa0
+
+#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+	mov	pc, lr
+
+	.align	5
+
+/*
+ * Reference prototype:
+ *
  * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
  *
  * Input:
@@ -642,6 +683,8 @@ __kuser_helper_start:
  * The C flag is also set if *ptr was changed to allow for assembly
  * optimization in the calling code.
  *
+ * Note: this routine already includes memory barriers as needed.
+ *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg: @ 0xffff0fc0
 
 #else
 
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
 	rsbs	r0, r3, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	mov	pc, lr
 
 #endif
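Since the cmpxchg helper now "already includes memory barriers as needed", a user-space retry loop built on it needs no explicit __kernel_dmb of its own. A minimal sketch under that assumption, using the cmpxchg reference prototype documented in this file; atomic_add here is an illustrative plain-C stand-in for the hand-written asm example the comment block refers to:

	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	static int atomic_add(int *ptr, int val)
	{
		int old, new;

		do {
			old = *ptr;
			new = old + val;
			/* zero means *ptr was updated; non-zero means another
			 * thread won the race, so reload and try again */
		} while (__kernel_cmpxchg(old, new, ptr) != 0);

		return new;
	}

On SMP ARMv6 the dmb before the ldrex and after the strex give each successful cmpxchg full-barrier semantics; on pre-ARMv6 the same binary keeps working because, per the commit message, no extra barriers are needed there.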