author		Nicolas Pitre <nicolas.pitre@linaro.org>	2011-06-19 23:36:03 -0400
committer	Nicolas Pitre <nico@fluxnic.net>	2011-06-20 10:49:24 -0400
commit		37b8304642c7f91df54888955c373ae89b577fcc (patch)
tree		217e97c421fe58bb715b3d31bfc11f9a08cc8738 /arch/arm/kernel/entry-armv.S
parent		2c53b436a30867eb6b47dd7bab23ba638d1fb0d2 (diff)
ARM: kuser: move interface documentation out of the source code
Digging into an assembly file in order to get information about the kuser
helpers is not very convenient. Let's move that information to a better
formatted file in Documentation/arm/ and improve on it a bit.

Thanks to Dave Martin <dave.martin@linaro.org> for the initial cleanup and
clarifications.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Acked-by: Dave Martin <dave.martin@linaro.org>
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--	arch/arm/kernel/entry-armv.S	153
1 file changed, 1 insertion(+), 152 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e8d88567680..63f7907c4c3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -754,31 +754,12 @@ ENDPROC(__switch_to)
 /*
  * User helpers.
  *
- * These are segment of kernel provided user code reachable from user space
- * at a fixed address in kernel memory. This is used to provide user space
- * with some operations which require kernel help because of unimplemented
- * native feature and/or instructions in many ARM CPUs. The idea is for
- * this code to be executed directly in user mode for best efficiency but
- * which is too intimate with the kernel counter part to be left to user
- * libraries. In fact this code might even differ from one CPU to another
- * depending on the available instruction set and restrictions like on
- * SMP systems. In other words, the kernel reserves the right to change
- * this code as needed without warning. Only the entry points and their
- * results are guaranteed to be stable.
- *
  * Each segment is 32-byte aligned and will be moved to the top of the high
  * vector page. New segments (if ever needed) must be added in front of
  * existing ones. This mechanism should be used only for things that are
  * really small and justified, and not be abused freely.
  *
- * User space is expected to implement those things inline when optimizing
- * for a processor that has the necessary native support, but only if such
- * resulting binaries are already to be incompatible with earlier ARM
- * processors due to the use of unsupported instructions other than what
- * is provided here. In other words don't make binaries unable to run on
- * earlier processors just for the sake of not using these kernel helpers
- * if your compiled code is not going to use the new instructions for other
- * purpose.
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 THUMB(	.arm	)
 
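Note on the interface being documented: each helper occupies a 32-byte slot
near the top of the vector page and is called like an ordinary function at a
fixed address. Collecting the reference prototypes from the comment blocks
removed below gives a minimal set of user-space bindings (C, for illustration
only; not part of this diff):

    /* User-space bindings for the kuser helpers, assembled from the
     * reference prototypes in the removed comments below. */
    typedef void (__kernel_dmb_t)(void);
    typedef int  (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
    typedef int  (__kernel_get_tls_t)(void);

    #define __kernel_dmb      (*(__kernel_dmb_t *)0xffff0fa0)
    #define __kernel_cmpxchg  (*(__kernel_cmpxchg_t *)0xffff0fc0)
    #define __kernel_get_tls  (*(__kernel_get_tls_t *)0xffff0fe0)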
@@ -794,98 +775,12 @@ ENDPROC(__switch_to)
 	.globl	__kuser_helper_start
 __kuser_helper_start:
 
-/*
- * Reference prototype:
- *
- *	void __kernel_memory_barrier(void)
- *
- * Input:
- *
- *	lr = return address
- *
- * Output:
- *
- *	none
- *
- * Clobbered:
- *
- *	none
- *
- * Definition and user space usage example:
- *
- *	typedef void (__kernel_dmb_t)(void);
- *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
- *
- * Apply any needed memory barrier to preserve consistency with data modified
- * manually and __kuser_cmpxchg usage.
- *
- * This could be used as follows:
- *
- * #define __kernel_dmb() \
- *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- *		: : : "r0", "lr","cc" )
- */
-
 __kuser_memory_barrier:			@ 0xffff0fa0
 	smp_dmb	arm
 	usr_ret	lr
 
 	.align	5
 
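The removed comment invokes __kernel_dmb via hand-written inline assembly;
calling through the binding above from plain C shows the same barrier in a
typical producer pattern (a hypothetical sketch, not from the source):

    /* Hypothetical producer: the kuser barrier orders the store to
     * 'data' before the store to 'ready', so a consumer that sees
     * ready == 1 also sees the new data. */
    static int data, ready;

    void publish(int v)
    {
            data = v;
            __kernel_dmb();
            ready = 1;
    }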
-/*
- * Reference prototype:
- *
- *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
- *
- * Input:
- *
- *	r0 = oldval
- *	r1 = newval
- *	r2 = ptr
- *	lr = return address
- *
- * Output:
- *
- *	r0 = returned value (zero or non-zero)
- *	C flag = set if r0 == 0, clear if r0 != 0
- *
- * Clobbered:
- *
- *	r3, ip, flags
- *
- * Definition and user space usage example:
- *
- *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
- *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- * Notes:
- *
- * - This routine already includes memory barriers as needed.
- *
- * For example, a user space atomic_add implementation could look like this:
- *
- * #define atomic_add(ptr, val) \
- *	({ register unsigned int *__ptr asm("r2") = (ptr); \
- *	   register unsigned int __result asm("r1"); \
- *	   asm volatile ( \
- *	       "1: @ atomic_add\n\t" \
- *	       "ldr	r0, [r2]\n\t" \
- *	       "mov	r3, #0xffff0fff\n\t" \
- *	       "add	lr, pc, #4\n\t" \
- *	       "add	r1, r0, %2\n\t" \
- *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
- *	       "bcc	1b" \
- *	       : "=&r" (__result) \
- *	       : "r" (__ptr), "rIL" (val) \
- *	       : "r0","r3","ip","lr","cc","memory" ); \
- *	   __result; })
- */
-
 __kuser_cmpxchg:				@ 0xffff0fc0
 
 #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
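The atomic_add example in the comment block just removed is pure inline
assembly; the same contract written as a plain C retry loop makes the return
convention explicit, since zero comes back only when *ptr was actually
changed (a sketch, assuming the bindings given earlier):

    /* Sketch: atomic increment built on __kernel_cmpxchg.  Retry until
     * the compare-and-swap succeeds (returns zero); the helper itself
     * supplies any needed memory barriers. */
    static inline void atomic_inc(int *ptr)
    {
            int old;
            do {
                    old = *ptr;
            } while (__kernel_cmpxchg(old, old + 1, ptr) != 0);
    }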
@@ -959,39 +854,6 @@ kuser_cmpxchg_fixup:
 
 	.align	5
 
-/*
- * Reference prototype:
- *
- *	int __kernel_get_tls(void)
- *
- * Input:
- *
- *	lr = return address
- *
- * Output:
- *
- *	r0 = TLS value
- *
- * Clobbered:
- *
- *	none
- *
- * Definition and user space usage example:
- *
- *	typedef int (__kernel_get_tls_t)(void);
- *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
- *
- * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
- *
- * This could be used as follows:
- *
- * #define __kernel_get_tls() \
- *	({ register unsigned int __val asm("r0"); \
- *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
- *		: "=r" (__val) : : "lr","cc" ); \
- *	   __val; })
- */
-
 __kuser_get_tls:				@ 0xffff0fe0
 	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 	usr_ret	lr
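In plain C the TLS helper reduces to a single call through its binding
(illustration only):

    /* Sketch: fetch the TLS value previously set with the
     * __ARM_NR_set_tls syscall. */
    void *get_thread_pointer(void)
    {
            return (void *)__kernel_get_tls();
    }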
@@ -1000,19 +862,6 @@ __kuser_get_tls:			@ 0xffff0fe0
 	.word	0			@ 0xffff0ff0 software TLS value, then
 	.endr				@ pad up to __kuser_helper_version
 
-/*
- * Reference declaration:
- *
- *	extern unsigned int __kernel_helper_version;
- *
- * Definition and user space usage example:
- *
- *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
- *
- * User space may read this to determine the current number of helpers
- * available.
- */
-
 __kuser_helper_version:			@ 0xffff0ffc
 	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
 
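Because the version word counts the 32-byte helper slots, user space is
expected to read it before relying on a given helper. A sketch of that check
follows; the threshold of 2 for cmpxchg reflects that helper's position in
the list (see the documentation file this commit adds):

    /* Sketch: refuse to run on kernels exporting fewer helpers than
     * this binary needs; __kernel_cmpxchg requires version >= 2. */
    #include <stdio.h>
    #include <stdlib.h>

    #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

    static void check_kuser_helpers(void)
    {
            if (__kernel_helper_version < 2) {
                    fprintf(stderr, "kernel too old: no __kernel_cmpxchg\n");
                    exit(EXIT_FAILURE);
            }
    }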