author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-06-28 16:23:00 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-06-28 16:23:00 -0400
commit		2ff0720933f9a5e502a46fb6ea14ceeaad65131f (patch)
tree		7d337086a6bd803df21e4df396886015ab872326
parent		b0af8dfdd67699e25083478c63eedef2e72ebd85 (diff)
parent		40fb79c8a88625504857d44de1bc89dc0341e618 (diff)

Merge branch 'cmpxchg64' of git://git.linaro.org/people/nico/linux into devel-stable
-rw-r--r--	Documentation/arm/kernel_user_helpers.txt	267
-rw-r--r--	arch/arm/kernel/entry-armv.S	246
2 files changed, 361 insertions(+), 152 deletions(-)
diff --git a/Documentation/arm/kernel_user_helpers.txt b/Documentation/arm/kernel_user_helpers.txt
new file mode 100644
index 000000000000..a17df9f91d16
--- /dev/null
+++ b/Documentation/arm/kernel_user_helpers.txt
@@ -0,0 +1,267 @@
Kernel-provided User Helpers
============================

These are segments of kernel-provided user code reachable from user space
at a fixed address in kernel memory.  They are used to provide user space
with some operations which require kernel help because of unimplemented
native features and/or instructions in many ARM CPUs.  The idea is for this
code to be executed directly in user mode for best efficiency, while still
being too intimate with the kernel counterpart to be left to user
libraries.  In fact this code might even differ from one CPU to another
depending on the available instruction set, or whether it is an SMP
system.  In other words, the kernel reserves the right to change this code
as needed without warning.  Only the entry points and their results as
documented here are guaranteed to be stable.

This is different from (but doesn't preclude) a full-blown VDSO
implementation; however, a VDSO would prevent some assembly tricks with
constants that allow for efficient branching to those code segments.  And
since those code segments only use a few cycles before returning to user
code, the overhead of a VDSO indirect far call would be measurable on such
minimalistic operations.
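
As an illustration of the trick in question, user code can reach one of
these fixed entry points with a constant and a small offset, with no
indirection through a function pointer.  The following sketch is lifted
from comments formerly carried in arch/arm/kernel/entry-armv.S; it calls
the helper at 0xffff0fe0 (__kuser_get_tls, documented below), and the
macro name here is illustrative:

/* Hypothetical name; 0xffff0fff - 31 = 0xffff0fe0, the __kuser_get_tls
   entry point. */
#define __kuser_get_tls_inline() \
	({ register unsigned int __val asm("r0"); \
	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
	        : "=r" (__val) : : "lr", "cc" ); \
	   __val; })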
22 | |||
23 | User space is expected to bypass those helpers and implement those things | ||
24 | inline (either in the code emitted directly by the compiler, or part of | ||
25 | the implementation of a library call) when optimizing for a recent enough | ||
26 | processor that has the necessary native support, but only if resulting | ||
27 | binaries are already to be incompatible with earlier ARM processors due to | ||
28 | useage of similar native instructions for other things. In other words | ||
29 | don't make binaries unable to run on earlier processors just for the sake | ||
30 | of not using these kernel helpers if your compiled code is not going to | ||
31 | use new instructions for other purpose. | ||
32 | |||
33 | New helpers may be added over time, so an older kernel may be missing some | ||
34 | helpers present in a newer kernel. For this reason, programs must check | ||
35 | the value of __kuser_helper_version (see below) before assuming that it is | ||
36 | safe to call any particular helper. This check should ideally be | ||
37 | performed only once at process startup time, and execution aborted early | ||
38 | if the required helpers are not provided by the kernel version that | ||
39 | process is running on. | ||
40 | |||
41 | kuser_helper_version | ||
42 | -------------------- | ||
43 | |||
44 | Location: 0xffff0ffc | ||
45 | |||
46 | Reference declaration: | ||
47 | |||
48 | extern int32_t __kuser_helper_version; | ||
49 | |||
50 | Definition: | ||
51 | |||
52 | This field contains the number of helpers being implemented by the | ||
53 | running kernel. User space may read this to determine the availability | ||
54 | of a particular helper. | ||
55 | |||
56 | Usage example: | ||
57 | |||
58 | #define __kuser_helper_version (*(int32_t *)0xffff0ffc) | ||
59 | |||
60 | void check_kuser_version(void) | ||
61 | { | ||
62 | if (__kuser_helper_version < 2) { | ||
63 | fprintf(stderr, "can't do atomic operations, kernel too old\n"); | ||
64 | abort(); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | Notes: | ||
69 | |||
70 | User space may assume that the value of this field never changes | ||
71 | during the lifetime of any single process. This means that this | ||
72 | field can be read once during the initialisation of a library or | ||
73 | startup phase of a program. | ||
74 | |||
kuser_get_tls
-------------

Location:	0xffff0fe0

Reference prototype:

  void * __kuser_get_tls(void);

Input:

  lr = return address

Output:

  r0 = TLS value

Clobbered registers:

  none

Definition:

  Get the TLS value as previously set via the __ARM_NR_set_tls syscall.

Usage example:

typedef void * (__kuser_get_tls_t)(void);
#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

void foo()
{
	void *tls = __kuser_get_tls();
	printf("TLS = %p\n", tls);
}

Notes:

  - Valid only if __kuser_helper_version >= 1 (from kernel version 2.6.12).

kuser_cmpxchg
-------------

Location:	0xffff0fc0

Reference prototype:

  int __kuser_cmpxchg(int32_t oldval, int32_t newval, volatile int32_t *ptr);

Input:

  r0 = oldval
  r1 = newval
  r2 = ptr
  lr = return address

Output:

  r0 = success code (zero or non-zero)
  C flag = set if r0 == 0, clear if r0 != 0

Clobbered registers:

  r3, ip, flags

Definition:

  Atomically store newval in *ptr only if *ptr is equal to oldval.
  Return zero if *ptr was changed or non-zero if no exchange happened.
  The C flag is also set if *ptr was changed to allow for assembly
  optimization in the calling code.

Usage example:

typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

int atomic_add(volatile int *ptr, int val)
{
	int old, new;

	do {
		old = *ptr;
		new = old + val;
	} while (__kuser_cmpxchg(old, new, ptr));

	return new;
}

Notes:

  - This routine already includes memory barriers as needed.

  - Valid only if __kuser_helper_version >= 2 (from kernel version 2.6.12).

kuser_memory_barrier
--------------------

Location:	0xffff0fa0

Reference prototype:

  void __kuser_memory_barrier(void);

Input:

  lr = return address

Output:

  none

Clobbered registers:

  none

Definition:

  Apply any needed memory barrier to preserve consistency with data modified
  manually and __kuser_cmpxchg usage.

Usage example:

typedef void (__kuser_dmb_t)(void);
#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)

Notes:

  - Valid only if __kuser_helper_version >= 3 (from kernel version 2.6.15).

kuser_cmpxchg64
---------------

Location:	0xffff0f60

Reference prototype:

  int __kuser_cmpxchg64(const int64_t *oldval,
                        const int64_t *newval,
                        volatile int64_t *ptr);

Input:

  r0 = pointer to oldval
  r1 = pointer to newval
  r2 = pointer to target value
  lr = return address

Output:

  r0 = success code (zero or non-zero)
  C flag = set if r0 == 0, clear if r0 != 0

Clobbered registers:

  r3, lr, flags

Definition:

  Atomically store the 64-bit value pointed to by newval in *ptr only if
  *ptr is equal to the 64-bit value pointed to by oldval.  Return zero if
  *ptr was changed or non-zero if no exchange happened.

  The C flag is also set if *ptr was changed to allow for assembly
  optimization in the calling code.

Usage example:

typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
                                  const int64_t *newval,
                                  volatile int64_t *ptr);
#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)

int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
{
	int64_t old, new;

	do {
		old = *ptr;
		new = old + val;
	} while (__kuser_cmpxchg64(&old, &new, ptr));

	return new;
}

Notes:

  - This routine already includes memory barriers as needed.

  - Due to the length of this sequence, it spans 2 conventional kuser
    "slots", therefore 0xffff0f80 is not used as a valid entry point.

  - Valid only if __kuser_helper_version >= 5 (from kernel version 3.1).
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd51ca9..8f9ab5c9015b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -383,7 +383,7 @@ ENDPROC(__pabt_svc)
 	.endm

 	.macro	kuser_cmpxchg_check
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -392,7 +392,7 @@ ENDPROC(__pabt_svc)
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
 	cmp	r2, #TASK_SIZE
-	blhs	kuser_cmpxchg_fixup
+	blhs	kuser_cmpxchg64_fixup
 #endif
 #endif
 	.endm
@@ -758,31 +758,12 @@ ENDPROC(__switch_to)
 /*
  * User helpers.
  *
- * These are segment of kernel provided user code reachable from user space
- * at a fixed address in kernel memory.  This is used to provide user space
- * with some operations which require kernel help because of unimplemented
- * native feature and/or instructions in many ARM CPUs. The idea is for
- * this code to be executed directly in user mode for best efficiency but
- * which is too intimate with the kernel counter part to be left to user
- * libraries. In fact this code might even differ from one CPU to another
- * depending on the available instruction set and restrictions like on
- * SMP systems. In other words, the kernel reserves the right to change
- * this code as needed without warning. Only the entry points and their
- * results are guaranteed to be stable.
- *
  * Each segment is 32-byte aligned and will be moved to the top of the high
  * vector page.  New segments (if ever needed) must be added in front of
  * existing ones.  This mechanism should be used only for things that are
  * really small and justified, and not be abused freely.
  *
- * User space is expected to implement those things inline when optimizing
- * for a processor that has the necessary native support, but only if such
- * resulting binaries are already to be incompatible with earlier ARM
- * processors due to the use of unsupported instructions other than what
- * is provided here. In other words don't make binaries unable to run on
- * earlier processors just for the sake of not using these kernel helpers
- * if your compiled code is not going to use the new instructions for other
- * purpose.
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 	THUMB(	.arm	)

@@ -799,96 +780,103 @@ ENDPROC(__switch_to)
 __kuser_helper_start:

 /*
- * Reference prototype:
- *
- *	void __kernel_memory_barrier(void)
- *
- * Input:
- *
- *	lr = return address
- *
- * Output:
- *
- *	none
- *
- * Clobbered:
- *
- *	none
- *
- * Definition and user space usage example:
- *
- *	typedef void (__kernel_dmb_t)(void);
- *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
- *
- * Apply any needed memory barrier to preserve consistency with data modified
- * manually and __kuser_cmpxchg usage.
- *
- * This could be used as follows:
- *
- * #define __kernel_dmb() \
- *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- *	        : : : "r0", "lr","cc" )
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
  */

-__kuser_memory_barrier:				@ 0xffff0fa0
+__kuser_cmpxchg64:				@ 0xffff0f60
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg64
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, r5, [r0]			@ load old val
+	ldrd	r6, r7, [r1]			@ load new val
+	smp_dmb	arm
+1:	ldrexd	r0, r1, [r2]			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	strexdeq r3, r6, r7, [r2]		@ store newval if eq
+	teqeq	r3, #1				@ success?
+	beq	1b				@ if no then retry
 	smp_dmb	arm
+	rsbs	r0, r3, #0			@ set returned val and C flag
+	ldmfd	sp!, {r4, r5, r6, r7}
+	bx	lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU
+
+	/*
+	 * The only thing that can break atomicity in this cmpxchg64
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle of
+	 * the critical sequence.  The same strategy as for cmpxchg is used.
+	 */
+	stmfd	sp!, {r4, r5, r6, lr}
+	ldmia	r0, {r4, r5}			@ load old val
+	ldmia	r1, {r6, lr}			@ load new val
+1:	ldmia	r2, {r0, r1}			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	ldmfd	sp!, {r4, r5, r6, pc}
+
+	.text
+kuser_cmpxchg64_fixup:
+	@ Called from kuser_cmpxchg_check macro.
+	@ r2 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+	subs	r8, r2, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+	bcc	kuser_cmpxchg32_fixup
+#endif
+	mov	pc, lr
+	.previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
 	usr_ret	lr
+#endif
+
+#else
+#error "incoherent kernel configuration"
+#endif
+
+	/* pad to next slot */
+	.rept	(16 - (. - __kuser_cmpxchg64)/4)
+	.word	0
+	.endr

 	.align	5

-/*
- * Reference prototype:
- *
- *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
- *
- * Input:
- *
- *	r0 = oldval
- *	r1 = newval
- *	r2 = ptr
- *	lr = return address
- *
- * Output:
- *
- *	r0 = returned value (zero or non-zero)
- *	C flag = set if r0 == 0, clear if r0 != 0
- *
- * Clobbered:
- *
- *	r3, ip, flags
- *
- * Definition and user space usage example:
- *
- *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
- *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- * Notes:
- *
- *    - This routine already includes memory barriers as needed.
- *
- * For example, a user space atomic_add implementation could look like this:
- *
- * #define atomic_add(ptr, val) \
- *	({ register unsigned int *__ptr asm("r2") = (ptr); \
- *	   register unsigned int __result asm("r1"); \
- *	   asm volatile ( \
- *	       "1: @ atomic_add\n\t" \
- *	       "ldr	r0, [r2]\n\t" \
- *	       "mov	r3, #0xffff0fff\n\t" \
- *	       "add	lr, pc, #4\n\t" \
- *	       "add	r1, r0, %2\n\t" \
- *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
- *	       "bcc	1b" \
- *	       : "=&r" (__result) \
- *	       : "r" (__ptr), "rIL" (val) \
- *	       : "r0","r3","ip","lr","cc","memory" ); \
- *	   __result; })
- */
+__kuser_memory_barrier:				@ 0xffff0fa0
+	smp_dmb	arm
+	usr_ret	lr
+
+	.align	5

 __kuser_cmpxchg:				@ 0xffff0fc0

@@ -925,7 +913,7 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 	usr_ret	lr

 	.text
-kuser_cmpxchg_fixup:
+kuser_cmpxchg32_fixup:
 	@ Called from kuser_cmpxchg_check macro.
 	@ r2 = address of interrupted insn (must be preserved).
 	@ sp = saved regs. r7 and r8 are clobbered.
@@ -963,39 +951,6 @@ kuser_cmpxchg_fixup:

 	.align	5

-/*
- * Reference prototype:
- *
- *	int __kernel_get_tls(void)
- *
- * Input:
- *
- *	lr = return address
- *
- * Output:
- *
- *	r0 = TLS value
- *
- * Clobbered:
- *
- *	none
- *
- * Definition and user space usage example:
- *
- *	typedef int (__kernel_get_tls_t)(void);
- *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
- *
- * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
- *
- * This could be used as follows:
- *
- * #define __kernel_get_tls() \
- *	({ register unsigned int __val asm("r0"); \
- *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
- *	        : "=r" (__val) : : "lr","cc" ); \
- *	   __val; })
- */
-
 __kuser_get_tls:				@ 0xffff0fe0
 	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 	usr_ret	lr
@@ -1004,19 +959,6 @@ __kuser_get_tls:				@ 0xffff0fe0
 	.word	0			@ 0xffff0ff0 software TLS value, then
 	.endr				@ pad up to __kuser_helper_version

-/*
- * Reference declaration:
- *
- *	extern unsigned int __kernel_helper_version;
- *
- * Definition and user space usage example:
- *
- *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
- *
- * User space may read this to determine the curent number of helpers
- * available.
- */
-
 __kuser_helper_version:				@ 0xffff0ffc
 	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
