 Documentation/arm/kernel_user_helpers.txt | 64
 arch/arm/kernel/entry-armv.S              | 99
 2 files changed, 160 insertions(+), 3 deletions(-)
diff --git a/Documentation/arm/kernel_user_helpers.txt b/Documentation/arm/kernel_user_helpers.txt
index 0c33f72d1873..a17df9f91d16 100644
--- a/Documentation/arm/kernel_user_helpers.txt
+++ b/Documentation/arm/kernel_user_helpers.txt
@@ -201,3 +201,67 @@ typedef void (__kuser_dmb_t)(void);
 Notes:
 
   - Valid only if __kuser_helper_version >= 3 (from kernel version 2.6.15).
+
+kuser_cmpxchg64
+---------------
+
+Location:	0xffff0f60
+
+Reference prototype:
+
+  int __kuser_cmpxchg64(const int64_t *oldval,
+                        const int64_t *newval,
+                        volatile int64_t *ptr);
+
+Input:
+
+	r0 = pointer to oldval
+	r1 = pointer to newval
+	r2 = pointer to target value
+	lr = return address
+
+Output:
+
+	r0 = success code (zero or non-zero)
+	C flag = set if r0 == 0, clear if r0 != 0
+
+Clobbered registers:
+
+	r3, lr, flags
+
+Definition:
+
+  Atomically store the 64-bit value pointed to by *newval in *ptr only if
+  *ptr is equal to the 64-bit value pointed to by *oldval.  Return zero if
+  *ptr was changed, or non-zero if no exchange happened.
+
+  The C flag is also set if *ptr was changed, to allow for assembly
+  optimization in the calling code.
+
+Usage example:
+
+typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
+                                  const int64_t *newval,
+                                  volatile int64_t *ptr);
+#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
+
+int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
+{
+	int64_t old, new;
+
+	do {
+		old = *ptr;
+		new = old + val;
+	} while (__kuser_cmpxchg64(&old, &new, ptr));
+
+	return new;
+}
+
+Notes:
+
+  - This routine already includes memory barriers as needed.
+
+  - Due to the length of this sequence, it spans 2 conventional kuser
+    "slots", therefore 0xffff0f80 is not used as a valid entry point.
+
+  - Valid only if __kuser_helper_version >= 5 (from kernel version 3.1).
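
A minimal user-space sketch of the version check implied by the last note
above; it relies on the __kuser_helper_version word at 0xffff0ffc documented
earlier in kernel_user_helpers.txt, and the wrapper name is illustrative,
not part of the patch:

#include <stdint.h>

#define __kuser_helper_version (*(const int32_t *)0xffff0ffc)

/* Non-zero when the running kernel provides __kuser_cmpxchg64,
 * i.e. kuser helper version 5 or later. */
static int have_kuser_cmpxchg64(void)
{
	return __kuser_helper_version >= 5;
}
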
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 63f7907c4c3c..9be97deca215 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -383,7 +383,7 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -392,7 +392,7 @@ ENDPROC(__pabt_svc)
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
 	cmp	r2, #TASK_SIZE
-	blhs	kuser_cmpxchg_fixup
+	blhs	kuser_cmpxchg64_fixup
 #endif
 #endif
 	.endm
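
In C terms, the fast path of this macro is nothing more than the range test
below. This is a sketch only: the TASK_SIZE value and both function names
are stand-ins, and the real code runs in the exception entry path on the
saved registers.

#include <stdint.h>

#define TASK_SIZE 0xbf000000UL			/* illustrative value */

extern void kuser_cmpxchg64_fixup_stub(void);	/* stand-in for the fixup */

/* False for virtually every fault, so the common case costs one
 * compare and an untaken branch; the rest is done out of line. */
static inline void kuser_cmpxchg_check_sketch(uint32_t interrupted_pc)
{
	if (interrupted_pc >= TASK_SIZE)
		kuser_cmpxchg64_fixup_stub();
}
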
@@ -775,6 +775,99 @@ ENDPROC(__switch_to)
 	.globl	__kuser_helper_start
 __kuser_helper_start:
 
+/*
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
+ */
+
+__kuser_cmpxchg64:				@ 0xffff0f60
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f				@ it's 20 bits
+	swi	__ARM_NR_cmpxchg64
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, r5, [r0]			@ load old val
+	ldrd	r6, r7, [r1]			@ load new val
+	smp_dmb	arm
+1:	ldrexd	r0, r1, [r2]			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	strexdeq r3, r6, r7, [r2]		@ store newval if eq
+	teqeq	r3, #1				@ success?
+	beq	1b				@ if no then retry
+	smp_dmb	arm
+	rsbs	r0, r3, #0			@ set returned val and C flag
+	ldmfd	sp!, {r4, r5, r6, r7}
+	bx	lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU
+
+	/*
+	 * The only thing that can break atomicity in this cmpxchg64
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle of
+	 * the critical sequence.  The same strategy as for cmpxchg is used.
+	 */
+	stmfd	sp!, {r4, r5, r6, lr}
+	ldmia	r0, {r4, r5}			@ load old val
+	ldmia	r1, {r6, lr}			@ load new val
+1:	ldmia	r2, {r0, r1}			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	ldmfd	sp!, {r4, r5, r6, pc}
+
+	.text
+kuser_cmpxchg64_fixup:
+	@ Called from kuser_cmpxchg_check macro.
+	@ r2 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+	subs	r8, r2, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+	bcc	kuser_cmpxchg32_fixup
+#endif
+	mov	pc, lr
+	.previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
+	usr_ret	lr
+#endif
+
+#else
+#error "incoherent kernel configuration"
+#endif
+
+	/* pad to next slot */
+	.rept	(16 - (. - __kuser_cmpxchg64)/4)
+	.word	0
+	.endr
+
+	.align	5
+
 __kuser_memory_barrier:				@ 0xffff0fa0
 	smp_dmb	arm
 	usr_ret	lr
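
The address arithmetic in kuser_cmpxchg64_fixup reduces to a range check and
a rewind of the saved user pc. The sketch below restates it in C; the names
are illustrative, with restart and end standing for the user-space addresses
of the 1b and 2b labels:

#include <stdint.h>

/* If the interrupted pc lies anywhere inside the critical sequence
 * [restart, end], rewind it to restart so that the load/compare/store
 * runs again from the top when the thread resumes.  The unsigned
 * subtraction folds both bounds into one compare, mirroring the
 * subs/rsbcss pair in the assembly. */
static void cmpxchg64_fixup_sketch(uint32_t *saved_pc,
				   uint32_t restart, uint32_t end)
{
	if (*saved_pc - restart <= end - restart)
		*saved_pc = restart;
}
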
@@ -816,7 +909,7 @@ __kuser_cmpxchg:			@ 0xffff0fc0
 	usr_ret	lr
 
 	.text
-kuser_cmpxchg_fixup:
+kuser_cmpxchg32_fixup:
 	@ Called from kuser_cmpxchg_check macro.
 	@ r2 = address of interrupted insn (must be preserved).
 	@ sp = saved regs. r7 and r8 are clobbered.

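Finally, the contract given in the documentation ("store *newval in *ptr
only if *ptr equals *oldval, return zero if *ptr was changed") can be
modelled off target for exercising caller logic such as the atomic_add64
loop above. This is a single-threaded stand-in with none of the real
helper's atomicity or barriers:

#include <stdint.h>

static int cmpxchg64_model(const int64_t *oldval, const int64_t *newval,
			   volatile int64_t *ptr)
{
	if (*ptr != *oldval)
		return 1;	/* non-zero: no exchange happened */
	*ptr = *newval;
	return 0;		/* zero: *ptr was changed */
}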