-rw-r--r--	arch/arm/kernel/calls.S		|  8
-rw-r--r--	arch/arm/kernel/entry-armv.S	| 49
-rw-r--r--	arch/arm/kernel/entry-common.S	| 20
3 files changed, 55 insertions, 22 deletions
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 2ad4aa2a1536..55076a75e5bf 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -131,7 +131,7 @@ __syscall_start:
 		.long	sys_wait4
 /* 115 */	.long	sys_swapoff
 		.long	sys_sysinfo
-		.long	sys_ipc_wrapper
+		.long	sys_ipc
 		.long	sys_fsync
 		.long	sys_sigreturn_wrapper
 /* 120 */	.long	sys_clone_wrapper
@@ -254,7 +254,7 @@ __syscall_start:
 		.long	sys_fremovexattr
 		.long	sys_tkill
 		.long	sys_sendfile64
-/* 240 */	.long	sys_futex_wrapper
+/* 240 */	.long	sys_futex
 		.long	sys_sched_setaffinity
 		.long	sys_sched_getaffinity
 		.long	sys_io_setup
@@ -284,7 +284,7 @@ __syscall_start:
 		.long	sys_fstatfs64
 		.long	sys_tgkill
 		.long	sys_utimes
-/* 270 */	.long	sys_arm_fadvise64_64_wrapper
+/* 270 */	.long	sys_arm_fadvise64_64
 		.long	sys_pciconfig_iobase
 		.long	sys_pciconfig_read
 		.long	sys_pciconfig_write
@@ -333,7 +333,7 @@ __syscall_start:
 		.long	sys_inotify_init
 		.long	sys_inotify_add_watch
 		.long	sys_inotify_rm_watch
-		.long	sys_mbind_wrapper
+		.long	sys_mbind
 /* 320 */	.long	sys_get_mempolicy
 		.long	sys_set_mempolicy
 __syscall_end:
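Each entry changed above belongs to a call whose arguments spill past r0-r3: five or six argument words, or 64-bit values occupying register pairs, which is why it previously needed a *_wrapper stub. From user space such calls are typically made through syscall(2); the snippet below is only an illustration of the six-argument case, using futex as the example, and is not part of this patch.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* futex(2) takes six argument words.  On ARM the fifth and sixth land in
 * r4 and r5 at SWI time; after this patch vector_swi spills them to the
 * kernel stack so sys_futex can be entered directly, without the old
 * sys_futex_wrapper stub.
 */
static int futex_wake(int *uaddr, int nr_wake)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0);
}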
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d9fb819bf7cc..2a8d27e18fa7 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -614,6 +614,47 @@ __kuser_helper_start:
 /*
  * Reference prototype:
  *
+ *	void __kernel_memory_barrier(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	none
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef void (__kernel_dmb_t)(void);
+ *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+ *
+ * Apply any needed memory barrier to preserve consistency with data modified
+ * manually and __kuser_cmpxchg usage.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_dmb() \
+ *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
+ *	        : : : "lr","cc" )
+ */
+
+__kuser_memory_barrier:				@ 0xffff0fa0
+
+#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+	mov	pc, lr
+
+	.align	5
+
+/*
+ * Reference prototype:
+ *
  *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
  *
  * Input:
@@ -642,6 +683,8 @@ __kuser_helper_start:
  * The C flag is also set if *ptr was changed to allow for assembly
  * optimization in the calling code.
  *
+ * Note: this routine already includes memory barriers as needed.
+ *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 
 #else
 
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
 	rsbs	r0, r3, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	mov	pc, lr
 
 #endif
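The fixed addresses above (0xffff0fa0 for the new barrier helper, 0xffff0fc0 for __kuser_cmpxchg) and the __kernel_dmb typedef are part of the user-visible helper ABI quoted in the comments. A minimal user-space sketch of how the pair might be used, assuming only what those comments state; the cmpxchg typedef is modelled on the dmb one shown in the diff and is not itself taken from this patch.

/* Function-pointer views of the two helpers at their fixed addresses,
 * following the pattern shown in the entry-armv.S comments. */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Compare-and-swap loop: __kernel_cmpxchg returns 0 when *ptr was updated,
 * so retry until it succeeds.  Per the note added above, the helper now
 * performs whatever barriers are needed, so no explicit __kernel_dmb is
 * required around it; __kernel_dmb remains useful for ordering plain
 * loads and stores against another CPU.
 */
static inline void atomic_add(volatile int *ptr, int val)
{
	int old;
	do {
		old = *ptr;
	} while (__kernel_cmpxchg(old, old + val, (int *)ptr) != 0);
}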
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f7f183075237..e2b42997ad33 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -145,7 +145,7 @@ ENTRY(vector_swi)
 #endif
 	enable_irq
 
-	str	r4, [sp, #-S_OFF]!		@ push fifth arg
+	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
 	get_thread_info tsk
 	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
@@ -204,7 +204,7 @@ ENTRY(sys_call_table)
  * Special system call wrappers
  */
 @ r0 = syscall number
-@ r5 = syscall table
+@ r8 = syscall table
 		.type	sys_syscall, #function
 sys_syscall:
 		eor	scno, r0, #__NR_SYSCALL_BASE
@@ -255,22 +255,6 @@ sys_sigaltstack_wrapper:
 		ldr	r2, [sp, #S_OFF + S_SP]
 		b	do_sigaltstack
 
-sys_futex_wrapper:
-		str	r5, [sp, #4]		@ push sixth arg
-		b	sys_futex
-
-sys_arm_fadvise64_64_wrapper:
-		str	r5, [sp, #4]		@ push r5 to stack
-		b	sys_arm_fadvise64_64
-
-sys_mbind_wrapper:
-		str	r5, [sp, #4]
-		b	sys_mbind
-
-sys_ipc_wrapper:
-		str	r5, [sp, #4]		@ push sixth arg
-		b	sys_ipc
-
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
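This is why vector_swi must now save both r4 and r5: ARM system calls pass up to six argument words in r0-r5, while the AAPCS-conforming C sys_* functions receive only the first four in registers and look for the rest on the stack. The removed wrappers each copied r5 there by hand; stmdb sp!, {r4, r5} now does it once for every call. A hedged sketch of the user-space side follows, with the EABI-style call number in r7 used purely because it is easy to express in C; this kernel's OABI encodes the number in the swi instruction, but the argument registers are the same.

/* Illustration only: raw six-argument system call on ARM.  The constraint
 * style and the use of r7 for the call number are assumptions made for a
 * compact example, not taken from any particular libc.
 */
static inline long raw_syscall6(long nr, long a1, long a2, long a3,
				long a4, long a5, long a6)
{
	register long r0 asm("r0") = a1;
	register long r1 asm("r1") = a2;
	register long r2 asm("r2") = a3;
	register long r3 asm("r3") = a4;
	register long r4 asm("r4") = a5;	/* fifth arg: saved by vector_swi */
	register long r5 asm("r5") = a6;	/* sixth arg: saved by vector_swi */
	register long r7 asm("r7") = nr;

	asm volatile("swi 0"
		     : "=r" (r0)
		     : "0" (r0), "r" (r1), "r" (r2), "r" (r3),
		       "r" (r4), "r" (r5), "r" (r7)
		     : "memory");
	return r0;
}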