author		Nicolas Pitre <nico@cam.org>			2007-11-20 11:20:29 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-11-26 14:43:58 -0500
commit		b49c0f24cf6744a3f4fd09289fe7cade349dead5 (patch)
tree		23d244eae940f2e36a9bc1a2e77b8f1cc53e071a /arch/arm/kernel
parent		aeb747afb3fb1f42d9c82615a103882f7f97f291 (diff)
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space.  However, it can produce spurious false negatives if a
processor exception occurs in the middle of the operation.  Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds, to implement an atomic increment for example.

Some use cases which don't involve a loop require that the operation be
100% reliable though.  This patch changes the implementation so as to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.

Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):

#include <stdio.h>

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

int main()
{
	int i, x = 0;
	for (i = 0; i < 100000000; i++) {
		int v = x;
		if (__kernel_cmpxchg(v, v+1, &x))
			printf("failed at %d: %d vs %d\n", i, v, x);
	}
	printf("done with %d vs %d\n", i, x);
	return 0;
}

Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
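For contrast, the conventional looping usage that tolerates a transient
failure looks like the sketch below (ARM-Linux-only, since it calls into
the vector page; same __kernel_cmpxchg binding as the test program above,
with the helper returning 0 when the swap happened and nonzero otherwise):

#include <stdio.h>

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Retry until the swap succeeds: a spurious "failure" merely costs one
 * more pass through the loop, which is why looping callers never
 * noticed the problem this patch fixes. */
static void atomic_add(int *ptr, int val)
{
	int old;
	do {
		old = *ptr;		/* snapshot the current value */
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
}

int main(void)
{
	int x = 0;
	atomic_add(&x, 5);
	printf("x = %d\n", x);		/* x = 5 */
	return 0;
}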
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/entry-armv.S	94
-rw-r--r--	arch/arm/kernel/traps.c		 3
2 files changed, 58 insertions, 39 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d645897652c2..0d1bbea84df0 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -339,16 +339,6 @@ __pabt_svc:
 	str	r1, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-#ifndef CONFIG_MMU
-#warning "NPTL on non MMU needs fixing"
-#else
-	@ make sure our user space atomic helper is aborted
-	cmp	r2, #TASK_SIZE
-	bichs	r3, r3, #PSR_Z_BIT
-#endif
-#endif
-
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
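The removal above is the old strategy: whenever an exception was taken
while the interrupted pc was above TASK_SIZE (i.e. inside the vector
page), the entry code cleared the Z flag so the helper's conditional
store would be skipped and failure returned.  A hypothetical C model of
the resulting false negative (names are illustrative, not kernel code):

/* 'interrupted' stands for an exception landing anywhere in the
 * helper, after which "bichs r3, r3, #PSR_Z_BIT" cleared Z. */
static int old_cmpxchg_model(int oldval, int newval, int *ptr, int interrupted)
{
	int z_flag = !interrupted;

	if (z_flag && *ptr == oldval) {
		*ptr = newval;
		return 0;		/* genuine success */
	}
	/* Reported even when *ptr == oldval, purely because an
	 * exception hit at the wrong moment: the spurious failure. */
	return 1;
}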
@@ -372,9 +362,25 @@ __pabt_svc:
 	zero_fp
 	.endm
 
+	.macro	kuser_cmpxchg_check
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#ifndef CONFIG_MMU
+#warning "NPTL on non MMU needs fixing"
+#else
+	@ Make sure our user space atomic helper is restarted
+	@ if it was interrupted in a critical region.  Here we
+	@ perform a quick test inline since it should be false
+	@ 99.9999% of the time.  The rest is done out of line.
+	cmp	r2, #TASK_SIZE
+	blhs	kuser_cmpxchg_fixup
+#endif
+#endif
+	.endm
+
 	.align	5
 __dabt_usr:
 	usr_entry
+	kuser_cmpxchg_check
 
 	@
 	@ Call the processor-specific abort handler:
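The same check is inserted into the IRQ entry path in the next hunk,
since IRQs and data aborts are the two exceptions that can preempt user
code inside the helper.  In rough C terms the combined inline test and
out-of-line fixup decide as follows (a sketch; TASK_SIZE and the
critical-section bounds are illustrative constants, the real arithmetic
lives in kuser_cmpxchg_fixup further down):

#define TASK_SIZE	0xbf000000UL	/* illustrative user/kernel split */
#define CRIT_START	0xffff0fc0UL	/* user address of label "1:" */
#define CRIT_LEN	8UL		/* label "2:" sits two insns later */

/* pc_usr is the interrupted user pc (r2 in the entry macros).  If it
 * fell inside the critical section, rewind it so the whole sequence
 * re-executes when the exception returns. */
static void kuser_cmpxchg_check_model(unsigned long *pc_usr)
{
	if (*pc_usr < TASK_SIZE)
		return;			/* ordinary user code: the 99.9999% case */
	if (*pc_usr - CRIT_START <= CRIT_LEN)	/* 1b <= pc <= 2b */
		*pc_usr = CRIT_START;		/* restart at 1b */
}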
@@ -404,6 +410,7 @@ __dabt_usr:
 	.align	5
 __irq_usr:
 	usr_entry
+	kuser_cmpxchg_check
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -669,7 +676,7 @@ __kuser_helper_start:
  *
  * Clobbered:
  *
- *	the Z flag might be lost
+ *	none
  *
  * Definition and user space usage example:
  *
@@ -730,9 +737,6 @@ __kuser_memory_barrier: @ 0xffff0fa0
  *
  * - This routine already includes memory barriers as needed.
  *
- * - A failure might be transient, i.e. it is possible, although unlikely,
- *   that "failure" be returned even if *ptr == oldval.
- *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
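With that caveat gone, loop-free patterns become legitimate.  A
hypothetical example, reusing the __kernel_cmpxchg binding from the
sketches above: a one-shot claim where a failure must reliably mean
that another thread won the race, not that an interrupt fired:

/* Returns true for exactly one caller.  Before this patch the losing
 * return could not be trusted without wrapping it in a retry loop. */
static int claim_once(int *flag)
{
	return __kernel_cmpxchg(0, 1, flag) == 0;
}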
@@ -769,46 +773,62 @@ __kuser_cmpxchg: @ 0xffff0fc0
 
 #elif __LINUX_ARM_ARCH__ < 6
 
+#ifdef CONFIG_MMU
+
 	/*
-	 * Theory of operation:
-	 *
-	 * We set the Z flag before loading oldval. If ever an exception
-	 * occurs we can not be sure the loaded value will still be the same
-	 * when the exception returns, therefore the user exception handler
-	 * will clear the Z flag whenever the interrupted user code was
-	 * actually from the kernel address space (see the usr_entry macro).
-	 *
-	 * The post-increment on the str is used to prevent a race with an
-	 * exception happening just after the str instruction which would
-	 * clear the Z flag although the exchange was done.
+	 * The only thing that can break atomicity in this cmpxchg
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle
+	 * of the critical sequence.  To prevent this, code is added to
+	 * the IRQ and data abort exception handlers to set the pc back
+	 * to the beginning of the critical section if it is found to be
+	 * within that critical section (see kuser_cmpxchg_fixup).
 	 */
-#ifdef CONFIG_MMU
-	teq	ip, ip			@ set Z flag
-	ldr	ip, [r2]		@ load current val
-	add	r3, r2, #1		@ prepare store ptr
-	teqeq	ip, r0			@ compare with oldval if still allowed
-	streq	r1, [r3, #-1]!		@ store newval if still allowed
-	subs	r0, r2, r3		@ if r2 == r3 the str occured
+1:	ldr	r3, [r2]		@ load current val
+	subs	r3, r3, r0		@ compare with oldval
+2:	streq	r1, [r2]		@ store newval if eq
+	rsbs	r0, r3, #0		@ set return val and C flag
+	usr_ret	lr
+
+	.text
+kuser_cmpxchg_fixup:
+	@ Called from kuser_cmpxchg_check macro.
+	@ r2 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
+	subs	r8, r2, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+	mov	pc, lr
+	.previous
+
 #else
 #warning "NPTL on non MMU needs fixing"
 	mov	r0, #-1
 	adds	r0, r0, #0
-#endif
 	usr_ret	lr
+#endif
 
 #else
 
 #ifdef CONFIG_SMP
 	mcr	p15, 0, r0, c7, c10, 5	@ dmb
 #endif
-	ldrex	r3, [r2]
+1:	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
+	teqeq	r3, #1
+	beq	1b
 	rsbs	r0, r3, #0
+	/* beware -- each __kuser slot must be 8 instructions max */
 #ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+	b	__kuser_memory_barrier
+#else
 	usr_ret	lr
+#endif
 
 #endif
 
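On ARMv6 and later no fixup is needed, but the old sequence had its own
spurious failure: strex reports failure not only on a value mismatch
but also when the exclusive reservation was lost (an intervening
interrupt, or another CPU touching the reservation granule).  The added
teqeq/beq pair now retries in that case.  A rough C model (strex_model
is an illustrative stand-in; plain C has no exclusive monitor):

/* Stand-in for strex: 0 = store performed with the reservation intact,
 * 1 = reservation lost.  Here it always succeeds. */
static int strex_model(int *ptr, int val)
{
	*ptr = val;
	return 0;
}

static int cmpxchg_v6_model(int oldval, int newval, int *ptr)
{
	int status;

	do {
		int cur = *ptr;			/* 1: ldrex r3, [r2]    */
		if (cur != oldval)		/*    subs  r3, r3, r0  */
			return 1;		/* genuine mismatch     */
		status = strex_model(ptr, newval); /* strexeq r3, r1, [r2] */
	} while (status == 1);			/* teqeq r3, #1; beq 1b */
	return 0;				/* rsbs r0, r3, #0      */
}

The retry loop fills the slot up to its 8-instruction budget, which is
why the SMP build now reaches its closing barrier by branching to
__kuser_memory_barrier instead of inlining a second dmb before usr_ret.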
@@ -829,7 +849,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
  *
  * Clobbered:
  *
- *	the Z flag might be lost
+ *	none
  *
  * Definition and user space usage example:
  *
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5a52dd7f97fe..c34db4e868fa 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,7 +509,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 	 * existence.  Don't ever use this from user code.
 	 */
 	case 0xfff0:
-	{
+	for (;;) {
 		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
 					 struct pt_regs *regs);
 		unsigned long val;
@@ -545,7 +545,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		up_read(&mm->mmap_sem);
 		/* simulate a write access fault */
 		do_DataAbort(addr, 15 + (1 << 11), regs);
-		return -1;
 	}
 #endif
 
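On CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG kernels the 0xfff0 syscall performs
the cmpxchg on the user's behalf, and this is the matching fix: when the
target page was not present or not writable it used to raise a simulated
write fault and return -1, which user space saw as a failure; it now
raises the fault and retries.  An abridged C model of the new control
flow (page_writable and simulate_write_fault are illustrative stand-ins
for the page-table checks and the do_DataAbort() call):

/* Illustrative stand-ins, not kernel API. */
static int page_writable(unsigned long *ptr) { return 1; }
static void simulate_write_fault(unsigned long *ptr) { (void)ptr; }

static long sys_cmpxchg_model(unsigned long oldval, unsigned long newval,
			      unsigned long *ptr)
{
	for (;;) {
		if (page_writable(ptr)) {
			unsigned long val = *ptr - oldval;
			if (val == 0)
				*ptr = newval;	/* success also sets C */
			return val;		/* 0 means the swap happened */
		}
		/* Fix up the mapping via a simulated write fault, then
		 * retry instead of returning -1 as before. */
		simulate_write_fault(ptr);
	}
}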