-rw-r--r--  arch/arm/include/asm/atomic.h   26
-rw-r--r--  arch/arm/kernel/entry-armv.S     7
-rw-r--r--  arch/arm/kernel/entry-header.S  14
3 files changed, 21 insertions, 26 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e5..d0daeab2234e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
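
Note: the ARMv6+ atomics that remain are ldrex/strex retry loops, and the new comment in the hunk above relies on their retry behaviour. As a sketch only, modeled on the kernel's existing ARMv6 atomic_add() (not code added by this patch): if an exception is taken between the ldrex and the strex, the clrex or dummy strex in the exception return path clears the local monitor, the strex then fails, and the loop retries with a fresh ldrex, so it observes any value a plain-store atomic_set() wrote in the meantime.

/* Sketch modeled on the kernel's ARMv6+ atomic_add(); shown only to
 * illustrate the ldrex/strex retry behaviour the new comment relies on. */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"		/* load counter, open an exclusive access */
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"		/* %1 becomes non-zero if exclusivity was lost */
"	teq	%1, #0\n"
"	bne	1b"			/* e.g. exception return did clrex: retry */
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
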
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3d727a8a23bc..a332bc7225bf 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -734,13 +734,6 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
-	strex	r5, r4, [ip]			@ Clear exclusive monitor
-#endif
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
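
The hunk above drops the monitor clearing from __switch_to(): with this patch it happens on every exception return instead (see entry-header.S below), which also covers context switches. A hypothetical helper, not part of the patch, showing the two mechanisms the patch selects between: ARMv6K and later have a dedicated clrex instruction, while plain ARMv6 falls back to a dummy strex, which clears the local monitor as a side effect.

/* Hypothetical helper (not in the patch) mirroring the CONFIG_CPU_32v6K /
 * CONFIG_CPU_V6 split used in entry-header.S below.  The scratch location
 * must be safe to overwrite: the dummy strex may actually perform the store. */
static inline void clear_exclusive_monitor(unsigned long *scratch)
{
#if defined(CONFIG_CPU_32v6K)
	__asm__ __volatile__("clrex" : : : "memory");
#elif defined(CONFIG_CPU_V6)
	unsigned long tmp;

	__asm__ __volatile__(
	"strex	%0, %1, [%2]"		/* dummy store-exclusive clears the monitor */
	: "=&r" (tmp)
	: "r" (0), "r" (scratch)
	: "memory");
#endif
}
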
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a4eaf4f920c5..e17e3c30d957 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,25 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+	ldr	r0, [sp]
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#endif
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
 	.else
@@ -98,6 +110,7 @@
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
 	.macro	svc_exit, rpsr
+	clrex					@ clear the exclusive monitor
 	ldr	r0, [sp, #S_SP]			@ top of the stack
 	ldr	r1, [sp, #S_PC]			@ return address
 	tst	r0, #4				@ orig stack 8-byte aligned?
@@ -110,6 +123,7 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
+	clrex					@ clear the exclusive monitor
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
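
A toy user-space model (purely illustrative, not kernel code; all names made up) of the uniprocessor scenario the clrex/dummy strex on exception return guards against: a task is interrupted between ldrex and strex, the exception path updates the counter with a plain store, and the cleared monitor forces the interrupted strex to fail and retry rather than silently overwrite the new value.

/* Toy model of the local exclusive monitor on a uniprocessor. */
#include <stdbool.h>
#include <stdio.h>

static bool monitor_exclusive;

static int model_ldrex(const int *addr)
{
	monitor_exclusive = true;	/* mark an open exclusive access */
	return *addr;
}

static bool model_strex(int *addr, int value)
{
	if (!monitor_exclusive)
		return false;		/* exclusivity lost: the store fails */
	*addr = value;
	monitor_exclusive = false;
	return true;
}

static void model_exception_return(void)
{
	monitor_exclusive = false;	/* the clrex / dummy strex added above */
}

int main(void)
{
	int counter = 0;

	/* Interrupted atomic_add(1, &counter): loaded 0, not yet stored back. */
	int old = model_ldrex(&counter);

	/* Exception taken here; before returning, something does
	 * atomic_set(&counter, 5), i.e. a plain store that does not
	 * touch the monitor. */
	counter = 5;
	model_exception_return();

	/* Back in the interrupted loop: strex fails, so it retries and
	 * picks up 5 instead of silently writing back a stale 1. */
	while (!model_strex(&counter, old + 1))
		old = model_ldrex(&counter);

	printf("counter = %d\n", counter);	/* prints 6, not 1 */
	return 0;
}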