author:    Catalin Marinas <catalin.marinas@arm.com>  2009-09-18 18:27:05 -0400
committer: Catalin Marinas <catalin.marinas@arm.com>  2009-09-18 18:30:11 -0400
commit:    200b812d0084f800bc52465e273b118ff5f8141f
tree:      6dbef78960ad7b83a76df064751275913583068c  /arch/arm/include
parent:    df58bee21ed218cb7dfb561a590b1bd2a99531cf
Clear the exclusive monitor when returning from an exception
The patch adds a CLREX or dummy STREX to the exception return path. This is
needed because several atomic/locking operations use a pair of LDREX/STREXEQ
and the EQ condition may not always be satisfied. This would leave the
exclusive monitor status set and may cause problems with atomic/locking
operations in the interrupted code.

With this patch, the atomic_set() operation can be a simple STR instruction
(on SMP systems, the global exclusive monitor is cleared by STR anyway).
Clearing the exclusive monitor during context switch is no longer needed as
this is handled by the exception return path anyway.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Jamie Lokier <jamie@shareable.org>
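As context for the LDREX/STREXEQ pattern the message refers to, the sketch
below is loosely modelled on the ARMv6 atomic_cmpxchg() in this same header
(example_cmpxchg and the local atomic_t typedef are only there to keep the
snippet self-contained, not part of the patch). When the TEQ fails, the
STREXEQ is skipped, so the local exclusive monitor set by LDREX is left in
the exclusive state; the CLREX or dummy STREX added to the exception return
path clears it before control goes back to the interrupted code.

/*
 * Sketch only, loosely modelled on the ARMv6 atomic_cmpxchg() in this
 * header.  If the teq fails, the strexeq never executes and the local
 * exclusive monitor stays set -- the situation the clrex / dummy strex
 * on exception return cleans up.
 */
typedef struct { int counter; } atomic_t;	/* minimal stand-in for the kernel type */

static inline int example_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ example_cmpxchg\n"
		"ldrex	%1, [%2]\n"		/* load and mark the address exclusive */
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"	/* store only if it still holds 'old' */
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);			/* retry if the exclusive store failed */

	return oldval;
}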
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h  26
1 file changed, 7 insertions(+), 19 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e5..d0daeab2234e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
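The exception-return side of the patch lives outside arch/arm/include, so it
does not appear in this diffstat. As a rough sketch of what clearing the
exclusive monitor amounts to (clear_exclusive_monitor() is a made-up name,
and the real change is in the assembly return macros rather than a C
helper): CLREX where the architecture provides it, otherwise a dummy STREX
to a scratch word.

/*
 * Illustration only: the actual patch touches the assembly exception
 * return path, not a C helper, and clear_exclusive_monitor() is a
 * hypothetical name.  The #if guard conservatively uses CLREX only on
 * ARMv7 (it also exists on ARMv6K).
 */
static inline void clear_exclusive_monitor(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__("clrex" : : : "memory");
#else
	unsigned long status, scratch;

	/* A dummy STREX clears the local monitor regardless of whether
	 * the store itself succeeds. */
	__asm__ __volatile__("strex %0, %1, [%2]"
			     : "=&r" (status)
			     : "r" (0), "r" (&scratch)
			     : "memory");
#endif
}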