path: root/arch/arm/include/asm/atomic.h
author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-19 08:47:57 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-19 08:47:57 -0400
commit	40d743b8c16a8cf6e30c1d941aa6147f9550ea75 (patch)
tree	9fcdf9a06b18a275253048d1ea7c9803cec38845 /arch/arm/include/asm/atomic.h
parent	7da18afa423f167e7ef3c9728e584d8bf05bd55a (diff)
parent	83e686ea0291ee93b87dcdc00b96443b80de56c9 (diff)
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6
Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r--	arch/arm/include/asm/atomic.h	26
1 file changed, 7 insertions(+), 19 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e5..d0daeab2234e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
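
Why dropping the ldrex/strex-based atomic_set() is safe: a word-aligned str is already a single atomic store on ARM, so the old loop existed only to interact correctly with the local exclusive monitor, which a plain store does not clear on some implementations. Once every exception return executes clrex (or a dummy strex), an interrupted ldrex/strex sequence can never complete against a stale reservation, and plain assignment suffices. The remaining ARMv6 ops keep the retry loop; the sketch below shows the same pattern in self-contained form (my_atomic_t and my_atomic_add are illustrative names, not the kernel's, and barriers/"memory" clobbers are elided as in the kernel code of this era). It should build with any ARMv6+ GCC toolchain.

/* Stand-in for the kernel's atomic_t; illustrative only. */
typedef struct { int counter; } my_atomic_t;

/* Plain store: safe because clrex on exception return clears the monitor. */
#define my_atomic_set(v, i)	((v)->counter = (i))

static inline void my_atomic_add(int i, my_atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ my_atomic_add\n"
"1:	ldrex	%0, [%2]\n"	/* load-exclusive: open a reservation */
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"	/* store succeeds only if still exclusive */
"	teq	%1, #0\n"
"	bne	1b"		/* reservation lost: retry */
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

strex writes 0 to its status register on success and 1 if the reservation was lost to an intervening exclusive access or exception, so the loop retries until the read-modify-write lands as one atomic unit.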