author     Aoi Shinkai <shinkoi2005@gmail.com>   2009-06-10 12:15:42 -0400
committer  Paul Mundt <lethal@linux-sh.org>      2009-06-11 02:31:55 -0400
commit     4c7c99788631bab177bd51e15e893be4689bb085 (patch)
tree       52e50e0b2a30b31dc8c6460e98279b3c1b47c476 /arch
parent     f168dd00a9440a6f644db73bda47718fd12008e4 (diff)
sh: Fix sh4a llsc-based cmpxchg()
This fixes up a typo in the ll/sc based cmpxchg code which apparently
wasn't getting a lot of testing due to the swapped old/new pair. With
that fixed up, the ll/sc code also starts using it and provides its
own atomic_add_unless().

Signed-off-by: Aoi Shinkai <shinkoi2005@gmail.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
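As a point of reference, here is a minimal userland sketch (not kernel code; the helper name is made up for illustration) of the contract the fixed ll/sc assembly has to meet: cmpxchg() stores the new value only when the word still holds the expected old value, and it always returns the value found there beforehand. With the operands swapped, the success path stored the old value back, so the new value never landed.

/* Illustrative, single-threaded model of the cmpxchg() contract. */
#include <stdio.h>

static int sketch_cmpxchg(int *m, int old, int new)
{
	int prev = *m;

	if (prev == old)
		*m = new;	/* with the typo, "old" was effectively written here instead */
	return prev;
}

int main(void)
{
	int v = 5, ret;

	ret = sketch_cmpxchg(&v, 5, 6);		/* matches: v becomes 6 */
	printf("ret=%d v=%d\n", ret, v);	/* ret=5 v=6 */

	ret = sketch_cmpxchg(&v, 5, 7);		/* no match: v stays 6 */
	printf("ret=%d v=%d\n", ret, v);	/* ret=6 v=6 */
	return 0;
}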
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h  | 27
-rw-r--r--  arch/sh/include/asm/atomic.h       |  4
-rw-r--r--  arch/sh/include/asm/cmpxchg-llsc.h |  2
-rw-r--r--  arch/sh/include/asm/spinlock.h     |  2
4 files changed, 31 insertions(+), 4 deletions(-)
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f..b040e1e08610 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -104,4 +104,31 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	: "t");
 }
 
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return c != (u);
+}
+
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
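For readers unfamiliar with the helper added above, the following is a hedged, single-threaded sketch of the atomic_add_unless() contract (illustrative names, no atomicity; the real routine loops on atomic_cmpxchg() exactly as in the hunk):

/* Single-threaded model of atomic_add_unless(): add @a unless the value
 * already equals @u; return non-zero iff the value was not @u. */
#include <stdio.h>

static int sketch_add_unless(int *v, int a, int u)
{
	if (*v == u)
		return 0;	/* value equals @u: leave it untouched */
	*v += a;
	return 1;		/* value was not @u: the add went through */
}

int main(void)
{
	int refs = 2, got;

	/* The atomic_inc_not_zero() pattern: only take a reference
	 * while at least one is still held. */
	got = sketch_add_unless(&refs, 1, 0);
	printf("got=%d refs=%d\n", got, refs);	/* got=1 refs=3 */

	refs = 0;
	got = sketch_add_unless(&refs, 1, 0);
	printf("got=%d refs=%d\n", got, refs);	/* got=0 refs=0 */
	return 0;
}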
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 6327ffbb1992..978b58efb1e9 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -45,7 +45,7 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
-#ifndef CONFIG_GUSA_RB
+#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -73,7 +73,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 	return ret != u;
 }
-#endif
+#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 0fac3da536ca..47136661a203 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -55,7 +55,7 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
55 "mov %0, %1 \n\t" 55 "mov %0, %1 \n\t"
56 "cmp/eq %1, %3 \n\t" 56 "cmp/eq %1, %3 \n\t"
57 "bf 2f \n\t" 57 "bf 2f \n\t"
58 "mov %3, %0 \n\t" 58 "mov %4, %0 \n\t"
59 "2: \n\t" 59 "2: \n\t"
60 "movco.l %0, @%2 \n\t" 60 "movco.l %0, @%2 \n\t"
61 "bf 1b \n\t" 61 "bf 1b \n\t"
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 60283565f89b..a28c9f0053fd 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -26,7 +26,7 @@
 #define __raw_spin_is_locked(x) ((x)->lock <= 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while ((x)->lock)
+	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
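On the spinlock hunk, assuming the convention visible in the context above (lock <= 0 means the lock is held), the old macro body spun while the lock word was non-zero, which includes the unlocked state, so __raw_spin_unlock_wait() could busy-wait on a free lock; the rewritten body spins only while the lock is actually held. A small illustrative model (names invented, not the kernel's types):

/* Illustrative model of the corrected __raw_spin_unlock_wait() behaviour. */
#include <stdio.h>

struct sketch_spinlock { volatile int lock; };	/* lock <= 0: held, > 0: free */

#define sketch_spin_is_locked(x)	((x)->lock <= 0)

/* Corrected form: wait only while the lock is actually held. */
#define sketch_spin_unlock_wait(x) \
	do { while (sketch_spin_is_locked(x)) /* cpu_relax() */ ; } while (0)

int main(void)
{
	struct sketch_spinlock lk = { .lock = 1 };	/* free */

	/* Returns at once on a free lock; the old body,
	 * "do { cpu_relax(); } while ((x)->lock)", would spin here
	 * because the word is non-zero even though nobody holds it. */
	sketch_spin_unlock_wait(&lk);
	printf("unlock_wait returned, lock=%d\n", lk.lock);
	return 0;
}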