author      Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>    2011-08-23 19:59:58 -0400
committer   H. Peter Anvin <hpa@linux.intel.com>                    2011-08-29 16:44:12 -0400
commit      8b8bc2f7311c3223213dbe346d9cc2e299fdb5eb (patch)
tree        50a2f0faa3762084336be4f97c03a97b14b22262
parent      433b3520616be694e0aa777089346c8718c91a7b (diff)
x86: Use xadd helper more widely
This covers the trivial cases from open-coded xadd to the xadd macros.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--    arch/x86/include/asm/atomic.h         8
-rw-r--r--    arch/x86/include/asm/atomic64_64.h    6
-rw-r--r--    arch/x86/include/asm/rwsem.h          8
-rw-r--r--    arch/x86/include/asm/uv/uv_bau.h      6
4 files changed, 5 insertions, 23 deletions
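For context, the xadd() helper that these conversions rely on is an atomic fetch-and-add: it adds its second argument to the memory location named by the first and returns the value that location held before the add, so "i + xadd(&v->counter, i)" yields the post-add value exactly as the removed open-coded sequences did. Below is a minimal, hypothetical C sketch of that behaviour, not the real size-dispatching macro from the kernel headers; the names xadd_sketch and atomic_add_return_sketch are illustrative only.

/* Hypothetical sketch of the semantics this patch relies on: atomically
 * add 'inc' to '*ptr' and return the value '*ptr' held before the add.
 * The real xadd() helper is a size-dispatching macro that emits
 * "lock; xadd{b,w,l,q}" for the matching operand width. */
static inline int xadd_sketch(int *ptr, int inc)
{
        int old = inc;

        asm volatile("lock; xaddl %0, %1"
                     : "+r" (old), "+m" (*ptr)
                     : : "memory");
        return old;             /* previous value of *ptr */
}

/* Each converted site then reads "increment + previous value", i.e. the
 * new value of the counter, matching the removed open-coded asm. */
static inline int atomic_add_return_sketch(int i, int *counter)
{
        return i + xadd_sketch(counter, i);
}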
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 10572e309ab2..58cb6d4085f7 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -172,18 +172,14 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i;
 #ifdef CONFIG_M386
+	int __i;
 	unsigned long flags;
 	if (unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
-	__i = i;
-	asm volatile(LOCK_PREFIX "xaddl %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 017594d403f6..0e1cbfc8ee06 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -170,11 +170,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	long __i = i;
-	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index df4cd32b4cc6..2dbe4a721ce5 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -204,13 +204,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	long tmp = delta;
-
-	asm volatile(LOCK_PREFIX "xadd %0,%1"
-		     : "+r" (tmp), "+m" (sem->count)
-		     : : "memory");
-
-	return tmp + delta;
+	return delta + xadd(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 37d369859c8e..c568ccca6e0e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -656,11 +656,7 @@ static inline int atomic_read_short(const struct atomic_short *v)
  */
 static inline int atom_asr(short i, struct atomic_short *v)
 {
-	short __i = i;
-	asm volatile(LOCK_PREFIX "xaddw %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 /*