author    Jeremy Fitzhardinge <jeremy@goop.org>  2011-09-28 14:49:28 -0400
committer Jeremy Fitzhardinge <jeremy@goop.org>  2011-11-25 13:42:59 -0500
commit    3d94ae0c70a71a9824479366775e2c7679a57d94 (patch)
tree      041c1013f123fb3177bed6b70375fdc4f6c0ddff /arch/x86/include/asm/cmpxchg.h
parent    4a7f340c6a75ec5fca23d9c80a59f3f28cc4a61e (diff)
x86/cmpxchg: add a locked add() helper
Mostly to remove some conditional code in spinlock.h.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
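For background on what the "locked" part buys: a plain add to memory is a non-atomic read-modify-write, so concurrent CPUs can lose increments, while a lock-prefixed add is atomic. The following is an illustrative userspace sketch, not part of this patch, assuming x86-64 and GCC or Clang with pthreads; it contrasts a plain add with the lock-prefixed add that add_smp()/add_sync() emit.

/*
 * Illustrative userspace demo, not kernel code: two threads each add one
 * to a shared counter many times.  The plain "addl" can lose updates under
 * SMP contention; the "lock; addl" does not.
 * Build: gcc -O2 -pthread lock_add_demo.c   (file name is hypothetical)
 */
#include <pthread.h>
#include <stdio.h>

static unsigned int plain_ctr;
static unsigned int locked_ctr;

static void *worker(void *unused)
{
	for (int i = 0; i < 1000000; i++) {
		/* non-atomic read-modify-write: increments can be lost */
		asm volatile("addl %1, %0"
			     : "+m" (plain_ctr) : "ri" (1) : "memory", "cc");
		/* lock-prefixed add: atomic with respect to other CPUs */
		asm volatile("lock; addl %1, %0"
			     : "+m" (locked_ctr) : "ri" (1) : "memory", "cc");
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* locked_ctr is always 2000000; plain_ctr usually falls short */
	printf("plain=%u locked=%u\n", plain_ctr, locked_ctr);
	return 0;
}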
Diffstat (limited to 'arch/x86/include/asm/cmpxchg.h')
-rw-r--r--  arch/x86/include/asm/cmpxchg.h  42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5d3acdf5a7a6..49eade13161c 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
 #endif	/* ASM_X86_CMPXCHG_H */
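For anyone who wants to poke at the new helpers outside a kernel tree, here is a self-contained userspace sketch, not kernel code: it copies a trimmed version of the __add() macro from this patch, supplies local stand-ins for the __X86_CASE_* constants and LOCK_PREFIX, and exercises add_smp()/add_sync() on 32-bit and 64-bit operands. It assumes x86-64 and a build with gcc -O2; like the kernel build, it relies on dead-code elimination to drop the size cases that do not match.

/*
 * Userspace sketch of the __add()/add_smp()/add_sync() macros added by
 * this patch, with local stand-ins for __X86_CASE_* and LOCK_PREFIX.
 * Build with: gcc -O2 add_demo.c   (optimization is required so the
 * non-matching size cases and the __add_wrong_size() call are discarded,
 * as they are in the kernel build; the file name is hypothetical).
 */
#include <stdio.h>

#define __X86_CASE_L	4
#define __X86_CASE_Q	8
#define LOCK_PREFIX	"lock; "	/* stand-in; the kernel patches its lock prefixes at boot */

extern void __add_wrong_size(void);	/* link error if a bad size survives optimization */

#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")

int main(void)
{
	unsigned int ticket = 40;	/* 4 bytes -> "lock; addl" */
	unsigned long stat = 1000;	/* 8 bytes -> "lock; addq" */

	add_smp(&ticket, 2);
	add_sync(&stat, 24);

	printf("ticket=%u stat=%lu\n", ticket, stat);	/* prints 42 and 1024 */
	return 0;
}

As the comment block in the patch puts it, add_smp() uses LOCK_PREFIX, whose lock prefix the kernel can patch away when only one CPU is online, while add_sync() always emits the lock prefix.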