path: root/arch
author	Jeremy Fitzhardinge <jeremy@goop.org>	2011-09-30 15:14:10 -0400
committer	Jeremy Fitzhardinge <jeremy@goop.org>	2011-11-25 13:43:12 -0500
commit	31a8394e069e47dc47f4c29e4213aa943342f19f (patch)
tree	f96a8826877e4c28fdc40f6e1aa4a1cf42187e22 /arch
parent	3d94ae0c70a71a9824479366775e2c7679a57d94 (diff)
x86: consolidate xchg and xadd macros
They both have a basic "put new value in location, return old value" pattern, so they can use the same macro easily.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
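For readers outside the kernel tree, the shared pattern the patch exploits can be modelled in plain C with the GCC/Clang __atomic builtins. The names model_xchg and model_xadd below are invented for this sketch; it only illustrates the semantics, it is not the kernel macro itself:

	#include <stdint.h>

	/* Illustrative model of the shared "store new value, return old value"
	 * pattern: both operations differ only in how the new contents of *ptr
	 * are produced (plain store vs. add). */
	static inline uint32_t model_xchg(volatile uint32_t *ptr, uint32_t val)
	{
		/* atomically store val and return the previous contents */
		return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
	}

	static inline uint32_t model_xadd(volatile uint32_t *ptr, uint32_t inc)
	{
		/* atomically add inc and return the previous contents */
		return __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
	}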
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/cmpxchg.h | 114
1 file changed, 36 insertions(+), 78 deletions(-)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 49eade13161..5488e10b9db 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -33,60 +33,47 @@ extern void __add_wrong_size(void)
 #define __X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+		__typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
@@ -167,36 +154,6 @@ extern void __add_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
-	({								\
-		__typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
-			break;						\
-		default:						\
-			__xadd_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
@@ -205,6 +162,7 @@ extern void __add_wrong_size(void)
  * xadd_sync() is always locked
  * xadd_local() is never locked
  */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
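After the patch, the consolidated macros keep their documented behaviour: both xchg() and xadd() hand back the value that was in memory before the operation. A hypothetical caller (variable names invented for illustration, kernel context assumed) would use them like this:

	int flag = 0, tickets = 0;

	int old_flag  = xchg(&flag, 1);          /* store 1, return the previous flag value   */
	int my_ticket = xadd(&tickets, 1);       /* locked add, return the value before the add */
	int prev      = xadd_local(&tickets, 1); /* same semantics, but without the lock prefix */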