Diffstat (limited to 'arch/x86/include/asm/cmpxchg.h')
 -rw-r--r--  arch/x86/include/asm/cmpxchg.h | 163
 1 file changed, 93 insertions(+), 70 deletions(-)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5d3acdf5a7a6..0c9fa2745f13 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
 #define __X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+		__typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
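A minimal stand-alone user-space sketch of what the consolidated __xchg_op() expands to for the 4-byte case; it is not part of the patch, the helper name xchg_u32() is invented here, it hard-codes the __X86_CASE_L arm with the empty lock prefix that xchg() passes (xchg is implicitly locked), and it only builds with gcc or clang on x86:

#include <stdio.h>

/* Mirrors the __X86_CASE_L arm of __xchg_op(): xchg swaps the register
 * with the memory operand, so the old contents of *ptr come back in "val". */
static unsigned int xchg_u32(volatile unsigned int *ptr, unsigned int val)
{
	asm volatile("xchgl %0, %1"
		     : "+r" (val), "+m" (*ptr)
		     : : "memory", "cc");
	return val;
}

int main(void)
{
	volatile unsigned int lock_word = 0;
	unsigned int old = xchg_u32(&lock_word, 1);

	printf("old=%u now=%u\n", old, lock_word);	/* prints old=0 now=1 */
	return 0;
}

The xadd()/__add() changes in the next hunk reuse exactly this expansion, only with a real lock prefix and a different opcode.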
@@ -165,46 +154,80 @@ extern void __xadd_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock)						\
 	({								\
 		__typeof__ (*(ptr)) __ret = (inc);			\
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		default:						\
-			__xadd_wrong_size();				\
+			__add_wrong_size();				\
 		}							\
 		__ret;							\
 	})
 
 /*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
+ * add_*() adds "inc" to "*ptr"
  *
- * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
  */
-#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
+#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
+({									\
+	bool __ret;							\
+	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
+	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
+	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
+	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
+	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
+	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
+	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
+		     : "=a" (__ret), "+d" (__old2),			\
+		       "+m" (*(p1)), "+m" (*(p2))			\
+		     : "i" (2 * sizeof(long)), "a" (__old1),		\
+		       "b" (__new1), "c" (__new2));			\
+	__ret;								\
+})
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
 
 #endif	/* ASM_X86_CMPXCHG_H */
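A comparable stand-alone sketch for the xadd() side (again not from the patch; xadd_u32() is an invented name): it hand-expands the 4-byte case with the lock prefix written out, the way xadd() behaves when multiple CPUs are online, and shows the property the comment describes, namely that the caller gets the previous value of *ptr back:

#include <stdio.h>

/* Hand-expanded __X86_CASE_L arm of __xadd() with an explicit lock prefix:
 * xadd stores old+inc to memory and returns the old value in "inc". */
static unsigned int xadd_u32(volatile unsigned int *ptr, unsigned int inc)
{
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (inc), "+m" (*ptr)
		     : : "memory", "cc");
	return inc;
}

int main(void)
{
	volatile unsigned int next_ticket = 0;

	/* Each caller gets the pre-increment value, e.g. as a ticket number. */
	printf("ticket %u\n", xadd_u32(&next_ticket, 1));	/* 0 */
	printf("ticket %u\n", xadd_u32(&next_ticket, 1));	/* 1 */
	printf("counter now %u\n", next_ticket);		/* 2 */
	return 0;
}

add_smp() and add_sync() are the same idea without the return value, which is why __add() drops the "+r" (__ret) output and feeds "inc" in as a plain "ri" input instead.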
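Finally, a sketch of what cmpxchg_double() amounts to on x86-64. It is not from the patch either; struct pair and cmpxchg_pair() are invented for illustration, and it assumes a CPU with cmpxchg16b plus the 2*sizeof(long) alignment and adjacency that the macro's BUILD_BUG_ON()/VM_BUG_ON() lines check:

#include <stdbool.h>
#include <stdio.h>

/* Two adjacent longs, aligned to 2*sizeof(long) as the macro requires. */
struct pair {
	unsigned long lo;	/* compared against rax, replaced from rbx */
	unsigned long hi;	/* compared against rdx, replaced from rcx */
} __attribute__((aligned(16)));

static bool cmpxchg_pair(struct pair *p,
			 unsigned long old_lo, unsigned long old_hi,
			 unsigned long new_lo, unsigned long new_hi)
{
	bool ok;

	/* cmpxchg16b compares rdx:rax with the 16-byte operand; on a match
	 * it stores rcx:rbx there and sets ZF, which sete turns into a bool. */
	asm volatile("lock; cmpxchg16b %1; sete %0"
		     : "=q" (ok), "+m" (*p), "+a" (old_lo), "+d" (old_hi)
		     : "b" (new_lo), "c" (new_hi)
		     : "memory", "cc");
	return ok;
}

int main(void)
{
	struct pair p = { .lo = 1, .hi = 2 };

	printf("first  try: %d\n", cmpxchg_pair(&p, 1, 2, 10, 20)); /* 1 */
	printf("second try: %d\n", cmpxchg_pair(&p, 1, 2, 30, 40)); /* 0 */
	printf("p = {%lu, %lu}\n", p.lo, p.hi);                     /* 10, 20 */
	return 0;
}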