Diffstat (limited to 'include/asm-i386/cmpxchg.h')
-rw-r--r--   include/asm-i386/cmpxchg.h   293
1 file changed, 293 insertions, 0 deletions
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
new file mode 100644
index 000000000000..7adcef0cd53b
--- /dev/null
+++ b/include/asm-i386/cmpxchg.h
@@ -0,0 +1,293 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr),
		  "b"(low),
		  "c"(high)
		: "ax","dx","memory");
}

static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

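/*
 * Usage sketch (editor's illustration, not part of the original header):
 * publish a 64-bit value that concurrent readers may load at any time,
 * without them ever observing a torn, half-written result.  The
 * function and parameter names are hypothetical.
 */
static inline void __example_publish_stamp(unsigned long long *stamp,
					   unsigned long long now)
{
	set_64bit(stamp, now);	/* atomic 64-bit store via lock cmpxchg8b */
}
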
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but in general the primitive is suspect, since *ptr is really an
 * output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
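
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * atomically claim a one-shot flag; only the caller that sees 0 come
 * back has taken ownership.  The names are hypothetical.
 */
static inline int __example_try_claim(volatile unsigned long *flag)
{
	return xchg(flag, 1UL) == 0;	/* old value 0 means we won the race */
}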

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define cmpxchg_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
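
#ifdef CONFIG_X86_CMPXCHG
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the canonical cmpxchg retry loop, here adding to a counter without a
 * lock.  Guarded by CONFIG_X86_CMPXCHG because at this point in the
 * file the cmpxchg() macro is only defined in that configuration.
 */
static inline void __example_lockless_add(volatile unsigned long *p,
					  unsigned long delta)
{
	unsigned long old;

	do {
		old = *p;	/* snapshot the current value */
	} while (cmpxchg(p, old, old + delta) != old);	/* retry if raced */
}
#endif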

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
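
#ifdef CONFIG_X86_CMPXCHG
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * update a word living in memory shared with the hypervisor.  Even a
 * uniprocessor guest must use the locked variant here, because the
 * hypervisor side may run on another physical CPU.  The names are
 * hypothetical.
 */
static inline int __example_shared_update(volatile unsigned long *shared,
					  unsigned long old,
					  unsigned long new)
{
	return sync_cmpxchg(shared, old, new) == old;	/* 1 on success */
}
#endif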

static inline unsigned long __cmpxchg_local(volatile void *ptr,
			unsigned long old, unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
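
#ifdef CONFIG_X86_CMPXCHG
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * cmpxchg_local() omits the lock prefix, so it is only safe for data no
 * other CPU can touch, e.g. a CPU-local counter raced against interrupt
 * handlers on the same CPU.  The names are hypothetical.
 */
static inline void __example_local_inc(volatile unsigned long *local_ctr)
{
	unsigned long old;

	do {
		old = *local_ctr;
	} while (cmpxchg_local(local_ctr, old, old + 1) != old);
}
#endif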

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386: cmpxchg may have
 * to be simulated on the 80386 CPU, which lacks the instruction.  For
 * that purpose we define a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				  (unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				    (unsigned long)(n), sizeof(*(ptr)));\
	__ret;								\
})
#define cmpxchg_local(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
					(unsigned long)(n), sizeof(*(ptr))); \
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				    (unsigned long)(n), sizeof(*(ptr)));\
	__ret;								\
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
			unsigned long long old, unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__("cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
					       (unsigned long long)(n)))
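
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * lockless update of a 64-bit counter on a 32-bit CPU via the
 * cmpxchg8b-based cmpxchg64().  The names are hypothetical.
 */
static inline void __example_add64(volatile unsigned long long *ctr,
				   unsigned long long delta)
{
	unsigned long long old;

	do {
		old = *ctr;	/* plain load may tear; cmpxchg64 catches it */
	} while (cmpxchg64(ctr, old, old + delta) != old);
}
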
#endif

#endif