author    Jeff Dike <jdike@addtoit.com>                        2007-05-08 03:35:02 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-05-08 14:15:20 -0400
commit    a436ed9c5106b41606cbb55ab3b28389fe8ae04f
tree      b8df0bde6d7eb2808c37da815d8857396ee1eaf1 /include/asm-i386
parent    5dc12ddee93d63d7107cbbf70db23476d7b30e43
x86: create asm/cmpxchg.h
i386: Rearrange the cmpxchg code to allow atomic.h to get it without needing to include system.h. This kills warnings in the UML build from atomic.h about implicit declarations of cmpxchg symbols. The i386 build presumably isn't seeing this because a separate inclusion of system.h is covering it over.

The cmpxchg code is moved to asm-i386/cmpxchg.h, with an include left in system.h for the benefit of generic code which expects cmpxchg there. Meanwhile, atomic.h includes cmpxchg.h. This causes no noticeable damage to the i386 build.

x86_64: Move cmpxchg into its own header. atomic.h already included system.h, so this is changed to include cmpxchg.h. This is purely cleanup - it's not fixing any warnings - so if the x86_64 system.h isn't considered as cleanup-worthy as i386, then this can be dropped. It causes no noticeable damage to the x86_64 build.

uml: The i386 and x86_64 cmpxchg patches require an asm-um/cmpxchg.h for the UML build.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
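
As an illustration of what the rearrangement buys (this snippet is not part of the patch; the helper name and flag variable are invented), code that only includes asm/atomic.h can now see cmpxchg() through the new asm/cmpxchg.h header instead of relying on an incidental include of system.h:

#include <asm/atomic.h>	/* pulls in asm/cmpxchg.h after this patch */

/* Hypothetical helper: atomically claim a flag.  cmpxchg() returns the
 * previous value of *flag, so a return of 0 means this caller won. */
static int try_claim(volatile unsigned long *flag)
{
	return cmpxchg(flag, 0UL, 1UL) == 0UL;
}
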
Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/atomic.h  |   1
-rw-r--r--   include/asm-i386/cmpxchg.h | 293
-rw-r--r--   include/asm-i386/system.h  | 289
3 files changed, 295 insertions, 288 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index ff90c6e3fcb4..b9fd03cef51e 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -3,6 +3,7 @@
 
 #include <linux/compiler.h>
 #include <asm/processor.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
new file mode 100644
index 000000000000..7adcef0cd53b
--- /dev/null
+++ b/include/asm-i386/cmpxchg.h
@@ -0,0 +1,293 @@
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/bitops.h> /* for LOCK_PREFIX */
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+/*
+ * The semantics of XCHGCMP8B are a bit strange, this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+		unsigned int low, unsigned int high)
+{
+	__asm__ __volatile__ (
+		"\n1:\t"
+		"movl (%0), %%eax\n\t"
+		"movl 4(%0), %%edx\n\t"
+		"lock cmpxchg8b (%0)\n\t"
+		"jnz 1b"
+		: /* no outputs */
+		: "D"(ptr),
+		  "b"(low),
+		  "c"(high)
+		: "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+		unsigned long long value)
+{
+	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x) *(((unsigned int*)&(x))+0)
+#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+		unsigned long long value)
+{
+	__set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+#endif
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	switch (size) {
+		case 1:
+			__asm__ __volatile__("xchgb %b0,%1"
+				:"=q" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 2:
+			__asm__ __volatile__("xchgw %w0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 4:
+			__asm__ __volatile__("xchgl %0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+	}
+	return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#endif
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					   unsigned long old,
+					   unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+			unsigned long old, unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		return cmpxchg_386_u8(ptr, old, new);
+	case 2:
+		return cmpxchg_386_u16(ptr, old, new);
+	case 4:
+		return cmpxchg_386_u32(ptr, old, new);
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	if (likely(boot_cpu_data.x86 > 3)) \
+		__ret = __cmpxchg((ptr), (unsigned long)(o), \
+				(unsigned long)(n), sizeof(*(ptr))); \
+	else \
+		__ret = cmpxchg_386((ptr), (unsigned long)(o), \
+				(unsigned long)(n), sizeof(*(ptr))); \
+	__ret; \
+})
+#define cmpxchg_local(ptr,o,n) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	if (likely(boot_cpu_data.x86 > 3)) \
+		__ret = __cmpxchg_local((ptr), (unsigned long)(o), \
+				(unsigned long)(n), sizeof(*(ptr))); \
+	else \
+		__ret = cmpxchg_386((ptr), (unsigned long)(o), \
+				(unsigned long)(n), sizeof(*(ptr))); \
+	__ret; \
+})
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+				      unsigned long long new)
+{
+	unsigned long long prev;
+	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+			     : "=A"(prev)
+			     : "b"((unsigned long)new),
+			       "c"((unsigned long)(new >> 32)),
+			       "m"(*__xg(ptr)),
+			       "0"(old)
+			     : "memory");
+	return prev;
+}
+
+static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+			unsigned long long old, unsigned long long new)
+{
+	unsigned long long prev;
+	__asm__ __volatile__("cmpxchg8b %3"
+			     : "=A"(prev)
+			     : "b"((unsigned long)new),
+			       "c"((unsigned long)(new >> 32)),
+			       "m"(*__xg(ptr)),
+			       "0"(old)
+			     : "memory");
+	return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+					(unsigned long long)(n)))
+#define cmpxchg64_local(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
+					(unsigned long long)(n)))
+#endif
+
+#endif
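
For reference, a brief usage sketch of the primitives the new header exports (not part of the patch; the function and variable names are invented for illustration):

#include <asm/cmpxchg.h>

/* Hypothetical example: lock-free saturating increment built on cmpxchg().
 * cmpxchg() returns the value previously held in *ctr, so the loop retries
 * whenever another CPU modified the counter between the read and the
 * compare-and-exchange. */
static void saturating_inc(volatile unsigned int *ctr, unsigned int max)
{
	unsigned int old, new;

	do {
		old = *ctr;
		if (old == max)		/* already at the limit */
			return;
		new = old + 1;
	} while (cmpxchg(ctr, old, new) != old);
}
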
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a4ed087ac0ae..94ed3686a5f3 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
+#include <asm/cmpxchg.h>
 
 #ifdef __KERNEL__
 
@@ -195,293 +195,6 @@ static inline unsigned long get_limit(unsigned long segment)
 
 #define nop() __asm__ __volatile__ ("nop")
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-#ifdef CONFIG_X86_CMPXCHG64
-
-/*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
- */
-static inline void __set_64bit (unsigned long long * ptr,
-		unsigned int low, unsigned int high)
-{
-	__asm__ __volatile__ (
-		"\n1:\t"
-		"movl (%0), %%eax\n\t"
-		"movl 4(%0), %%edx\n\t"
-		"lock cmpxchg8b (%0)\n\t"
-		"jnz 1b"
-		: /* no outputs */
-		: "D"(ptr),
-		  "b"(low),
-		  "c"(high)
-		: "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
-		unsigned long long value)
-{
-	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
-		unsigned long long value)
-{
-	__set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
-#endif
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-		case 1:
-			__asm__ __volatile__("xchgb %b0,%1"
-				:"=q" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 2:
-			__asm__ __volatile__("xchgw %w0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 4:
-			__asm__ __volatile__("xchgl %0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-	}
-	return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#ifdef CONFIG_X86_CMPXCHG
-#define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-#define sync_cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-#endif
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					   unsigned long old,
-					   unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__("cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return cmpxchg_386_u8(ptr, old, new);
-	case 2:
-		return cmpxchg_386_u16(ptr, old, new);
-	case 4:
-		return cmpxchg_386_u32(ptr, old, new);
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	if (likely(boot_cpu_data.x86 > 3)) \
-		__ret = __cmpxchg((ptr), (unsigned long)(o), \
-				(unsigned long)(n), sizeof(*(ptr))); \
-	else \
-		__ret = cmpxchg_386((ptr), (unsigned long)(o), \
-				(unsigned long)(n), sizeof(*(ptr))); \
-	__ret; \
-})
-#define cmpxchg_local(ptr,o,n) \
-({ \
-	__typeof__(*(ptr)) __ret; \
-	if (likely(boot_cpu_data.x86 > 3)) \
-		__ret = __cmpxchg_local((ptr), (unsigned long)(o), \
-				(unsigned long)(n), sizeof(*(ptr))); \
-	else \
-		__ret = cmpxchg_386((ptr), (unsigned long)(o), \
-				(unsigned long)(n), sizeof(*(ptr))); \
-	__ret; \
-})
-#endif
-
-#ifdef CONFIG_X86_CMPXCHG64
-
-static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
-				      unsigned long long new)
-{
-	unsigned long long prev;
-	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-			     : "=A"(prev)
-			     : "b"((unsigned long)new),
-			       "c"((unsigned long)(new >> 32)),
-			       "m"(*__xg(ptr)),
-			       "0"(old)
-			     : "memory");
-	return prev;
-}
-
-static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
-{
-	unsigned long long prev;
-	__asm__ __volatile__("cmpxchg8b %3"
-			     : "=A"(prev)
-			     : "b"((unsigned long)new),
-			       "c"((unsigned long)(new >> 32)),
-			       "m"(*__xg(ptr)),
-			       "0"(old)
-			     : "memory");
-	return prev;
-}
-
-#define cmpxchg64(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
-					(unsigned long long)(n)))
-#define cmpxchg64_local(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
-					(unsigned long long)(n)))
-#endif
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking