Diffstat (limited to 'include/asm-i386/system.h')
 -rw-r--r--  include/asm-i386/system.h | 73
 1 file changed, 68 insertions(+), 5 deletions(-)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index acd5c26b69ba..772f85da1206 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -167,6 +167,8 @@ struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 
+#ifdef CONFIG_X86_CMPXCHG64
+
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
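The comment that this hunk's trailing context begins belongs to the set_64bit() helpers that the new CONFIG_X86_CMPXCHG64 guard now protects: cmpxchg8b only writes its new value when %edx:%eax still match the 64-bit memory operand, so an unconditional 64-bit store has to keep re-reading the current contents and retrying until the exchange succeeds. Below is a minimal sketch of that retry loop, not part of the patch, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the hand-written cmpxchg8b asm; the name set_64bit_sketch is invented for illustration.

/* Invented illustration (not from the patch): the retry loop that
 * __set_64bit() implements with cmpxchg8b, expressed with a GCC
 * builtin instead of inline assembly. */
#include <stdint.h>

static void set_64bit_sketch(volatile uint64_t *ptr, uint64_t value)
{
        uint64_t old = *ptr;

        for (;;) {
                /* returns the value that was in *ptr at the moment of the
                 * compare-and-swap; equal to 'old' means our store won */
                uint64_t seen = __sync_val_compare_and_swap(ptr, old, value);
                if (seen == old)
                        break;
                old = seen;     /* lost the race: reload and retry */
        }
}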
@@ -221,6 +223,8 @@ static inline void __set_64bit_var (unsigned long long *ptr,
  __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
  __set_64bit(ptr, ll_low(value), ll_high(value)) )
 
+#endif
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
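The note in this hunk's trailing context refers to xchg(): on x86 the xchg instruction with a memory operand is locked by the CPU even without an explicit lock prefix, so the macro already behaves as an SMP-safe atomic exchange. A rough, invented usage sketch built on the header's xchg(ptr, val) interface follows; lock_sketch, unlock_sketch and the bare spin loop are not from the patch.

/* Invented illustration: a crude test-and-set lock on top of xchg(). */
static inline void lock_sketch(volatile unsigned long *lock)
{
        /* atomically store 1 and look at the previous value;
         * nonzero means another CPU already holds the lock */
        while (xchg(lock, 1UL) != 0UL)
                ;       /* spin; a real lock would use cpu_relax() here */
}

static inline void unlock_sketch(volatile unsigned long *lock)
{
        /* on x86 a plain store releases the lock; real code would put
         * a compiler barrier in front of it */
        *lock = 0UL;
}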
@@ -259,6 +263,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                        (unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
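With the wrapper above now living inside the CONFIG_X86_CMPXCHG guard (the 80386 fallback added further down provides its own definition), callers keep using cmpxchg(ptr, old, new) exactly as before, typically in an optimistic read-modify-write retry loop. A hedged usage sketch follows; add_sketch and counter are invented names and only the cmpxchg() interface comes from this header.

/* Invented illustration: lock-free add using the cmpxchg() macro above. */
static inline void add_sketch(volatile unsigned long *counter, unsigned long delta)
{
        unsigned long old, new;

        do {
                old = *counter;         /* snapshot the current value */
                new = old + delta;      /* value we would like to store */
                /* cmpxchg() returns what was really in *counter; if that
                 * still equals 'old', our 'new' value went in atomically */
        } while (cmpxchg(counter, old, new) != old);
}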
@@ -275,22 +282,78 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 2:
                 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                      : "=a"(prev)
-                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                      : "memory");
                 return prev;
         case 4:
                 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                      : "=a"(prev)
-                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                      : "memory");
                 return prev;
         }
         return old;
 }
 
-#define cmpxchg(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+                                      unsigned long new, int size)
+{
+        switch (size) {
+        case 1:
+                return cmpxchg_386_u8(ptr, old, new);
+        case 2:
+                return cmpxchg_386_u16(ptr, old, new);
+        case 4:
+                return cmpxchg_386_u32(ptr, old, new);
+        }
+        return old;
+}
+
+#define cmpxchg(ptr,o,n)                                                \
+({                                                                      \
+        __typeof__(*(ptr)) __ret;                                       \
+        if (likely(boot_cpu_data.x86 > 3))                              \
+                __ret = __cmpxchg((ptr), (unsigned long)(o),            \
+                                        (unsigned long)(n), sizeof(*(ptr))); \
+        else                                                            \
+                __ret = cmpxchg_386((ptr), (unsigned long)(o),          \
+                                        (unsigned long)(n), sizeof(*(ptr))); \
+        __ret;                                                          \
+})
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+                                      unsigned long long new)
+{
+        unsigned long long prev;
+        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+                             : "=A"(prev)
+                             : "b"((unsigned long)new),
+                               "c"((unsigned long)(new >> 32)),
+                               "m"(*__xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+                                        (unsigned long long)(n)))
+
+#endif
 
 #ifdef __KERNEL__
 struct alt_instr {