author    Thomas Petazzoni <thomas.petazzoni@free-electrons.com>    2008-08-18 06:33:21 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-08-18 10:05:47 -0400
commit    774400a3ba23b63f4de39e67ce6c4e48935809dc (patch)
tree      c5386a1c3151d97971f48c75a3c861abbac1105f /arch/x86/kernel/cpu/intel.c
parent    8bfcb3960fde049b863266dab8c3617bb5a541aa (diff)
x86: move cmpxchg fallbacks to a generic place
arch/x86/kernel/cpu/intel.c defines a few fallback functions (cmpxchg_*()) that are used when the CPU doesn't support cmpxchg and/or cmpxchg64 natively. However, while defined in an Intel-specific file, these functions are also used for CPUs from other vendors when they don't support cmpxchg and/or cmpxchg64. This breaks the compilation when support for Intel CPUs is disabled.

This patch moves these functions to a new arch/x86/kernel/cpu/cmpxchg.c file, unconditionally compiled when X86_32 is enabled.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: michael@free-electrons.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
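For reference, the relocated helpers would presumably end up in arch/x86/kernel/cpu/cmpxchg.c looking roughly like the sketch below. The new file itself is outside this diffstat (which is limited to intel.c), so the include list is an assumption rather than a copy of the added file:

#include <linux/types.h>
#include <linux/module.h>
#include <linux/irqflags.h>

#ifndef CONFIG_X86_CMPXCHG
/* Software fallback used when the CPU has no native cmpxchg instruction. */
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
	u8 prev;
	unsigned long flags;

	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
	local_irq_save(flags);
	prev = *(u8 *)ptr;
	if (prev == old)
		*(u8 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);
/* ... cmpxchg_386_u16(), cmpxchg_386_u32() and cmpxchg_486_u64() follow the same pattern ... */
#endif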
Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--	arch/x86/kernel/cpu/intel.c	64
1 file changed, 0 insertions(+), 64 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5c8959b8a42e..77618c717d76 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -307,69 +307,5 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 
 cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-	u8 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u8 *)ptr;
-	if (prev == old)
-		*(u8 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-	u16 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u16 *)ptr;
-	if (prev == old)
-		*(u16 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-	u32 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u32 *)ptr;
-	if (prev == old)
-		*(u32 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
-	u64 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u64 *)ptr;
-	if (prev == old)
-		*(u64 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
-
 /* arch_initcall(intel_cpu_init); */
 
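As a side note on how callers reach these helpers: when CONFIG_X86_CMPXCHG is not set, the 32-bit cmpxchg() wrapper dispatches on operand size to the software fallbacks declared in the cmpxchg headers. The sketch below is a simplified illustration of that dispatch, not the kernel's actual macro, and the wrapper name is made up for the example:

/* Illustrative only: route to the size-specific software fallback. */
static inline unsigned long fallback_cmpxchg(volatile void *ptr,
					     unsigned long old,
					     unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}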