author | H. Peter Anvin <hpa@linux.intel.com> | 2012-11-28 14:50:24 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2012-11-29 16:23:01 -0500 |
commit | d55c5a93db2d5fa95f233ab153f594365d95b777 (patch) | |
tree | f589ea96fe3fab9c935c0963dab7ec8f8a7745fb /arch/x86/include/asm/cmpxchg_32.h | |
parent | eb068e781020cf491333c773fb41820b57bfada4 (diff) | |
x86, 386 removal: Remove CONFIG_CMPXCHG
All 486+ CPUs support CMPXCHG, so remove the fallback 386 support
code.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1354132230-21854-3-git-send-email-hpa@linux.intel.com
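For readers unfamiliar with the instruction, CMPXCHG is a compare-and-swap: it stores a new value into a memory location only if that location still holds the expected old value, which is what makes lock-free read-modify-write retry loops possible. The sketch below illustrates that pattern in plain user-space C; it is not kernel code and uses GCC's __atomic_compare_exchange_n and __atomic_load_n builtins as stand-ins for the kernel's cmpxchg() macro, with illustrative function and variable names.

```c
/*
 * Minimal user-space sketch of the compare-and-swap pattern that the
 * kernel's cmpxchg() macro exposes on 486+ CPUs.  GCC's atomic builtins
 * stand in for the kernel API here; on x86 the compare-exchange builtin
 * is implemented with a LOCK CMPXCHG instruction.
 */
#include <stdio.h>

static unsigned long counter;

/* Atomically add one to *p and return the value that was replaced. */
static unsigned long atomic_inc_return_old(unsigned long *p)
{
	unsigned long old, new;

	do {
		old = __atomic_load_n(p, __ATOMIC_RELAXED);
		new = old + 1;
		/* Retry if *p changed between the load and the CAS. */
	} while (!__atomic_compare_exchange_n(p, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	return old;
}

int main(void)
{
	atomic_inc_return_old(&counter);
	printf("counter = %lu\n", counter);
	return 0;
}
```

A 386 lacks the instruction, so the kernel previously emulated the operation out of line through the cmpxchg_386_u8/u16/u32 helpers declared in the code removed below; with the 486 as the minimum supported CPU, cmpxchg() can always emit the instruction directly.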
Diffstat (limited to 'arch/x86/include/asm/cmpxchg_32.h')
-rw-r--r-- | arch/x86/include/asm/cmpxchg_32.h | 55 |
1 file changed, 0 insertions, 55 deletions
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 53f4b219336b..f8bf2eecab86 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -34,9 +34,7 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		     : "memory");
 }
 
-#ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#endif
 
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n)						\
@@ -73,59 +71,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 	return prev;
 }
 
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-					unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-		return cmpxchg_386_u8(ptr, old, new);
-	case 2:
-		return cmpxchg_386_u16(ptr, old, new);
-	case 4:
-		return cmpxchg_386_u32(ptr, old, new);
-	}
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	else								\
-		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
-				(unsigned long)(o), (unsigned long)(n),	\
-				sizeof(*(ptr)));			\
-	__ret;								\
-})
-#endif
-
 #ifndef CONFIG_X86_CMPXCHG64
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary