author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2008-01-30 07:30:47 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:30:47 -0500
commit		2c0b8a7578f7653e1e5312a5232e8ead563cf477 (patch)
tree		bc220bff551e39f9faff17e807d828094f420607 /include/asm-x86/cmpxchg_32.h
parent		5f627f8e122a163ce53908d55e088247db31f1d7 (diff)
x86: fall back on interrupt disable in cmpxchg8b on 80386 and 80486
Actually, on the 386, cmpxchg and cmpxchg_local fall back on
cmpxchg_386_u8/16/32: these helpers disable interrupts around a
non-atomic update to mimic the cmpxchg behavior.
The comment:
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
already present in cmpxchg_386_u32 makes it clear that this cmpxchg
implementation must not be used in an SMP context. However, cmpxchg_local
can safely use this fallback, since it only needs to be atomic with respect
to the local CPU.
This patch adds a cmpxchg_486_u64 and uses it as a fallback for cmpxchg64
and cmpxchg64_local on 80386 and 80486.
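The cmpxchg_486_u64 body itself is only declared extern by this header change; a minimal sketch of the interrupt-disable emulation it stands for, following the same pattern as the existing cmpxchg_386_u32 fallback (the name and exact shape below are illustrative, not the committed implementation):
/* Sketch only: emulate cmpxchg8b by disabling local interrupts around a
 * non-atomic 64-bit compare-and-exchange. Not atomic across CPUs, but
 * sufficient on 386/486 class hardware and for the _local variants,
 * which only need atomicity with respect to the local CPU. */
static unsigned long long cmpxchg_486_u64_sketch(volatile void *ptr,
                                                 u64 old, u64 new)
{
        u64 prev;
        unsigned long flags;

        local_irq_save(flags);          /* block local interrupts */
        prev = *(u64 *)ptr;             /* read the current value */
        if (prev == old)
                *(u64 *)ptr = new;      /* conditionally install the new value */
        local_irq_restore(flags);
        return prev;                    /* caller compares prev against old */
}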
Q:
but why is it called cmpxchg_486 when the other functions are called
cmpxchg_386?
A:
Because the standard cmpxchg is missing only on 386, but cmpxchg8b is
missing both on 386 and 486.
Citing Intel's Instruction set reference:
cmpxchg:
This instruction is not supported on Intel processors earlier than the
Intel486 processors.
cmpxchg8b:
This instruction encoding is not supported on Intel processors earlier
than the Pentium processors.
Q:
What's the reason to have cmpxchg64_local on 32-bit architectures?
Without that need, all this would just be a few simple defines.
A:
cmpxchg64_local on 32-bit architectures takes unsigned long long
parameters, but cmpxchg_local only takes longs. Since we have cmpxchg8b
to execute an 8-byte cmpxchg atomically on the Pentium and later, it makes
sense to provide a flavor of cmpxchg and cmpxchg_local using this
instruction.
Also, for 32-bit architectures lacking a 64-bit atomic cmpxchg, it
makes sense _not_ to define cmpxchg64 while cmpxchg could still be
available.
Moreover, the fallback for cmpxchg8b on i386 for the 386 and 486 is an
interrupt-disable emulation, which is not atomic across CPUs.
However, cmpxchg64_local will be emulated by disabling interrupts on all
architectures where it is not supported atomically.
Therefore, we *could* turn cmpxchg64_local into a cmpxchg_local, but it
would make the 386/486 fallbacks ugly, make its design different from
cmpxchg/cmpxchg64 (which really depend on atomic operations and cannot
be emulated), and require __cmpxchg_local to be expressed as a macro
rather than an inline function so that its parameters would not be fixed
to unsigned long long in every case.
So I think cmpxchg64_local makes sense there, but I am open to
suggestions.
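As a purely illustrative aside on the parameter-width point above (not part of the patch), the difference looks like this in a caller:
/* Illustrative only: on a 32-bit kernel, "long" is 32 bits wide, so
 * cmpxchg_local() can operate on at most a 4-byte object, while
 * cmpxchg64_local() takes unsigned long long and updates a full
 * 64-bit object. */
static unsigned long small_flag;        /* 4-byte object */
static u64 wide_counter;                /* 8-byte object */

static void width_example(void)
{
        cmpxchg_local(&small_flag, 0UL, 1UL);           /* fits in unsigned long */
        cmpxchg64_local(&wide_counter, 0ULL, 1ULL);     /* needs the 64-bit flavor */
}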
Q:
Are there any callers?
A:
I am actually using it in LTTng in my timestamping code. I use it to
work around CPUs with asynchronous TSCs. I need to update 64-bit
values atomically on this 32-bit architecture.
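A hypothetical caller along those lines (the per-CPU variable and helper below are invented for illustration, not the LTTng code):
/* Hypothetical sketch: keep a per-CPU 64-bit "last timestamp" monotonic
 * even when interrupts or NMIs on the same CPU race with the update.
 * cmpxchg64_local() is enough because only the local CPU touches the
 * variable; the caller is assumed to run with preemption disabled. */
static DEFINE_PER_CPU(u64, last_tsc);

static u64 monotonic_timestamp(u64 tsc_now)
{
        u64 *last = &__get_cpu_var(last_tsc);
        u64 old, new;

        do {
                old = *last;
                new = (tsc_now > old) ? tsc_now : old + 1;
        } while (cmpxchg64_local(last, old, new) != old);

        return new;
}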
Changelog:
- Ran through checkpatch.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/cmpxchg_32.h')
-rw-r--r--  include/asm-x86/cmpxchg_32.h | 122
1 file changed, 79 insertions(+), 43 deletions(-)
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index f86ede28f6dc..cea1dae288a7 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -105,15 +105,24 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
-#define sync_cmpxchg(ptr,o,n)\
-        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
-                                        (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+                                       (unsigned long)(n), sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n) \
+        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
+                                            (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+                                             (unsigned long)(n), sizeof(*(ptr))))
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define cmpxchg64(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+                                         (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
+                                               (unsigned long long)(n)))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -203,6 +212,34 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
         return old;
 }
 
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+                        unsigned long long old, unsigned long long new)
+{
+        unsigned long long prev;
+        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+                             : "=A"(prev)
+                             : "b"((unsigned long)new),
+                               "c"((unsigned long)(new >> 32)),
+                               "m"(*__xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+}
+
+static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+                        unsigned long long old, unsigned long long new)
+{
+        unsigned long long prev;
+        __asm__ __volatile__("cmpxchg8b %3"
+                             : "=A"(prev)
+                             : "b"((unsigned long)new),
+                               "c"((unsigned long)(new >> 32)),
+                               "m"(*__xg(ptr)),
+                               "0"(old)
+                             : "memory");
+        return prev;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable running on 80386. It may be necessary to
@@ -228,7 +265,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
         return old;
 }
 
-#define cmpxchg(ptr,o,n) \
+#define cmpxchg(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) __ret; \
         if (likely(boot_cpu_data.x86 > 3)) \
@@ -239,7 +276,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                 (unsigned long)(n), sizeof(*(ptr))); \
         __ret; \
 })
-#define cmpxchg_local(ptr,o,n) \
+#define cmpxchg_local(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) __ret; \
         if (likely(boot_cpu_data.x86 > 3)) \
@@ -252,38 +289,37 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 })
 #endif
 
-static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
-                        unsigned long long new)
-{
-        unsigned long long prev;
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-                             : "=A"(prev)
-                             : "b"((unsigned long)new),
-                               "c"((unsigned long)(new >> 32)),
-                               "m"(*__xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-}
+#ifndef CONFIG_X86_CMPXCHG64
+/*
+ * Building a kernel capable running on 80386 and 80486. It may be necessary
+ * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
+ */
 
-static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-                        unsigned long long old, unsigned long long new)
-{
-        unsigned long long prev;
-        __asm__ __volatile__("cmpxchg8b %3"
-                             : "=A"(prev)
-                             : "b"((unsigned long)new),
-                               "c"((unsigned long)(new >> 32)),
-                               "m"(*__xg(ptr)),
-                               "0"(old)
-                             : "memory");
-        return prev;
-}
+extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+        __typeof__(*(ptr)) __ret; \
+        if (likely(boot_cpu_data.x86 > 4)) \
+                __ret = __cmpxchg64((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        else \
+                __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        __ret; \
+})
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+        __typeof__(*(ptr)) __ret; \
+        if (likely(boot_cpu_data.x86 > 4)) \
+                __ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        else \
+                __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \
+                                (unsigned long long)(n)); \
+        __ret; \
+})
+
+#endif
 
-#define cmpxchg64(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
-        (unsigned long long)(n)))
-#define cmpxchg64_local(ptr,o,n)\
-        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
-        (unsigned long long)(n)))
 #endif