author    Glauber de Oliveira Costa <gcosta@redhat.com>  2008-01-30 07:31:08 -0500
committer Ingo Molnar <mingo@elte.hu>                    2008-01-30 07:31:08 -0500
commit    833d8469b102365f427f7791e79ec1843ff5f164
tree      7f3ac89dd2f445e86339166a66287be1a74253c1  /include/asm-x86/system.h
parent    62fe164c5b036f4bdb19fbfb8f18a75631e67eee
x86: unify smp parts of system.h
The memory barrier parts of system.h are not very different between
i386 and x86_64, the main difference being which barrier instructions
are available, which we handle with ifdefs. They are consolidated in
the shared system.h, and then removed from the arch-specific headers.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/system.h')
-rw-r--r--  include/asm-x86/system.h  105
1 file changed, 105 insertions, 0 deletions
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 01ba1f8e64d1..4c15eb11a917 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -202,4 +202,109 @@ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
 
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPUs follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPUs to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non-Intel clones support out-of-order store. wmb() ceases
+ * to be a nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb()  asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight
+ * than rmb() on most CPUs, and is never heavier weight than rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb()	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+
 #endif
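
As a usage sketch (not part of the patch above), this is how the primitives
introduced by the hunk are meant to pair up across CPUs. All names below
(payload, ready, gp, publish_*, read_*) are invented for illustration, and
the sketch assumes the smp_* macros defined above are in scope:

/* Hypothetical example, assuming the smp_* macros from the hunk above. */
static int payload;
static int ready;
static int *gp;

static void publish_flag(void)			/* runs on CPU 0 */
{
	payload = 42;
	smp_wmb();		/* payload store ordered before flag store */
	ready = 1;
}

static int read_flag(void)			/* runs on CPU 1 */
{
	if (ready) {
		smp_rmb();	/* flag load ordered before payload load */
		return payload;	/* guaranteed to observe 42 */
	}
	return -1;		/* not published yet */
}

static void publish_pointer(int *p)		/* runs on CPU 0 */
{
	*p = 42;
	smp_wmb();		/* initialize *p before exposing it */
	gp = p;
}

static int read_pointer(void)			/* runs on CPU 1 */
{
	int *q = gp;

	if (q) {
		smp_read_barrier_depends();	/* q -> *q is a data dependency */
		return *q;	/* observes 42, even on Alpha */
	}
	return -1;		/* not published yet */
}

On x86 itself, per the #ifdefs above, smp_rmb() and smp_wmb() collapse to
barrier() (a compiler-only barrier) unless CONFIG_X86_PPRO_FENCE or
CONFIG_X86_OOSTORE is set, and smp_read_barrier_depends() expands to nothing;
the pairings still document the intended ordering and remain required on more
weakly ordered architectures.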