author    Glauber de Oliveira Costa <gcosta@redhat.com>  2008-01-30 07:31:08 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-01-30 07:31:08 -0500
commit    833d8469b102365f427f7791e79ec1843ff5f164 (patch)
tree      7f3ac89dd2f445e86339166a66287be1a74253c1 /include/asm-x86/system_32.h
parent    62fe164c5b036f4bdb19fbfb8f18a75631e67eee (diff)
x86: unify smp parts of system.h
The memory barrier parts of system.h are not very different between
i386 and x86_64; the main difference is the availability of
instructions, which we handle with the use of ifdefs. They are
consolidated in the system.h file and then removed from the
arch-specific headers.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
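What this buys is one ifdef-selected block instead of two per-arch copies. A minimal sketch of the shape the unified system.h takes (the 32-bit arm is exactly the block this patch removes below; the 64-bit arm is an assumption, based on mfence/lfence/sfence being unconditionally available on x86_64, so no alternative() patching is needed there):

    #ifdef CONFIG_X86_32
    /* 32-bit: patched at boot via alternative(); CPUs without SSE/SSE2
     * fall back to "lock; addl", which is a full barrier. */
    #define mb()  alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
    #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
    #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
    #else
    /* 64-bit: the fence instructions exist on every CPU. */
    #define mb()  asm volatile("mfence" ::: "memory")
    #define rmb() asm volatile("lfence" ::: "memory")
    #define wmb() asm volatile("sfence" ::: "memory")
    #endif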
Diffstat (limited to 'include/asm-x86/system_32.h')
-rw-r--r--  include/asm-x86/system_32.h  |  99 ----------
1 file changed, 0 insertions(+), 99 deletions(-)
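Most of what the diff below deletes is the long read_barrier_depends() comment. Its publication example maps directly onto portable code; the following is a hypothetical userspace analogue in C11 (an illustration, not kernel code): the release store plays the role of the wmb()-plus-pointer-publish on CPU 0, and the acquire load plays the role of the dependent read guarded by read_barrier_depends() on CPU 1. C11's memory_order_consume would be the closer match, but compilers promote it to acquire in practice.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    static int b = 1;
    static _Atomic(int *) p;        /* starts NULL; plays the role of "p" */

    static int writer(void *arg)    /* CPU 0 */
    {
        (void)arg;
        b = 2;
        /* release store: orders "b = 2" before the pointer publish,
         * like wmb() before "p = &b" */
        atomic_store_explicit(&p, &b, memory_order_release);
        return 0;
    }

    static int reader(void *arg)    /* CPU 1 */
    {
        (void)arg;
        int *q;
        /* acquire load: the dependent read *q below is guaranteed to
         * see the writer's "b = 2", like read_barrier_depends() */
        while ((q = atomic_load_explicit(&p, memory_order_acquire)) == NULL)
            ;
        printf("d = %d\n", *q);     /* always prints 2 */
        return 0;
    }

    int main(void)
    {
        thrd_t t0, t1;
        thrd_create(&t0, writer, NULL);
        thrd_create(&t1, reader, NULL);
        thrd_join(t0, NULL);
        thrd_join(t1, NULL);
        return 0;
    }

On x86 itself both orderings come for free at the instruction level, which is why the CONFIG_SMP block in the diff can let smp_rmb() degrade to a plain compiler barrier() unless CONFIG_X86_PPRO_FENCE is set.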
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index c05568290add..7da0716fb317 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -36,105 +36,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 #endif /* __KERNEL__ */
 
 
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-
-
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequent reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data returned by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb()	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
 #include <linux/irqflags.h>
 
 /*