author     Glauber de Oliveira Costa <gcosta@redhat.com>   2008-01-30 07:31:08 -0500
committer  Ingo Molnar <mingo@elte.hu>                     2008-01-30 07:31:08 -0500
commit     833d8469b102365f427f7791e79ec1843ff5f164 (patch)
tree       7f3ac89dd2f445e86339166a66287be1a74253c1 /include
parent     62fe164c5b036f4bdb19fbfb8f18a75631e67eee (diff)
x86: unify smp parts of system.h
The memory barrier parts of system.h are not very different between i386 and x86_64; the main difference is the availability of instructions, which we handle with the use of ifdefs. They are consolidated in the common system.h file and then removed from the arch-specific headers.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
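As a rough illustration of the ifdef scheme the message describes, here is a standalone sketch (not the kernel header itself; it assumes an x86 target and GCC, and replaces the CONFIG symbol with compiler-defined macros). The unified header keeps one body and only switches how the fences are emitted: 64-bit can use mfence/lfence/sfence unconditionally, while the 32-bit side must allow for CPUs without them, which the real kernel handles with alternative() and a locked-add fallback rather than the static fallback shown here.

/*
 * Standalone sketch of the unification pattern (illustrative only):
 * one shared set of barrier macros, with a single #ifdef where the
 * instruction availability differs. The real 32-bit kernel header
 * uses alternative() for runtime patching instead of this static
 * fallback.
 */
#include <stdio.h>

#ifdef __x86_64__
/* SSE2 is architectural on x86_64, so the fence instructions always exist. */
#define mb()  asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#else
/* Conservative 32-bit fallback: a locked add to the stack is a full barrier. */
#define mb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() mb()
#define wmb() mb()
#endif

int main(void)
{
	static volatile int data, flag;

	data = 42;
	wmb();		/* order the data store before the flag store */
	flag = 1;
	mb();		/* full barrier, e.g. before polling a device register */
	printf("data=%d flag=%d\n", data, flag);
	return 0;
}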
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/system.h    | 105
-rw-r--r--  include/asm-x86/system_32.h |  99
-rw-r--r--  include/asm-x86/system_64.h |  25
3 files changed, 105 insertions(+), 124 deletions(-)
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 01ba1f8e64d..4c15eb11a91 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -202,4 +202,109 @@ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
 
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     b = 2;
+ *     memory_barrier();
+ *     p = &b;                         q = p;
+ *                                     read_barrier_depends();
+ *                                     d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     a = 2;
+ *     memory_barrier();
+ *     b = 3;                          y = b;
+ *                                     read_barrier_depends();
+ *                                     x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb() rmb()
+#else
+# define smp_rmb() barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb() wmb()
+#else
+# define smp_wmb() barrier()
+#endif
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+
 #endif
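The doc comment added above walks through a pointer-publication example in prose; the sketch below is a hypothetical userspace rendering of the same pattern (not kernel code). The names smp_wmb() and read_barrier_depends() are stand-ins defined locally here with a GCC release fence and a no-op, which matches their meaning on x86 as defined in the patch.

/*
 * Pointer publication, as in the read_barrier_depends() example:
 * the writer initialises the data, orders that store with a write
 * barrier, then publishes the pointer; the reader's dereference is
 * data-dependent on the pointer load, so on x86 (and everything but
 * Alpha-class machines) no read barrier is needed between them.
 */
#include <pthread.h>
#include <stdio.h>

#define smp_wmb()              __atomic_thread_fence(__ATOMIC_RELEASE)
#define read_barrier_depends() do { } while (0)   /* nop on x86, as in the patch */

static int a = 0, b = 1;
static int *volatile p = &a;

static void *writer(void *arg)
{
	(void)arg;
	b = 2;
	smp_wmb();              /* order the store to b before publishing &b */
	p = &b;
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	int *q = p;             /* load whichever pointer is currently published */
	read_barrier_depends(); /* only Alpha-class CPUs need a real barrier here */
	int d = *q;             /* data-dependent read: sees 1 (old) or 2 (new) */
	printf("d = %d\n", d);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}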
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index c05568290ad..7da0716fb31 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -36,105 +36,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 #endif /* __KERNEL__ */
 
 
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-
-
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *     CPU 0                           CPU 1
- *
- *     b = 2;
- *     memory_barrier();
- *     p = &b;                         q = p;
- *                                     read_barrier_depends();
- *                                     d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *     CPU 0                           CPU 1
- *
- *     a = 2;
- *     memory_barrier();
- *     b = 3;                          y = b;
- *                                     read_barrier_depends();
- *                                     x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb() rmb()
-#else
-# define smp_rmb() barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
 #include <linux/irqflags.h>
 
 /*
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 560470ea27c..9def35eb75e 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -48,31 +48,6 @@
 
 #endif /* __KERNEL__ */
 
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do {} while(0)
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do {} while(0)
-#endif
-
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-
-#define read_barrier_depends() do {} while(0)
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-
 static inline unsigned long read_cr8(void)
 {
 	unsigned long cr8;
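For reference, the set_mb() definition the 64-bit header loses here is the same one now provided by the common system.h: on SMP kernels the assignment is done with xchg(), whose implicit lock prefix makes the store a full barrier, while on UP a plain store plus a compiler barrier suffices. A hypothetical userspace rendering of that split, with GCC's __atomic_exchange_n standing in for the kernel's xchg() and ASSUME_SMP standing in for CONFIG_SMP:

/*
 * Illustration (not kernel code) of what set_mb() boils down to:
 * store a value so that the store itself acts as a full barrier.
 */
#include <stdio.h>

#define barrier() asm volatile("" ::: "memory")

#ifdef ASSUME_SMP
/* SMP: an atomic exchange carries full ordering on x86. */
# define set_mb(var, value) \
	do { (void)__atomic_exchange_n(&(var), (value), __ATOMIC_SEQ_CST); } while (0)
#else
/* UP: a plain store plus a compiler barrier is enough. */
# define set_mb(var, value) do { (var) = (value); barrier(); } while (0)
#endif

int main(void)
{
	static int ready;

	set_mb(ready, 1);       /* the store to ready is ordered against later accesses */
	printf("ready = %d\n", ready);
	return 0;
}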