-rw-r--r--  arch/alpha/include/asm/barrier.h      | 51
-rw-r--r--  arch/blackfin/include/asm/barrier.h   | 51
-rw-r--r--  arch/ia64/include/asm/barrier.h       | 22
-rw-r--r--  arch/metag/include/asm/barrier.h      |  7
-rw-r--r--  arch/mips/include/asm/barrier.h       | 52
-rw-r--r--  arch/powerpc/include/asm/barrier.h    |  6
-rw-r--r--  arch/s390/include/asm/barrier.h       |  5
-rw-r--r--  arch/sparc/include/asm/barrier_64.h   |  4
-rw-r--r--  arch/x86/include/asm/barrier.h        | 59
-rw-r--r--  arch/x86/um/asm/barrier.h             |  7

10 files changed, 129 insertions(+), 135 deletions(-)
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
index 3832bdb794fe..77516c87255d 100644
--- a/arch/alpha/include/asm/barrier.h
+++ b/arch/alpha/include/asm/barrier.h
@@ -7,6 +7,57 @@
 #define rmb() __asm__ __volatile__("mb": : :"memory")
 #define wmb() __asm__ __volatile__("wmb": : :"memory")
 
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
 #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
 
 #ifdef CONFIG_SMP
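
The comment added above describes the pointer-publication idiom: a writer initializes data, issues a write barrier, and only then publishes a pointer to it; a reader loads the pointer, issues read_barrier_depends(), and then performs the dependent load. A minimal self-contained sketch of that idiom using the macros defined in this header (the struct and function names are illustrative only, not part of the patch):

	struct foo {
		int a;
	};

	static struct foo a_struct = { .a = 1 };
	static struct foo b_struct;
	static struct foo *p = &a_struct;	/* initially points at "a" */

	/* CPU 0: initialize b, then publish the pointer to it */
	static void publish(void)
	{
		b_struct.a = 2;
		wmb();			/* order the initialization before the pointer store */
		p = &b_struct;
	}

	/* CPU 1: load the pointer, then perform the dependent load */
	static int consume(void)
	{
		struct foo *q = p;

		read_barrier_depends();	/* "mb" on Alpha; empty on most other architectures */
		return q->a;		/* sees b_struct.a == 2 whenever q == &b_struct */
	}

On Alpha the macro expands to a full "mb"; the other architectures in this series define it as an empty statement (Blackfin keeps its smp_check_barrier() variant), because their hardware never reorders a dependent load ahead of the load that produced its address.
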
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index 420006877998..dfb66fe88b34 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -22,6 +22,57 @@
 # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
 # define rmb() do { barrier(); smp_check_barrier(); } while (0)
 # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+/*
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
 # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #endif
 
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index a48957c7b445..e8fffb03963c 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -35,26 +35,22 @@
  * it's (presumably) much slower than mf and (b) mf.a is supported for
  * sequential memory pages only.
  */
 #define mb() ia64_mf()
 #define rmb() mb()
 #define wmb() mb()
-#define read_barrier_depends() do { } while(0)
 
 #ifdef CONFIG_SMP
 # define smp_mb() mb()
-# define smp_rmb() rmb()
-# define smp_wmb() wmb()
-# define smp_read_barrier_depends() read_barrier_depends()
-
 #else
-
 # define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-# define smp_read_barrier_depends() do { } while(0)
-
 #endif
 
+#define smp_rmb() smp_mb()
+#define smp_wmb() smp_mb()
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
 #define smp_mb__before_atomic() barrier()
 #define smp_mb__after_atomic() barrier()
 
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index c7591e80067c..6d8b8c9b7c25 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -47,8 +47,6 @@ static inline void wmb(void)
 	wr_fence();
 }
 
-#define read_barrier_depends() do { } while (0)
-
 #ifndef CONFIG_SMP
 #define fence() do { } while (0)
 #define smp_mb()        barrier()
@@ -82,7 +80,10 @@ static inline void fence(void)
 #define smp_wmb()       barrier()
 #endif
 #endif
-#define smp_read_barrier_depends() do { } while (0)
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
 #define smp_store_release(p, v) \
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index d0101dd0575e..3d69aa829a76 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,58 +10,6 @@
 
 #include <asm/addrspace.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
 
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index bab79a110c7b..cb6d66c6e3e1 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -33,7 +33,6 @@
 #define mb()   __asm__ __volatile__ ("sync" : : : "memory")
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
 
 #define set_mb(var, value) do { var = value; mb(); } while (0)
 
@@ -50,16 +49,17 @@
 #define smp_mb() mb()
 #define smp_rmb() __lwsync()
 #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#define smp_read_barrier_depends() read_barrier_depends()
 #else
 #define __lwsync() barrier()
 
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
 #endif /* CONFIG_SMP */
 
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
 /*
  * This is a barrier which prevents following instructions from being
  * started until the value of the argument x is known. For example, if
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index b5dce6544d76..33d191d295e4 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -24,11 +24,12 @@
 
 #define rmb() mb()
 #define wmb() mb()
-#define read_barrier_depends() do { } while(0)
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
 
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 305dcc3dc721..6c974c0977ad 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -37,7 +37,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define rmb() __asm__ __volatile__("":::"memory")
 #define wmb() __asm__ __volatile__("":::"memory")
 
-#define read_barrier_depends() do { } while(0)
 #define set_mb(__var, __value) \
 	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
 
@@ -51,7 +50,8 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define smp_wmb() __asm__ __volatile__("":::"memory")
 #endif
 
-#define smp_read_barrier_depends() do { } while(0)
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
 
 #define smp_store_release(p, v) \
 do { \
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0f4460b5636d..5238000285c1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -24,60 +24,6 @@
 #define wmb() asm volatile("sfence" ::: "memory")
 #endif
 
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends() do { } while (0)
-
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #ifdef CONFIG_X86_PPRO_FENCE
@@ -86,16 +32,17 @@
 # define smp_rmb() barrier()
 #endif
 #define smp_wmb() barrier()
-#define smp_read_barrier_depends() read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
 
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
 #if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index cc04e67bfd05..d6511d954e2b 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -29,8 +29,6 @@
 
 #endif /* CONFIG_X86_32 */
 
-#define read_barrier_depends() do { } while (0)
-
 #ifdef CONFIG_SMP
 
 #define smp_mb() mb()
@@ -42,7 +40,6 @@
 
 #define smp_wmb() barrier()
 
-#define smp_read_barrier_depends() read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #else /* CONFIG_SMP */
@@ -50,11 +47,13 @@
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 
 #endif /* CONFIG_SMP */
 
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
  * (or get_cycles or vread that possibly accesses the TSC) in a defined