Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--	arch/mips/include/asm/barrier.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f7fdc24e972d..314ab5532019 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data return by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
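A hedged aside on what this comment describes: read_barrier_depends() only orders a load against later loads that consume its result, which is why it can be so much cheaper than rmb(). In kernel code this pattern is normally reached through rcu_dereference() rather than by calling the barrier directly. A minimal sketch, assuming the usual RCU API from <linux/rcupdate.h>; the gbl_foo/read_foo_a names are invented for illustration:

#include <linux/rcupdate.h>

struct foo {
	int a;
};

static struct foo __rcu *gbl_foo;	/* invented example pointer */

static int read_foo_a(void)
{
	struct foo *p;
	int ret;

	rcu_read_lock();
	p = rcu_dereference(gbl_foo);	/* implies read_barrier_depends() */
	ret = p ? p->a : -1;		/* dependent load: ordered after the pointer load */
	rcu_read_unlock();
	return ret;
}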
@@ -43,7 +43,7 @@
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
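The <programlisting> the prose above refers back to falls outside this hunk's context. Reconstructed from the kernel's own memory-barrier documentation (so a paraphrase, not part of this diff), the dependent-read case looks roughly like this, with initial values a == 0, b == 1, p == &a:

int a = 0, b = 1;		/* initial values per the documentation	*/
int *p = &a;

void cpu0(void)
{
	b = 2;
	wmb();			/* "memory_barrier()" in the original listing	*/
	p = &b;
}

void cpu1(void)
{
	int *q, d;

	q = p;
	read_barrier_depends();	/* the load of *q depends on the load of p	*/
	d = *q;			/* if q == &b, d is guaranteed to see b == 2	*/
}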
@@ -57,7 +57,7 @@
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 */
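For contrast, the listing elided between the two hunks above (again a reconstruction, not part of this diff) has no data dependency between the two reads, which is exactly the case the comment says needs rmb():

int a = 0, b = 1;	/* same initial values as in the previous sketch	*/

void cpu0(void)
{
	a = 2;
	wmb();
	b = 3;
}

void cpu1(void)
{
	int y, x;

	y = b;
	rmb();		/* read_barrier_depends() would NOT order these loads	*/
	x = a;		/* without rmb(), Alpha could observe y == 3 and x == 0	*/
}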
@@ -92,7 +92,7 @@
	: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")

# define fast_wmb()	__syncw()
# define fast_rmb()	barrier()
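A hedged note on the Octeon-specific block: the context shows fast_wmb() expanding to __syncw(), i.e. the doubled syncw sequence in OCTEON_SYNCW_STR, while fast_rmb() is only a compiler barrier() on this configuration, so a write barrier used to publish data is cheap on Octeon. A minimal sketch of that publish pattern, with the descriptor layout and field names invented for illustration (and assuming wmb() is ultimately routed to fast_wmb() by the rest of this header):

struct ring_desc {				/* invented example structure		*/
	unsigned int len;
	unsigned int owned_by_hw;
};

static void publish_descriptor(struct ring_desc *desc, unsigned int len)
{
	desc->len = len;	/* fill in the payload first			*/
	wmb();			/* on Octeon: syncw; syncw (via __syncw())	*/
	desc->owned_by_hw = 1;	/* only now hand the descriptor over		*/
}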
@@ -158,7 +158,7 @@
#endif

#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif
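__WEAK_LLSC_MB exists to be pasted into inline-asm ll/sc sequences: on configurations where memory accesses can be reordered past a completed ll/sc pair it emits a real sync, otherwise it degenerates to a blank line. A hedged sketch of how such a string is typically spliced into an exchange loop follows; the surrounding asm is simplified and the function name is invented, so consult the real cmpxchg/atomic implementations under arch/mips for the authoritative version:

static inline unsigned int xchg_u32_sketch(volatile unsigned int *ptr,
					   unsigned int val)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"	/* load-linked the old value		*/
	"	move	%1, %z3		\n"
	"	sc	%1, %2		\n"	/* store-conditional the new one	*/
	"	beqz	%1, 1b		\n"	/* retry if the sc failed		*/
	__WEAK_LLSC_MB				/* "sync" only where LL/SC is weak	*/
	: "=&r" (old), "=&r" (tmp), "+m" (*ptr)
	: "Jr" (val)
	: "memory");

	return old;
}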