path: root/arch/m32r/include/asm
author     Peter Zijlstra <peterz@infradead.org>    2013-11-06 08:57:36 -0500
committer  Ingo Molnar <mingo@kernel.org>           2014-01-12 04:37:15 -0500
commit     93ea02bb84354370e51de803a9405f171f3edf88 (patch)
tree       dc7c7719fdfe0aed36eae3c2b374ced951e62a4c /arch/m32r/include/asm
parent     1de7da377bd880ff23917f78924d0e908329d978 (diff)
arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
We're going to be adding a few new barrier primitives, and in order to avoid endless duplication make more aggressive use of asm-generic/barrier.h. Change asm-generic/barrier.h such that it allows partial barrier definitions and fills out the rest with defaults.

There are a few architectures (m32r, m68k) that could probably do away with their barrier.h file entirely but are kept for now due to their unconventional nop() implementation.

Suggested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20131213150640.846368594@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
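The scheme the commit message describes can be sketched briefly: after this change, asm-generic/barrier.h supplies a default for every primitive an architecture header has not already defined, typically via #ifndef guards, so an arch header may provide a partial set and inherit the rest. The fragment below is an illustrative sketch of that pattern, not the literal contents of asm-generic/barrier.h; an arch header such as m32r's then only defines what is genuinely arch-specific (here, nop()) before including the generic file.

/* Sketch of the "partial definitions, generic defaults" scheme. */
#ifndef mb
#define mb()	barrier()		/* default: compiler barrier only */
#endif

#ifndef rmb
#define rmb()	mb()			/* read barrier falls back to mb() */
#endif

#ifndef wmb
#define wmb()	mb()			/* write barrier falls back to mb() */
#endif

#ifdef CONFIG_SMP
#ifndef smp_mb
#define smp_mb()	mb()		/* SMP: use the full barrier */
#endif
#else
#ifndef smp_mb
#define smp_mb()	barrier()	/* UP: a compiler barrier suffices */
#endif
#endif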
Diffstat (limited to 'arch/m32r/include/asm')
-rw-r--r--  arch/m32r/include/asm/barrier.h  80
1 file changed, 1 insertion(+), 79 deletions(-)
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
index 6976621efd3f..1a40265e8d88 100644
--- a/arch/m32r/include/asm/barrier.h
+++ b/arch/m32r/include/asm/barrier.h
@@ -11,84 +11,6 @@
 
 #define nop()	__asm__ __volatile__ ("nop" : : )
 
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb()	barrier()
-#define rmb()	mb()
-#define wmb()	mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_M32R_BARRIER_H */
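The end state shows why m32r keeps a barrier.h at all: only the nop() definition remains arch-specific, and everything else now comes from the generic header. A sketch of the resulting file, reconstructed from the context and added lines of the hunk above (the file's opening lines sit before this hunk, so the guard shown here is inferred from the closing #endif comment):

#ifndef _ASM_M32R_BARRIER_H
#define _ASM_M32R_BARRIER_H

#define nop()	__asm__ __volatile__ ("nop" : : )

#include <asm-generic/barrier.h>

#endif /* _ASM_M32R_BARRIER_H */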