author		David S. Miller <davem@davemloft.net>	2009-11-05 23:24:33 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-05 23:24:33 -0500
commit		4eb0c00b6221f28b8988df37c9cb1bc5a2b91b39 (patch)
tree		f1caf8f77980f74766c26e5bf1ce9a140fd17b2d /arch/sparc
parent		03717e3d12b625268848414e39beda25e4515692 (diff)
sparc64: Add a comment about why we only use certain memory barriers these days.
Based upon feedback from Mathieu Desnoyers.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/include/asm/system_64.h	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 25e848f0cad7..d47a98e66972 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -63,6 +63,10 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
63 : : : "memory"); \ 63 : : : "memory"); \
64} while (0) 64} while (0)
65 65
66/* The kernel always executes in TSO memory model these days,
67 * and furthermore most sparc64 chips implement more stringent
68 * memory ordering than required by the specifications.
69 */
66#define mb() membar_safe("#StoreLoad") 70#define mb() membar_safe("#StoreLoad")
67#define rmb() __asm__ __volatile__("":::"memory") 71#define rmb() __asm__ __volatile__("":::"memory")
68#define wmb() __asm__ __volatile__("":::"memory") 72#define wmb() __asm__ __volatile__("":::"memory")