path: root/arch/arm/include/asm/system.h
author     Catalin Marinas <catalin.marinas@arm.com>    2010-03-24 11:49:54 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2010-03-25 17:13:50 -0400
commit     e7c5650f6067f65f8e961394f376d4862808d0d2 (patch)
tree       ac1a298272cf4b452f3b36fef7878982d7eead9c /arch/arm/include/asm/system.h
parent     23107c542068b2b94390aa333f6b330af64961e4 (diff)
ARM: 5996/1: ARM: Change the mandatory barriers implementation (4/4)
The mandatory barriers (mb, rmb, wmb) are used even on uniprocessor systems for things like ordering Normal Non-cacheable memory accesses with DMA transfer (via Device memory writes). The current implementation uses dmb() for mb() and friends but this is not sufficient. The DMB only ensures the relative ordering of the observability of accesses by other processors or devices acting as masters. In the case of DMA transfers started by writes to device memory, the relative ordering is not ensured because accesses to slave ports of a device are not considered observable by the DMB definition.

A DSB is required for the data to reach the main memory (even if mapped as Normal Non-cacheable) before the device receives the notification to begin the transfer. Furthermore, some L2 cache controllers (like L2x0 or PL310) buffer stores to Normal Non-cacheable memory, and these would need to be drained with the outer_sync() function call.

The patch also allows platforms to define their own mandatory barriers implementation by selecting CONFIG_ARCH_HAS_BARRIERS and providing a mach/barriers.h file.

Note that the SMP barriers are unchanged (being DMBs as before) since they are only guaranteed to work with Normal Cacheable memory.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
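To make the ordering requirement concrete, here is a minimal sketch of the driver-side pattern the commit message describes. The device, its register layout and the foo_* names are invented for illustration; only wmb(), writel(), dma_addr_t and the Normal Non-cacheable/Device memory distinction come from the source. With this patch, the wmb() below expands to dsb() + outer_sync() on ARMv7 rather than a plain dmb().

#include <linux/types.h>
#include <linux/io.h>

/* Hypothetical DMA descriptor sitting in Normal Non-cacheable memory,
 * e.g. obtained from dma_alloc_coherent(). */
struct foo_desc {
	u32 addr;
	u32 len;
};

static void foo_start_dma(struct foo_desc *desc, void __iomem *regs,
			  dma_addr_t buf, size_t len)
{
	/* 1. Fill in the descriptor in Normal Non-cacheable memory. */
	desc->addr = buf;
	desc->len = len;

	/*
	 * 2. Mandatory write barrier: after this patch this is a DSB plus
	 * outer_sync(), so the descriptor writes have reached main memory
	 * (and any L2x0/PL310 write buffer has been drained) before the
	 * device is notified.  A DMB alone is not enough, because the
	 * device's slave port is not an observer in the DMB sense.
	 */
	wmb();

	/* 3. Device memory write that triggers the DMA transfer
	 * (hypothetical "GO" bit and register offset). */
	writel(0x1, regs + 0x04);
}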
Diffstat (limited to 'arch/arm/include/asm/system.h')
-rw-r--r--   arch/arm/include/asm/system.h   16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index ca88e6a84707..4ace45ec3ef8 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -137,10 +139,12 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@ extern unsigned int user_debug;
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()	do { } while(0)
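For platforms that select CONFIG_ARCH_HAS_BARRIERS, the first branch added above simply pulls in mach/barriers.h. A minimal sketch of such a header follows; it is purely illustrative (the mach-foo path and the choice of DSB-only barriers are assumptions), and only the mb/rmb/wmb macro names are mandated by this patch.

/* arch/arm/mach-foo/include/mach/barriers.h -- illustrative sketch only */
#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H

/*
 * Platform-specific mandatory barriers.  This example uses a plain DSB
 * for everything; a real platform would add whatever extra draining its
 * external write buffers or interconnect require.
 */
#define mb()	dsb()
#define rmb()	dsb()
#define wmb()	mb()

#endif /* __MACH_BARRIERS_H */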