about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author: Catalin Marinas <catalin.marinas@arm.com> 2010-07-28 17:01:55 -0400
committer: Russell King <rmk+kernel@arm.linux.org.uk> 2010-07-29 09:04:37 -0400
commit: 79f64dbf68c8a9779a7e9a25e0a9f0217a25b57a (patch)
tree: bd75d76d566c5dd211d310bf136cf3e4b07184ae
parent: 6775a558fece413376e1dacd435adb5fbe225f40 (diff)
ARM: 6273/1: Add barriers to the I/O accessors if ARM_DMA_MEM_BUFFERABLE
When the coherent DMA buffers are mapped as Normal Non-cacheable (ARM_DMA_MEM_BUFFERABLE enabled), buffer accesses are no longer ordered with Device memory accesses causing failures in device drivers that do not use the mandatory memory barriers before starting a DMA transfer. LKML discussions led to the conclusion that such barriers have to be added to the I/O accessors: http://thread.gmane.org/gmane.linux.kernel/683509/focus=686153 http://thread.gmane.org/gmane.linux.ide/46414 http://thread.gmane.org/gmane.linux.kernel.cross-arch/5250 This patch introduces a wmb() barrier to the write*() I/O accessors to handle the situations where Normal Non-cacheable writes are still in the processor (or L2 cache controller) write buffer before a DMA transfer command is issued. For the read*() accessors, a rmb() is introduced after the I/O to avoid speculative loads where the driver polls for a DMA transfer ready bit. Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r-- arch/arm/include/asm/io.h | 11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9db072df2b3d..3c91e7c80c29 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/system.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -191,6 +192,15 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
 					cpu_to_le32(v),__mem_pci(c)))
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define readb(c)		({ u8  __v = readb_relaxed(c); rmb(); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); rmb(); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); rmb(); __v; })
+
+#define writeb(v,c)		({ wmb(); writeb_relaxed(v,c); })
+#define writew(v,c)		({ wmb(); writew_relaxed(v,c); })
+#define writel(v,c)		({ wmb(); writel_relaxed(v,c); })
+#else
 #define readb(c)		readb_relaxed(c)
 #define readw(c)		readw_relaxed(c)
 #define readl(c)		readl_relaxed(c)
@@ -198,6 +208,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define writeb(v,c)		writeb_relaxed(v,c)
 #define writew(v,c)		writew_relaxed(v,c)
 #define writel(v,c)		writel_relaxed(v,c)
+#endif
 
 #define readsb(p,d,l)		__raw_readsb(__mem_pci(p),d,l)
 #define readsw(p,d,l)		__raw_readsw(__mem_pci(p),d,l)