author		Catalin Marinas <catalin.marinas@arm.com>	2010-06-21 10:10:07 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-07-01 05:12:31 -0400
commit		ad642d9f58f1af6e96efccb5f84e52c6d01db5c4 (patch)
tree		4319ed1e81f50c7035457cc0fa1cf4bcdf760abb /arch
parent		ca57926d53580f7c950496cb7ef6d7930610e1dd (diff)
ARM: 6188/1: Add a config option for the ARM11MPCore DMA cache maintenance workaround
Commit f4d6477f introduced a workaround for the lack of hardware
broadcasting of the cache maintenance operations on ARM11MPCore. However,
the workaround is only valid on CPUs that do not do speculative loads into
the D-cache.

This patch adds a Kconfig option with the corresponding help to make the
above clear. When the DMA_CACHE_RWFO option is disabled, the kernel
behaviour reverts to that prior to the f4d6477f commit. This also allows
ARMv6 UP processors with speculative loads to work correctly. For other
processors, a different workaround may be needed.

Cc: Ronen Shitrit <rshitrit@marvell.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
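For orientation, the routines patched below sit underneath the streaming
DMA-mapping API: on ARMv6, dma_map_single()/dma_unmap_single() end up in the
dma_{map,unmap}_area() cache maintenance hooks implemented in cache-v6.S.
The following is a minimal driver-side sketch of that call path; the
function, device and buffer names are illustrative only and not part of
this patch.

#include <linux/dma-mapping.h>

/* Illustrative sketch only: map a buffer the device will write into.
 * dma_map_single() reaches v6_dma_map_area (invalidate for
 * DMA_FROM_DEVICE); dma_unmap_single() reaches v6_dma_unmap_area,
 * which this patch makes invalidate again when RWFO is disabled. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}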
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/Kconfig	| 19 +++++++++++++++++++
-rw-r--r--	arch/arm/mm/cache-v6.S	| 15 ++++++++++++---
2 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 346ae14824a5..fc1b2fa59429 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -735,6 +735,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config DMA_CACHE_RWFO
+	bool "Enable read/write for ownership DMA cache maintenance"
+	depends on CPU_V6 && SMP
+	default y
+	help
+	  The Snoop Control Unit on ARM11MPCore does not detect the
+	  cache maintenance operations and the dma_{map,unmap}_area()
+	  functions may leave stale cache entries on other CPUs. By
+	  enabling this option, Read or Write For Ownership in the ARMv6
+	  DMA cache maintenance functions is performed. These LDR/STR
+	  instructions change the cache line state to shared or modified
+	  so that the cache operation has the desired effect.
+
+	  Note that the workaround is only valid on processors that do
+	  not perform speculative loads into the D-cache. For such
+	  processors, if cache maintenance operations are not broadcast
+	  in hardware, other workarounds are needed (e.g. cache
+	  maintenance broadcasting in software via FIQ).
+
 config OUTER_CACHE
 	bool
 
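The help text above describes the read/write-for-ownership trick only in
terms of LDR/STR instructions. As a rough C-level illustration of the idea
(not the kernel implementation, which is the assembly patched below; the
cache line size value is an assumption for ARM11MPCore):

#define D_CACHE_LINE_SIZE	32	/* assumed ARM11MPCore D-cache line size */

/* Touch each cache line of [start, end) before the per-line maintenance
 * instruction.  The load brings the line into this CPU's D-cache in the
 * shared state; the store makes it modified.  Either way the line is now
 * local, so the following maintenance operation, which the SCU does not
 * broadcast, has the desired effect. */
static void rwfo_touch_range(volatile unsigned long *start,
			     volatile unsigned long *end)
{
	volatile unsigned long *p;

	for (p = start; p < end; p += D_CACHE_LINE_SIZE / sizeof(*p)) {
		unsigned long tmp = *p;		/* read for ownership */
		*p = tmp;			/* write for ownership */
		/* ... per-line MCR clean/invalidate would follow here ... */
	}
}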
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 332b48c6d4ff..86aa689ef1aa 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,7 +211,7 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 	str	r2, [r0]			@ write for ownership
 #endif
@@ -235,7 +235,7 @@ v6_dma_inv_range:
 v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 #endif
 #ifdef HARVARD_CACHE
@@ -258,7 +258,7 @@ v6_dma_clean_range:
 ENTRY(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 	str	r2, [r0]			@ write for ownership
 #endif
@@ -284,9 +284,13 @@ ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
+	b	v6_dma_clean_range
+#else
 	teq	r2, #DMA_TO_DEVICE
 	beq	v6_dma_clean_range
 	b	v6_dma_flush_range
+#endif
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -296,6 +300,11 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
+#endif
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
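To summarise the control-flow change in the last two hunks, here is a rough
C paraphrase of the patched entry points; the *_sketch helper names are
illustrative, and the real implementation is the assembly above.

/* Rough C paraphrase of v6_dma_map_area/v6_dma_unmap_area after this
 * patch; not kernel code, helper names mirror the assembly labels. */
static void v6_dma_map_area_sketch(unsigned long start, size_t size, int dir)
{
	unsigned long end = start + size;

	if (dir == DMA_FROM_DEVICE) {
		v6_dma_inv_range(start, end);	/* device writes: invalidate */
		return;
	}
#ifndef CONFIG_DMA_CACHE_RWFO
	/* Without RWFO, TO_DEVICE and BIDIRECTIONAL both only clean. */
	v6_dma_clean_range(start, end);
#else
	if (dir == DMA_TO_DEVICE)
		v6_dma_clean_range(start, end);
	else
		v6_dma_flush_range(start, end);	/* BIDIRECTIONAL: clean + invalidate */
#endif
}

static void v6_dma_unmap_area_sketch(unsigned long start, size_t size, int dir)
{
#ifndef CONFIG_DMA_CACHE_RWFO
	/* Without RWFO, lines may have been speculatively loaded while the
	 * device owned the buffer, so invalidate again on unmap (not needed
	 * for TO_DEVICE, where the CPU does not read back fresh data). */
	if (dir != DMA_TO_DEVICE)
		v6_dma_inv_range(start, start + size);
#endif
	/* With RWFO enabled, unmap remains a no-op. */
}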