author		Russell King <rmk+kernel@arm.linux.org.uk>	2010-07-21 04:22:45 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-07-21 04:22:45 -0400
commit		14764b01a5576ce23a9d0c95a027049206a19cef (patch)
tree		fa7f4e3b9b62598076baf7d312dd37d6c91b0d56 /arch/arm/mm
parent		fc4978b796e5e52ab3a709495a968199afe0a108 (diff)
parent		5ccd4302a20bfe56eb72a5e27ad0be046fc820a5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nico/orion into devel-stable
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		21
-rw-r--r--	arch/arm/mm/cache-l2x0.c	 5
-rw-r--r--	arch/arm/mm/cache-v6.S		18
-rw-r--r--	arch/arm/mm/dma-mapping.c	18
4 files changed, 47 insertions, 15 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 346ae14824a5..101105e52610 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -735,6 +735,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config DMA_CACHE_RWFO
+	bool "Enable read/write for ownership DMA cache maintenance"
+	depends on CPU_V6 && SMP
+	default y
+	help
+	  The Snoop Control Unit on ARM11MPCore does not detect the
+	  cache maintenance operations and the dma_{map,unmap}_area()
+	  functions may leave stale cache entries on other CPUs. By
+	  enabling this option, Read or Write For Ownership in the ARMv6
+	  DMA cache maintenance functions is performed. These LDR/STR
+	  instructions change the cache line state to shared or modified
+	  so that the cache operation has the desired effect.
+
+	  Note that the workaround is only valid on processors that do
+	  not perform speculative loads into the D-cache. For such
+	  processors, if cache maintenance operations are not broadcast
+	  in hardware, other workarounds are needed (e.g. cache
+	  maintenance broadcasting in software via FIQ).
+
 config OUTER_CACHE
 	bool
 
@@ -794,6 +813,8 @@ config ARM_L1_CACHE_SHIFT
 
 config ARM_DMA_MEM_BUFFERABLE
 	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
+		     MACH_REALVIEW_PB11MP)
 	default y if CPU_V6 || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
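
As a note on the new DMA_CACHE_RWFO option: the help text is terse, so here is a conceptual C sketch of what "read/write for ownership" buys on ARM11MPCore. This is not the kernel's implementation (that is the ARM assembly in arch/arm/mm/cache-v6.S below); the cache_clean_line()/cache_inv_line() helpers are hypothetical stand-ins for the MCR p15 maintenance instructions, and the loop shape is only assumed to mirror the assembly.

#include <stdint.h>

#define D_CACHE_LINE_SIZE 32	/* ARM11MPCore D-cache line size */

/* Hypothetical stand-ins for the per-line maintenance instructions. */
static void cache_clean_line(volatile uint8_t *p) { (void)p; /* ~ mcr p15, 0, r0, c7, c10, 1 */ }
static void cache_inv_line(volatile uint8_t *p)   { (void)p; /* ~ mcr p15, 0, r0, c7, c6, 1  */ }

/* Mapping for DMA_TO_DEVICE: clean the buffer out to RAM. */
static void dma_clean_range_rwfo(volatile uint8_t *start, volatile uint8_t *end)
{
	volatile uint8_t *p;

	for (p = start; p < end; p += D_CACHE_LINE_SIZE) {
		/*
		 * Read for ownership: the load migrates the line into this
		 * CPU's D-cache, so the purely local clean below writes back
		 * the latest data even if another CPU touched it last.  The
		 * SCU does not broadcast the maintenance operation itself.
		 */
		(void)*p;		/* the added "ldr r2, [r0]" */
		cache_clean_line(p);
	}
}

/* Mapping for DMA_FROM_DEVICE: invalidate the buffer before the device writes. */
static void dma_inv_range_rwfo(volatile uint8_t *start, volatile uint8_t *end)
{
	volatile uint8_t *p;

	for (p = start; p < end; p += D_CACHE_LINE_SIZE) {
		uint8_t v = *p;		/* read for ownership ("ldr r2, [r0]") */
		*p = v;			/* write for ownership ("str r2, [r0]"):
					 * the line becomes Modified here, so no
					 * other CPU still caches a copy */
		cache_inv_line(p);	/* a local invalidate now removes the
					 * only cached copy */
	}
}

With the option disabled, the same routines fall back to plain per-line maintenance, which is only safe if the operations reach the other CPUs by some other means (e.g. the FIQ-based broadcasting the help text mentions).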
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9819869d2bc9..df4955885b21 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -218,6 +218,9 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
 	aux = readl(l2x0_base + L2X0_AUX_CTRL);
 
+	aux &= aux_mask;
+	aux |= aux_val;
+
 	/* Determine the number of ways */
 	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
@@ -248,8 +251,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
 
 		/* l2x0 controller is disabled */
-		aux &= aux_mask;
-		aux |= aux_val;
 		writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
 		l2x0_inv_all();
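
A plausible reading of the cache-l2x0.c reordering above: the L310 way count is derived from the auxiliary control value, so folding aux_mask/aux_val in before the switch lets a platform's overrides influence the way calculation rather than only the value later written to the still-disabled controller. Below is a minimal userspace sketch of the ordering difference; the associativity bit and the sample register value are assumptions for illustration, not quotations of the driver.

#include <stdint.h>
#include <stdio.h>

#define L310_AUX_ASSOC_16WAY	(1u << 16)	/* assumed L310 associativity bit */

static unsigned int l310_ways(uint32_t aux)
{
	return (aux & L310_AUX_ASSOC_16WAY) ? 16 : 8;
}

int main(void)
{
	uint32_t aux_hw   = 0x02020000;			/* hypothetical L2X0_AUX_CTRL reset value */
	uint32_t aux_mask = ~0u;			/* platform arguments to l2x0_init() */
	uint32_t aux_val  = L310_AUX_ASSOC_16WAY;	/* platform requests 16-way operation */

	/* Pre-patch ordering: ways computed from the raw register value. */
	printf("ways before applying overrides: %u\n", l310_ways(aux_hw));

	/* Post-patch ordering: overrides folded in first, then ways computed. */
	uint32_t aux = (aux_hw & aux_mask) | aux_val;
	printf("ways after applying overrides:  %u\n", l310_ways(aux));

	return 0;
}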
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index e46ecd847138..86aa689ef1aa 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,8 +211,9 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
-#ifdef CONFIG_SMP
-	str	r0, [r0]			@ write for ownership
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldr	r2, [r0]			@ read for ownership
+	str	r2, [r0]			@ write for ownership
 #endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
@@ -234,7 +235,7 @@ v6_dma_inv_range:
 v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 #endif
 #ifdef HARVARD_CACHE
@@ -257,7 +258,7 @@ v6_dma_clean_range:
 ENTRY(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 	str	r2, [r0]			@ write for ownership
 #endif
@@ -283,9 +284,13 @@ ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
+	b	v6_dma_clean_range
+#else
 	teq	r2, #DMA_TO_DEVICE
 	beq	v6_dma_clean_range
 	b	v6_dma_flush_range
+#endif
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -295,6 +300,11 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
+#endif
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
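
For readers who do not follow ARM assembly, the net effect of the v6_dma_map_area()/v6_dma_unmap_area() hunks above is a direction dispatch that differs with CONFIG_DMA_CACHE_RWFO. The C rendering below is a sketch of that dispatch only; the v6_dma_*_range() prototypes stand in for the assembly routines earlier in the file.

#include <stddef.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

void v6_dma_inv_range(void *start, void *end);
void v6_dma_clean_range(void *start, void *end);
void v6_dma_flush_range(void *start, void *end);

void v6_dma_map_area(void *start, size_t size, enum dma_data_direction dir)
{
	void *end = (char *)start + size;

	if (dir == DMA_FROM_DEVICE) {
		v6_dma_inv_range(start, end);	/* discard stale lines before the device writes */
		return;
	}
#ifndef CONFIG_DMA_CACHE_RWFO
	/*
	 * Without RWFO a clean covers both TO_DEVICE and BIDIRECTIONAL;
	 * the unmap path below invalidates after the transfer.
	 */
	v6_dma_clean_range(start, end);
#else
	if (dir == DMA_TO_DEVICE)
		v6_dma_clean_range(start, end);
	else
		v6_dma_flush_range(start, end);	/* BIDIRECTIONAL: clean + invalidate up front */
#endif
}

void v6_dma_unmap_area(void *start, size_t size, enum dma_data_direction dir)
{
#ifndef CONFIG_DMA_CACHE_RWFO
	if (dir != DMA_TO_DEVICE)
		v6_dma_inv_range(start, (char *)start + size);	/* drop lines (re)fetched during the DMA */
#else
	(void)start; (void)size; (void)dir;	/* with RWFO, all work was done at map time */
#endif
}

The asymmetry is the point of the option: with RWFO enabled, the ownership accesses make map-time maintenance sufficient and unmap becomes a no-op, whereas without it an invalidate is still needed at unmap time for FROM_DEVICE and BIDIRECTIONAL buffers.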
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13fa536d82e6..9e7742f0a102 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -24,15 +24,6 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
-/* Sanity check size */
-#if (CONSISTENT_DMA_SIZE % SZ_2M)
-#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
-#endif
-
-#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
-
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = ISA_DMA_THRESHOLD;
@@ -123,6 +114,15 @@ static void __dma_free_buffer(struct page *page, size_t size)
 }
 
 #ifdef CONFIG_MMU
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
+
+#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */