Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/cache-v6.S     10
-rw-r--r--  arch/arm/mm/cache-v7.S     10
-rw-r--r--  arch/arm/mm/dma-mapping.c  68
3 files changed, 42 insertions, 46 deletions
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index a11934e53fbd..9d89c67a1cc3 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -271,10 +271,9 @@ ENTRY(v6_dma_flush_range)
  */
 ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
-	cmp	r2, #DMA_TO_DEVICE
-	beq	v6_dma_clean_range
-	bcs	v6_dma_inv_range
-	b	v6_dma_flush_range
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v6_dma_inv_range
+	b	v6_dma_clean_range
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -284,6 +283,9 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b1cd0fd91207..bcd64f265870 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -279,10 +279,9 @@ ENDPROC(v7_dma_flush_range)
  */
 ENTRY(v7_dma_map_area)
 	add	r1, r1, r0
-	cmp	r2, #DMA_TO_DEVICE
-	beq	v7_dma_clean_range
-	bcs	v7_dma_inv_range
-	b	v7_dma_flush_range
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v7_dma_inv_range
+	b	v7_dma_clean_range
 ENDPROC(v7_dma_map_area)
 
 /*
@@ -292,6 +291,9 @@ ENDPROC(v7_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v7_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v7_dma_inv_range
 	mov	pc, lr
 ENDPROC(v7_dma_unmap_area)
 
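For readability, the new direction handling in v6_dma_map_area/v6_dma_unmap_area (and the identical v7 variants) is roughly equivalent to the following C sketch. This is only an illustrative reading of the assembly, not code from the patch; map_area_sketch/unmap_area_sketch and dma_inv_range/dma_clean_range are hypothetical stand-ins for the real per-CPU routines.

/*
 * Hypothetical C rendering of the new v6/v7 map/unmap direction logic;
 * dma_inv_range()/dma_clean_range() stand in for the v6/v7_dma_inv_range
 * and v6/v7_dma_clean_range assembly routines.
 */
static void map_area_sketch(unsigned long start, unsigned long size,
			    enum dma_data_direction dir)
{
	unsigned long end = start + size;	/* add  r1, r1, r0 */

	if (dir == DMA_FROM_DEVICE)		/* teq  r2, #DMA_FROM_DEVICE */
		dma_inv_range(start, end);	/* device writes: discard stale CPU lines */
	else					/* DMA_TO_DEVICE or DMA_BIDIRECTIONAL */
		dma_clean_range(start, end);	/* CPU wrote: write dirty lines back to RAM */
}

static void unmap_area_sketch(unsigned long start, unsigned long size,
			      enum dma_data_direction dir)
{
	unsigned long end = start + size;	/* add  r1, r1, r0 */

	if (dir != DMA_TO_DEVICE)		/* teq r2, #DMA_TO_DEVICE; bne inv_range */
		dma_inv_range(start, end);	/* drop lines speculatively fetched during DMA */
	/* DMA_TO_DEVICE: nothing to do; the range was cleaned when it was mapped */
}

The practical difference from the old code is that DMA_BIDIRECTIONAL buffers are no longer flushed (clean plus invalidate) when mapped; they are cleaned on map and invalidated on unmap, so anything the CPU speculatively prefetches while the device owns the buffer is discarded before the CPU reads it back.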
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index efa8efa33f5e..64daef2173bd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,34 +404,22 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void dma_cache_maint(const void *start, size_t size, int direction)
-{
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
-
-	outer_op(__pa(start), __pa(start) + size);
-}
-
 void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+	unsigned long paddr;
+
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
 	dmac_map_area(kaddr, size, dir);
-	dma_cache_maint(kaddr, size, dir);
+
+	paddr = __pa(kaddr);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_single_cpu_to_dev);
 
@@ -440,6 +428,13 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 {
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
+		unsigned long paddr = __pa(kaddr);
+		outer_inv_range(paddr, paddr + size);
+	}
+
 	dmac_unmap_area(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
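Taken together with the cache-v6.S/cache-v7.S changes above, the maintenance performed on a kernel-mapped buffer now breaks down per direction roughly as follows (informal summary, not part of the patch; "inner" is the CPU cache via dmac_*_area, "outer" is the outer_*_range L2 helpers):

    direction           map (cpu_to_dev)            unmap (dev_to_cpu)
    DMA_TO_DEVICE       clean inner, clean outer    nothing
    DMA_FROM_DEVICE     inv inner, inv outer        inv outer, inv inner
    DMA_BIDIRECTIONAL   clean inner, clean outer    inv outer, inv inner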
@@ -487,32 +482,29 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
 
 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
 
 	paddr = page_to_phys(page) + off;
-	outer_op(paddr, paddr + size);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
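As a usage-level illustration (not part of this patch), these hooks sit underneath the ordinary streaming DMA API: on non-coherent ARM, dma_map_single() reaches ___dma_single_cpu_to_dev() and dma_unmap_single() reaches ___dma_single_dev_to_cpu() through the inline helpers in dma-mapping.h. A driver receiving data from a device would exercise the DMA_FROM_DEVICE paths roughly as below; the function, buffer and length names are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver snippet: receive 'len' bytes from a device.
 * dma_map_single(..., DMA_FROM_DEVICE) invalidates the buffer before the
 * transfer; dma_unmap_single() invalidates it again afterwards, dropping
 * anything the CPU speculatively prefetched while the DMA was in flight.
 */
static int rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device to DMA into 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	/* CPU reads of 'buf' now see the data written by the device */
	return 0;
}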