-rw-r--r--	arch/arm/mm/dma-mapping.c	| 59
1 files changed, 30 insertions, 29 deletions

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bbf87880b915..77dc483e64c1 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -447,48 +447,25 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
-	size_t size, int direction)
+	size_t size, void (*op)(const void *, const void *))
 {
 	void *vaddr;
-	unsigned long paddr;
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
 
 	if (!PageHighMem(page)) {
 		vaddr = page_address(page) + offset;
-		inner_op(vaddr, vaddr + size);
+		op(vaddr, vaddr + size);
 	} else {
 		vaddr = kmap_high_get(page);
 		if (vaddr) {
 			vaddr += offset;
-			inner_op(vaddr, vaddr + size);
+			op(vaddr, vaddr + size);
 			kunmap_high(page);
 		}
 	}
-
-	paddr = page_to_phys(page) + offset;
-	outer_op(paddr, paddr + size);
 }
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int dir)
+	size_t size, void (*op)(const void *, const void *))
 {
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
@@ -506,7 +483,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			}
 			len = PAGE_SIZE - offset;
 		}
-		dma_cache_maint_contiguous(page, offset, len, dir);
+		dma_cache_maint_contiguous(page, offset, len, op);
 		offset = 0;
 		page++;
 		left -= len;
@@ -516,7 +493,31 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	dma_cache_maint_page(page, off, size, dir);
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (dir) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	dma_cache_maint_page(page, off, size, inner_op);
+
+	paddr = page_to_phys(page) + off;
+	outer_op(paddr, paddr + size);
 }
 EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
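The pattern the final hunk relies on, selecting an inner (CPU) and an outer (L2) cache routine from the DMA direction once in ___dma_page_cpu_to_dev() and then passing only the inner routine down as a function pointer, can be exercised outside the kernel. The sketch below is a hypothetical user-space illustration: the enum values, the stub inner_*/outer_* routines and select_ops() are stand-ins invented for the example, not the kernel's dmac_*_range()/outer_*_range() APIs.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's enum dma_data_direction. */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stub "inner" (virtual-address) and "outer" (physical-address) cache ops. */
static void inner_inv(const void *s, const void *e)   { printf("inner inv   %p..%p\n", (void *)s, (void *)e); }
static void inner_clean(const void *s, const void *e) { printf("inner clean %p..%p\n", (void *)s, (void *)e); }
static void inner_flush(const void *s, const void *e) { printf("inner flush %p..%p\n", (void *)s, (void *)e); }
static void outer_inv(unsigned long s, unsigned long e)   { printf("outer inv   %#lx..%#lx\n", s, e); }
static void outer_clean(unsigned long s, unsigned long e) { printf("outer clean %#lx..%#lx\n", s, e); }
static void outer_flush(unsigned long s, unsigned long e) { printf("outer flush %#lx..%#lx\n", s, e); }

/* Pick both cache routines from the direction in one place. */
static void select_ops(enum dma_data_direction dir,
		       void (**inner_op)(const void *, const void *),
		       void (**outer_op)(unsigned long, unsigned long))
{
	switch (dir) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		*inner_op = inner_inv;
		*outer_op = outer_inv;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		*inner_op = inner_clean;
		*outer_op = outer_clean;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		*inner_op = inner_flush;
		*outer_op = outer_flush;
		break;
	default:
		abort();
	}
}

int main(void)
{
	char buf[64];
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	select_ops(DMA_TO_DEVICE, &inner_op, &outer_op);
	inner_op(buf, buf + sizeof(buf));	/* per-page virtual range, as the page walker would call it */
	outer_op(0x80000000UL, 0x80000040UL);	/* physical range, done once by the caller */
	return 0;
}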
