Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 162
1 file changed, 85 insertions(+), 77 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 26325cb5d368..0da7eccf7749 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,9 +29,6 @@
 #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
 #endif
 
-#define CONSISTENT_END	(0xffe00000)
-#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)
-
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
@@ -404,78 +401,44 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void dma_cache_maint(const void *start, size_t size, int direction)
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
 {
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
+	unsigned long paddr;
+
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
-	inner_op(start, start + size);
-	outer_op(__pa(start), __pa(start) + size);
+	dmac_map_area(kaddr, size, dir);
+
+	paddr = __pa(kaddr);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(dma_cache_maint);
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
 
-static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
-	size_t size, int direction)
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
 {
-	void *vaddr;
-	unsigned long paddr;
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
-	if (!PageHighMem(page)) {
-		vaddr = page_address(page) + offset;
-		inner_op(vaddr, vaddr + size);
-	} else {
-		vaddr = kmap_high_get(page);
-		if (vaddr) {
-			vaddr += offset;
-			inner_op(vaddr, vaddr + size);
-			kunmap_high(page);
-		}
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
+		unsigned long paddr = __pa(kaddr);
+		outer_inv_range(paddr, paddr + size);
 	}
 
-	paddr = page_to_phys(page) + offset;
-	outer_op(paddr, paddr + size);
+	dmac_unmap_area(kaddr, size, dir);
 }
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
-void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int dir)
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
+	size_t size, enum dma_data_direction dir,
+	void (*op)(const void *, size_t, int))
 {
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
@@ -486,20 +449,62 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t left = size;
 	do {
 		size_t len = left;
-		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
-			if (offset >= PAGE_SIZE) {
-				page += offset / PAGE_SIZE;
-				offset %= PAGE_SIZE;
+		void *vaddr;
+
+		if (PageHighMem(page)) {
+			if (len + offset > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset / PAGE_SIZE;
+					offset %= PAGE_SIZE;
+				}
+				len = PAGE_SIZE - offset;
 			}
-			len = PAGE_SIZE - offset;
+			vaddr = kmap_high_get(page);
+			if (vaddr) {
+				vaddr += offset;
+				op(vaddr, len, dir);
+				kunmap_high(page);
+			}
+		} else {
+			vaddr = page_address(page) + offset;
+			op(vaddr, len, dir);
 		}
-		dma_cache_maint_contiguous(page, offset, len, dir);
 		offset = 0;
 		page++;
 		left -= len;
 	} while (left);
 }
-EXPORT_SYMBOL(dma_cache_maint_page);
+
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
+	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -573,8 +578,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-			sg_dma_len(s), dir);
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					    sg_dma_len(s), dir))
+			continue;
+
+		__dma_page_dev_to_cpu(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -597,9 +606,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				sg_dma_len(s), dir))
 			continue;
 
-		if (!arch_is_coherent())
-			dma_cache_maint_page(sg_page(s), s->offset,
-				s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
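
Taken together, the patch replaces the single direction-switch in dma_cache_maint() with paired ownership transfers: ___dma_page_cpu_to_dev() hands a buffer to the device before DMA starts, and ___dma_page_dev_to_cpu() reclaims it for the CPU afterwards. A minimal sketch of how a caller would pair the two exported helpers around one device-to-memory transfer; dev_start_dma() and dev_wait_dma() are hypothetical stand-ins for a real driver's transfer routines, not part of this patch:

/* Sketch only: pairing the ownership-transfer helpers exported above
 * around a DMA read.  dev_start_dma()/dev_wait_dma() are hypothetical. */
static void example_dma_from_device(struct page *page, unsigned long off,
				    size_t len)
{
	/* CPU -> device: for DMA_FROM_DEVICE this invalidates the inner
	 * and outer caches so stale lines cannot be evicted over the
	 * incoming DMA data. */
	___dma_page_cpu_to_dev(page, off, len, DMA_FROM_DEVICE);

	dev_start_dma(page, off, len);	/* hypothetical */
	dev_wait_dma();			/* hypothetical */

	/* device -> CPU: invalidate again before the CPU reads, covering
	 * lines speculatively fetched while the device owned the buffer. */
	___dma_page_dev_to_cpu(page, off, len, DMA_FROM_DEVICE);
}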