author     Marek Szyprowski <m.szyprowski@samsung.com>   2012-02-10 13:55:20 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>   2012-05-21 09:06:14 -0400
commit     2dc6a016bbedf18f18ad73997e5338307d6dbde9 (patch)
tree       741bf3c884304108b2e1d0a471aa6042c0d142e7 /arch/arm/mm
parent     a227fb92a0f5f0dd8282719386e9b3a29f0d16b2 (diff)
ARM: dma-mapping: use asm-generic/dma-mapping-common.h
This patch modifies the dma-mapping implementation on the ARM architecture to
use the common dma_map_ops structure and the asm-generic/dma-mapping-common.h
helpers.
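
With the common helpers in place, the dma_map_*()/dma_sync_*() entry points
that drivers call are provided by asm-generic/dma-mapping-common.h and simply
dispatch through the per-device struct dma_map_ops. A simplified sketch of
that dispatch pattern (paraphrased, not the verbatim header; the real helpers
also do dma-debug bookkeeping and handle attributes):

    static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                          size_t offset, size_t size,
                                          enum dma_data_direction dir)
    {
            /* arm_dma_ops on ARM, unless a device-specific set is installed */
            struct dma_map_ops *ops = get_dma_ops(dev);

            BUG_ON(!valid_dma_direction(dir));
            return ops->map_page(dev, page, offset, size, dir, NULL);
    }

Because the generic layer already performs the valid_dma_direction() check and
the debug_dma_*() calls, those are dropped from the ARM-specific callbacks
below.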
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/dma-mapping.c   148
1 file changed, 86 insertions(+), 62 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a16993a5b7e..70be6e10666 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,6 +29,85 @@
 
 #include "mm.h"
 
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	return __dma_map_page(dev, page, offset, size, dir);
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	__dma_unmap_page(dev, handle, size, dir);
+}
+
+static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static inline void arm_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	if (!dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+	.map_page		= arm_dma_map_page,
+	.unmap_page		= arm_dma_unmap_page,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
+	.sync_single_for_device	= arm_dma_sync_single_for_device,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -461,47 +540,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	unsigned long paddr;
-
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	dmac_map_area(kaddr, size, dir);
-
-	paddr = __pa(kaddr);
-	if (dir == DMA_FROM_DEVICE) {
-		outer_inv_range(paddr, paddr + size);
-	} else {
-		outer_clean_range(paddr, paddr + size);
-	}
-	/* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	/* FIXME: non-speculating: not required */
-	/* don't bother invalidating if DMA to device */
-	if (dir != DMA_TO_DEVICE) {
-		unsigned long paddr = __pa(kaddr);
-		outer_inv_range(paddr, paddr + size);
-	}
-
-	dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
-
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
@@ -599,21 +637,18 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  * Device ownership issues as mentioned for dma_map_single are the same
  * here.
  */
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i, j;
 
-	BUG_ON(!valid_dma_direction(dir));
-
 	for_each_sg(sg, s, nents, i) {
 		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
-	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
 bad_mapping:
@@ -621,7 +656,6 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
 /**
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
@@ -633,18 +667,15 @@ EXPORT_SYMBOL(dma_map_sg);
  * Unmap a set of streaming mode DMA translations. Again, CPU access
  * rules concerning calls here are the same as for dma_unmap_single().
  */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
 
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-
 	for_each_sg(sg, s, nents, i)
 		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
 /**
  * dma_sync_sg_for_cpu
@@ -653,7 +684,7 @@ EXPORT_SYMBOL(dma_unmap_sg);
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
@@ -667,10 +698,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
-
-	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
 /**
  * dma_sync_sg_for_device
@@ -679,7 +707,7 @@ EXPORT_SYMBOL(dma_sync_sg_for_cpu);
  * @nents: number of buffers to map (returned from dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
@@ -693,10 +721,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
-
-	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -712,7 +737,7 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
@@ -723,7 +748,6 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
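
Driver code is unaffected by this conversion: drivers keep calling the generic
DMA API, and the common helpers route the calls to arm_dma_ops (or to a
device-specific dma_map_ops, if one is installed). A minimal, hypothetical
usage fragment for a device 'dev' receiving data into 'page' (illustrative
only, not part of this patch):

    #include <linux/dma-mapping.h>

    /* hand the page to the device; this ends up in arm_dma_map_page() */
    dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, addr))
            return -ENOMEM;

    /* ... program the device with 'addr' and wait for the transfer ... */

    /* give the buffer back to the CPU; this ends up in arm_dma_unmap_page() */
    dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);

If the CPU needs to access the buffer while it stays mapped,
dma_sync_single_for_cpu()/dma_sync_single_for_device() bracket that access and
are routed to the arm_dma_sync_single_* callbacks in the same way.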