author      Marek Szyprowski <m.szyprowski@samsung.com>    2012-02-10 13:55:20 -0500
committer   Marek Szyprowski <m.szyprowski@samsung.com>    2012-05-21 09:06:18 -0400
commit      15237e1f505b3e5c2276f240b01cd2133e110cbc (patch)
tree        989e8a8580420ad3759a7bab81cd86347a3dadca /arch/arm/mm/dma-mapping.c
parent      2a550e73d3e5f040a3e8eb733c942ab352eafb36 (diff)
ARM: dma-mapping: move all dma bounce code to separate dma ops structure
This patch removes the dma bounce hooks from the common DMA mapping
implementation on the ARM architecture and creates a separate set of
dma_map_ops for dma bounce devices.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
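
As a rough illustration of the direction this takes (not code from this patch), a bounce-specific ops table can be declared once and attached to the devices that need it, so the generic streaming paths no longer have to test for dmabounce. Every dmabounce_* handler name below is a hypothetical placeholder assumed to be implemented in the dmabounce code; only the dma_map_ops structure and its callback signatures are the real kernel API of this era.

/*
 * Hedged sketch only -- not part of this patch.  Shows the shape of a
 * bounce-specific dma_map_ops table; the dmabounce_* handlers are
 * hypothetical placeholders assumed to be implemented elsewhere.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

extern dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs);
extern void dmabounce_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
extern void dmabounce_sync_for_cpu(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);
extern void dmabounce_sync_for_device(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);

static struct dma_map_ops dmabounce_ops = {
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	/* scatter-gather and dma_mask callbacks would be wired up the same way */
};

/* Assumed attach path: point a bounce-capable device at this table. */
static void dmabounce_attach_ops(struct device *dev)
{
	set_dma_ops(dev, &dmabounce_ops);
}

With such a table installed per device, dma_map_page() and friends dispatch straight to the bounce implementation for those devices, while everything else keeps using the plain ARM streaming ops shown in the diff below.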
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--   arch/arm/mm/dma-mapping.c | 79
1 file changed, 69 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b50fa578df81..c94966891dee 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,6 +29,75 @@
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
+
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -76,9 +145,6 @@ static inline void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -87,9 +153,6 @@ static inline void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -599,7 +662,6 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	}
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
@@ -619,7 +681,6 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -737,9 +798,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif
 
 	return 0;
 }
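
The ownership comment moved into this file describes the model the helpers above enforce: a streaming buffer belongs to either the CPU or the device, caches are cleaned when handing it to the device, and invalidation is delayed until the transfer completes. A minimal, non-authoritative driver-side sketch of that sequence (the driver fragment and buffer size are made up; the DMA API calls themselves are standard):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical driver fragment illustrating the ownership hand-offs. */
static int example_receive(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* CPU -> device ownership: caches are cleaned before the transfer */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... program the device to DMA into 'handle' and wait for it ... */

	/* device -> CPU ownership: stale/speculatively loaded lines are
	 * invalidated now that the transfer has completed */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	/* the CPU may now safely read buf */
	kfree(buf);
	return 0;
}

With this patch, the same driver code runs unchanged whether the device dispatches to the plain ARM streaming ops or to a dmabounce-specific dma_map_ops table.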