author	Marek Szyprowski <m.szyprowski@samsung.com>	2012-02-10 13:55:20 -0500
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 09:06:19 -0400
commit	51fde3499b531d4cf278f4d2eaa6c45b2865b16b (patch)
tree	63ec32ec33f9e97b9f4e04c118385c2366b2f922 /arch/arm/mm/dma-mapping.c
parent	15237e1f505b3e5c2276f240b01cd2133e110cbc (diff)
ARM: dma-mapping: remove redundant code and do the cleanup
This patch performs a global cleanup of the DMA mapping implementation
for the ARM architecture. Some of the tiny helper functions have been
moved into their callers, and some have been merged together.
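
The recurring pattern is illustrated below with a minimal, self-contained
C sketch. It is an illustration only: sync_page_for_device() and
map_page() are hypothetical stand-ins for the kernel helpers, not part
of this patch or of the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for arch_is_coherent(); assume a non-coherent system. */
static bool arch_is_coherent(void) { return false; }

/* Stand-in for the low-level cache maintenance routine. */
static void sync_page_for_device(unsigned long paddr, size_t size)
{
	printf("clean cache range %#lx..%#lx\n", paddr, paddr + size);
}

/*
 * Before the cleanup, a tiny wrapper existed only to guard this call:
 *
 *	static inline void __sync(unsigned long paddr, size_t size)
 *	{
 *		if (!arch_is_coherent())
 *			sync_page_for_device(paddr, size);
 *	}
 *
 * After the cleanup, the guard moves into the only caller and the
 * wrapper is deleted, which is what happens to __dma_map_page() and
 * friends in the diff below.
 */
static unsigned long map_page(unsigned long paddr, size_t size)
{
	if (!arch_is_coherent())
		sync_page_for_device(paddr, size);
	return paddr;	/* identity "bus address", good enough for a sketch */
}

int main(void)
{
	map_page(0x8000UL, 4096);
	return 0;
}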
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Subash Patel <subash.ramaswamy@linaro.org>
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	88
1 file changed, 24 insertions(+), 64 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c94966891dee..dddb406d0763 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -40,64 +40,12 @@
  * the CPU does do speculative prefetches, which means we clean caches
  * before transfers and delay cache invalidation until transfer completion.
  *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
  */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
 		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
 		size_t, enum dma_data_direction);
 
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -112,11 +60,13 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
  * The device owns this memory once this call has completed.  The CPU
  * can regain ownership by calling dma_unmap_page().
  */
-static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	return __dma_map_page(dev, page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
 /**
@@ -133,27 +83,31 @@ static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__dma_unmap_page(dev, handle, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+				      handle & ~PAGE_MASK, size, dir);
 }
 
-static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+static void arm_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
-static inline void arm_dma_sync_single_for_device(struct device *dev,
+static void arm_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -647,7 +601,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	} while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
@@ -663,7 +623,7 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr = page_to_phys(page) + off;
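
The comment added above __dma_page_cpu_to_dev() points driver authors at
the generic streaming-DMA API (dma_map_* / dma_sync_*) instead of these
internal helpers. For reference, a minimal driver-side sketch of that
API follows; example_rx(), dev, buf and len are invented names, and
error handling is reduced to the bare minimum.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical receive path: buf/len describe a driver-owned buffer. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Hand the buffer to the device; on a non-coherent ARM system
	 * this reaches __dma_page_cpu_to_dev() through the dma_map_ops
	 * plumbing touched by this patch.
	 */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	/* Reclaim the buffer for the CPU while keeping the mapping. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect buf ... */

	/* Give the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... final transfer ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}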