author     Marek Szyprowski <m.szyprowski@samsung.com>   2012-05-22 02:55:43 -0400
committer  Marek Szyprowski <m.szyprowski@samsung.com>   2012-05-22 02:55:43 -0400
commit     0f51596bd39a5c928307ffcffc9ba07f90f42a8b (patch)
tree       b636403815316ecad2170092b70f1079df260a95 /arch/arm/include
parent     61f6c7a47a2f84b7ba4b65240ffe9247df772b06 (diff)
parent     4ce63fcd919c32d22528e54dcd89506962933719 (diff)
Merge branch 'for-next-arm-dma' into for-linus
Conflicts:
arch/arm/Kconfig
arch/arm/mm/dma-mapping.c
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/device.h         4
-rw-r--r--  arch/arm/include/asm/dma-iommu.h     34
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  407
3 files changed, 147 insertions(+), 298 deletions(-)
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..b69c0d3285f8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
7 | #define ASMARM_DEVICE_H | 7 | #define ASMARM_DEVICE_H |
8 | 8 | ||
9 | struct dev_archdata { | 9 | struct dev_archdata { |
10 | struct dma_map_ops *dma_ops; | ||
10 | #ifdef CONFIG_DMABOUNCE | 11 | #ifdef CONFIG_DMABOUNCE |
11 | struct dmabounce_device_info *dmabounce; | 12 | struct dmabounce_device_info *dmabounce; |
12 | #endif | 13 | #endif |
13 | #ifdef CONFIG_IOMMU_API | 14 | #ifdef CONFIG_IOMMU_API |
14 | void *iommu; /* private IOMMU data */ | 15 | void *iommu; /* private IOMMU data */ |
15 | #endif | 16 | #endif |
17 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | ||
18 | struct dma_iommu_mapping *mapping; | ||
19 | #endif | ||
16 | }; | 20 | }; |
17 | 21 | ||
18 | struct omap_device; | 22 | struct omap_device; |
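The interesting change in device.h is the new dma_ops pointer in struct dev_archdata: each device can now carry its own set of DMA operations, with the global arm_dma_ops acting as the fallback (the accessors appear in the dma-mapping.h hunks below). A minimal sketch of the idea, assuming a hypothetical set of bus-specific ops named my_bus_dma_ops that is provided elsewhere:

#include <linux/device.h>
#include <asm/dma-mapping.h>

extern struct dma_map_ops my_bus_dma_ops;	/* hypothetical, defined elsewhere */

static void example_install_dma_ops(struct device *dev)
{
	/* Until something calls set_dma_ops(), archdata.dma_ops is NULL
	 * and get_dma_ops(dev) falls back to &arm_dma_ops. */
	set_dma_ops(dev, &my_bus_dma_ops);
}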
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..799b09409fad
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
1 | #ifndef ASMARM_DMA_IOMMU_H | ||
2 | #define ASMARM_DMA_IOMMU_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/mm_types.h> | ||
7 | #include <linux/scatterlist.h> | ||
8 | #include <linux/dma-debug.h> | ||
9 | #include <linux/kmemcheck.h> | ||
10 | |||
11 | struct dma_iommu_mapping { | ||
12 | /* iommu specific data */ | ||
13 | struct iommu_domain *domain; | ||
14 | |||
15 | void *bitmap; | ||
16 | size_t bits; | ||
17 | unsigned int order; | ||
18 | dma_addr_t base; | ||
19 | |||
20 | spinlock_t lock; | ||
21 | struct kref kref; | ||
22 | }; | ||
23 | |||
24 | struct dma_iommu_mapping * | ||
25 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, | ||
26 | int order); | ||
27 | |||
28 | void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); | ||
29 | |||
30 | int arm_iommu_attach_device(struct device *dev, | ||
31 | struct dma_iommu_mapping *mapping); | ||
32 | |||
33 | #endif /* __KERNEL__ */ | ||
34 | #endif | ||
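Taken together, the new header describes the intended call sequence for putting a device behind an IOMMU-managed DMA address space: create a mapping for the bus, attach the device, and from then on the ordinary DMA API calls on that device allocate IO virtual addresses from the mapping's bitmap. A sketch of that sequence, assuming platform_bus_type and an illustrative 128 MiB window at 0x80000000 (the numbers are not taken from the patch):

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

static int example_enable_iommu_dma(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	/* 128 MiB of IO virtual address space starting at 0x80000000;
	 * order 0 means the allocation bitmap tracks single pages. */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* After this, dma_map_*() calls on @dev go through the IOMMU. */
	return arm_iommu_attach_device(dev, mapping);
}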
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4b..bbef15d04890 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
5 | 5 | ||
6 | #include <linux/mm_types.h> | 6 | #include <linux/mm_types.h> |
7 | #include <linux/scatterlist.h> | 7 | #include <linux/scatterlist.h> |
8 | #include <linux/dma-attrs.h> | ||
8 | #include <linux/dma-debug.h> | 9 | #include <linux/dma-debug.h> |
9 | 10 | ||
10 | #include <asm-generic/dma-coherent.h> | 11 | #include <asm-generic/dma-coherent.h> |
11 | #include <asm/memory.h> | 12 | #include <asm/memory.h> |
12 | 13 | ||
14 | #define DMA_ERROR_CODE (~0) | ||
15 | extern struct dma_map_ops arm_dma_ops; | ||
16 | |||
17 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) | ||
18 | { | ||
19 | if (dev && dev->archdata.dma_ops) | ||
20 | return dev->archdata.dma_ops; | ||
21 | return &arm_dma_ops; | ||
22 | } | ||
23 | |||
24 | static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) | ||
25 | { | ||
26 | BUG_ON(!dev); | ||
27 | dev->archdata.dma_ops = ops; | ||
28 | } | ||
29 | |||
30 | #include <asm-generic/dma-mapping-common.h> | ||
31 | |||
32 | static inline int dma_set_mask(struct device *dev, u64 mask) | ||
33 | { | ||
34 | return get_dma_ops(dev)->set_dma_mask(dev, mask); | ||
35 | } | ||
36 | |||
13 | #ifdef __arch_page_to_dma | 37 | #ifdef __arch_page_to_dma |
14 | #error Please update to __arch_pfn_to_dma | 38 | #error Please update to __arch_pfn_to_dma |
15 | #endif | 39 | #endif |
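This hunk is the core of the rework: the generic dma_map_*/dma_sync_* entry points now come from asm-generic/dma-mapping-common.h and dispatch through get_dma_ops(), so a driver keeps calling the usual API while the per-device dma_map_ops decides whether the plain, bounce-buffered or IOMMU-backed implementation runs. A small illustrative probe fragment, assuming a hypothetical platform driver (nothing here is specific to this patch beyond the dispatch behaviour):

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Goes through get_dma_ops(&pdev->dev)->set_dma_mask(); with the
	 * default arm_dma_ops this validates the mask via dma_supported(). */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	return 0;
}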
@@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
62 | #endif | 86 | #endif |
63 | 87 | ||
64 | /* | 88 | /* |
65 | * The DMA API is built upon the notion of "buffer ownership". A buffer | ||
66 | * is either exclusively owned by the CPU (and therefore may be accessed | ||
67 | * by it) or exclusively owned by the DMA device. These helper functions | ||
68 | * represent the transitions between these two ownership states. | ||
69 | * | ||
70 | * Note, however, that on later ARMs, this notion does not work due to | ||
71 | * speculative prefetches. We model our approach on the assumption that | ||
72 | * the CPU does do speculative prefetches, which means we clean caches | ||
73 | * before transfers and delay cache invalidation until transfer completion. | ||
74 | * | ||
75 | * Private support functions: these are not part of the API and are | ||
76 | * liable to change. Drivers must not use these. | ||
77 | */ | ||
78 | static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, | ||
79 | enum dma_data_direction dir) | ||
80 | { | ||
81 | extern void ___dma_single_cpu_to_dev(const void *, size_t, | ||
82 | enum dma_data_direction); | ||
83 | |||
84 | if (!arch_is_coherent()) | ||
85 | ___dma_single_cpu_to_dev(kaddr, size, dir); | ||
86 | } | ||
87 | |||
88 | static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, | ||
89 | enum dma_data_direction dir) | ||
90 | { | ||
91 | extern void ___dma_single_dev_to_cpu(const void *, size_t, | ||
92 | enum dma_data_direction); | ||
93 | |||
94 | if (!arch_is_coherent()) | ||
95 | ___dma_single_dev_to_cpu(kaddr, size, dir); | ||
96 | } | ||
97 | |||
98 | static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, | ||
99 | size_t size, enum dma_data_direction dir) | ||
100 | { | ||
101 | extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, | ||
102 | size_t, enum dma_data_direction); | ||
103 | |||
104 | if (!arch_is_coherent()) | ||
105 | ___dma_page_cpu_to_dev(page, off, size, dir); | ||
106 | } | ||
107 | |||
108 | static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | ||
109 | size_t size, enum dma_data_direction dir) | ||
110 | { | ||
111 | extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, | ||
112 | size_t, enum dma_data_direction); | ||
113 | |||
114 | if (!arch_is_coherent()) | ||
115 | ___dma_page_dev_to_cpu(page, off, size, dir); | ||
116 | } | ||
117 | |||
118 | extern int dma_supported(struct device *, u64); | ||
119 | extern int dma_set_mask(struct device *, u64); | ||
120 | |||
121 | /* | ||
122 | * DMA errors are defined by all-bits-set in the DMA address. | 89 | * DMA errors are defined by all-bits-set in the DMA address. |
123 | */ | 90 | */ |
124 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 91 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
125 | { | 92 | { |
126 | return dma_addr == ~0; | 93 | return dma_addr == DMA_ERROR_CODE; |
127 | } | 94 | } |
128 | 95 | ||
129 | /* | 96 | /* |
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
141 | { | 108 | { |
142 | } | 109 | } |
143 | 110 | ||
111 | extern int dma_supported(struct device *dev, u64 mask); | ||
112 | |||
144 | /** | 113 | /** |
145 | * dma_alloc_coherent - allocate consistent memory for DMA | 114 | * arm_dma_alloc - allocate consistent memory for DMA |
146 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 115 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
147 | * @size: required memory size | 116 | * @size: required memory size |
148 | * @handle: bus-specific DMA address | 117 | * @handle: bus-specific DMA address |
118 | * @attrs: optional attributes that specify mapping properties | ||
149 | * | 119 | * |
150 | * Allocate some uncached, unbuffered memory for a device for | 120 | * Allocate some memory for a device for performing DMA. This function |
151 | * performing DMA. This function allocates pages, and will | 121 | * allocates pages, and will return the CPU-viewed address, and sets @handle |
152 | * return the CPU-viewed address, and sets @handle to be the | 122 | * to be the device-viewed address. |
153 | * device-viewed address. | ||
154 | */ | 123 | */ |
155 | extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); | 124 | extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
125 | gfp_t gfp, struct dma_attrs *attrs); | ||
126 | |||
127 | #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) | ||
128 | |||
129 | static inline void *dma_alloc_attrs(struct device *dev, size_t size, | ||
130 | dma_addr_t *dma_handle, gfp_t flag, | ||
131 | struct dma_attrs *attrs) | ||
132 | { | ||
133 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
134 | void *cpu_addr; | ||
135 | BUG_ON(!ops); | ||
136 | |||
137 | cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); | ||
138 | debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); | ||
139 | return cpu_addr; | ||
140 | } | ||
156 | 141 | ||
157 | /** | 142 | /** |
158 | * dma_free_coherent - free memory allocated by dma_alloc_coherent | 143 | * arm_dma_free - free memory allocated by arm_dma_alloc |
159 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 144 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
160 | * @size: size of memory originally requested in dma_alloc_coherent | 145 | * @size: size of memory originally requested in dma_alloc_coherent |
161 | * @cpu_addr: CPU-view address returned from dma_alloc_coherent | 146 | * @cpu_addr: CPU-view address returned from dma_alloc_coherent |
162 | * @handle: device-view address returned from dma_alloc_coherent | 147 | * @handle: device-view address returned from dma_alloc_coherent |
148 | * @attrs: optional attributes that specify mapping properties | ||
163 | * | 149 | * |
164 | * Free (and unmap) a DMA buffer previously allocated by | 150 | * Free (and unmap) a DMA buffer previously allocated by |
165 | * dma_alloc_coherent(). | 151 | * arm_dma_alloc(). |
166 | * | 152 | * |
167 | * References to memory and mappings associated with cpu_addr/handle | 153 | * References to memory and mappings associated with cpu_addr/handle |
168 | * during and after this call executing are illegal. | 154 | * during and after this call executing are illegal. |
169 | */ | 155 | */ |
170 | extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); | 156 | extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, |
157 | dma_addr_t handle, struct dma_attrs *attrs); | ||
158 | |||
159 | #define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) | ||
160 | |||
161 | static inline void dma_free_attrs(struct device *dev, size_t size, | ||
162 | void *cpu_addr, dma_addr_t dma_handle, | ||
163 | struct dma_attrs *attrs) | ||
164 | { | ||
165 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
166 | BUG_ON(!ops); | ||
167 | |||
168 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); | ||
169 | ops->free(dev, size, cpu_addr, dma_handle, attrs); | ||
170 | } | ||
171 | 171 | ||
172 | /** | 172 | /** |
173 | * dma_mmap_coherent - map a coherent DMA allocation into user space | 173 | * arm_dma_mmap - map a coherent DMA allocation into user space |
174 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 174 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
175 | * @vma: vm_area_struct describing requested user mapping | 175 | * @vma: vm_area_struct describing requested user mapping |
176 | * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent | 176 | * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent |
177 | * @handle: device-view address returned from dma_alloc_coherent | 177 | * @handle: device-view address returned from dma_alloc_coherent |
178 | * @size: size of memory originally requested in dma_alloc_coherent | 178 | * @size: size of memory originally requested in dma_alloc_coherent |
179 | * @attrs: optional attributes that specify mapping properties | ||
179 | * | 180 | * |
180 | * Map a coherent DMA buffer previously allocated by dma_alloc_coherent | 181 | * Map a coherent DMA buffer previously allocated by dma_alloc_coherent |
181 | * into user space. The coherent DMA buffer must not be freed by the | 182 | * into user space. The coherent DMA buffer must not be freed by the |
182 | * driver until the user space mapping has been released. | 183 | * driver until the user space mapping has been released. |
183 | */ | 184 | */ |
184 | int dma_mmap_coherent(struct device *, struct vm_area_struct *, | 185 | extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
185 | void *, dma_addr_t, size_t); | 186 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
187 | struct dma_attrs *attrs); | ||
186 | 188 | ||
189 | #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) | ||
187 | 190 | ||
188 | /** | 191 | static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, |
189 | * dma_alloc_writecombine - allocate writecombining memory for DMA | 192 | void *cpu_addr, dma_addr_t dma_addr, |
190 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 193 | size_t size, struct dma_attrs *attrs) |
191 | * @size: required memory size | 194 | { |
192 | * @handle: bus-specific DMA address | 195 | struct dma_map_ops *ops = get_dma_ops(dev); |
193 | * | 196 | BUG_ON(!ops); |
194 | * Allocate some uncached, buffered memory for a device for | 197 | return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); |
195 | * performing DMA. This function allocates pages, and will | 198 | } |
196 | * return the CPU-viewed address, and sets @handle to be the | 199 | |
197 | * device-viewed address. | 200 | static inline void *dma_alloc_writecombine(struct device *dev, size_t size, |
198 | */ | 201 | dma_addr_t *dma_handle, gfp_t flag) |
199 | extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, | 202 | { |
200 | gfp_t); | 203 | DEFINE_DMA_ATTRS(attrs); |
204 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
205 | return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs); | ||
206 | } | ||
201 | 207 | ||
202 | #define dma_free_writecombine(dev,size,cpu_addr,handle) \ | 208 | static inline void dma_free_writecombine(struct device *dev, size_t size, |
203 | dma_free_coherent(dev,size,cpu_addr,handle) | 209 | void *cpu_addr, dma_addr_t dma_handle) |
210 | { | ||
211 | DEFINE_DMA_ATTRS(attrs); | ||
212 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
213 | return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); | ||
214 | } | ||
204 | 215 | ||
205 | int dma_mmap_writecombine(struct device *, struct vm_area_struct *, | 216 | static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, |
206 | void *, dma_addr_t, size_t); | 217 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
218 | { | ||
219 | DEFINE_DMA_ATTRS(attrs); | ||
220 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); | ||
221 | return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); | ||
222 | } | ||
207 | 223 | ||
208 | /* | 224 | /* |
209 | * This can be called during boot to increase the size of the consistent | 225 | * This can be called during boot to increase the size of the consistent |
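The coherent allocators are now thin wrappers around dma_alloc_attrs()/dma_free_attrs(), and the writecombine variants simply set DMA_ATTR_WRITE_COMBINE instead of needing a separate allocator. From a driver's point of view the calls look the same as before; a brief sketch, with the size and buffer name purely illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Expands to dma_alloc_attrs(dev, ..., NULL): ops->alloc() plus
	 * the dma-debug bookkeeping shown in the hunk above. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access "ring" from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}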
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
212 | */ | 228 | */ |
213 | extern void __init init_consistent_dma_size(unsigned long size); | 229 | extern void __init init_consistent_dma_size(unsigned long size); |
214 | 230 | ||
215 | |||
216 | #ifdef CONFIG_DMABOUNCE | ||
217 | /* | 231 | /* |
218 | * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" | 232 | * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" |
219 | * and utilize bounce buffers as needed to work around limited DMA windows. | 233 | * and utilize bounce buffers as needed to work around limited DMA windows. |
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
253 | */ | 267 | */ |
254 | extern void dmabounce_unregister_dev(struct device *); | 268 | extern void dmabounce_unregister_dev(struct device *); |
255 | 269 | ||
256 | /* | ||
257 | * The DMA API, implemented by dmabounce.c. See below for descriptions. | ||
258 | */ | ||
259 | extern dma_addr_t __dma_map_page(struct device *, struct page *, | ||
260 | unsigned long, size_t, enum dma_data_direction); | ||
261 | extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, | ||
262 | enum dma_data_direction); | ||
263 | |||
264 | /* | ||
265 | * Private functions | ||
266 | */ | ||
267 | int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, | ||
268 | size_t, enum dma_data_direction); | ||
269 | int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, | ||
270 | size_t, enum dma_data_direction); | ||
271 | #else | ||
272 | static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr, | ||
273 | unsigned long offset, size_t size, enum dma_data_direction dir) | ||
274 | { | ||
275 | return 1; | ||
276 | } | ||
277 | 270 | ||
278 | static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, | ||
279 | unsigned long offset, size_t size, enum dma_data_direction dir) | ||
280 | { | ||
281 | return 1; | ||
282 | } | ||
283 | |||
284 | |||
285 | static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, | ||
286 | unsigned long offset, size_t size, enum dma_data_direction dir) | ||
287 | { | ||
288 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
289 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | ||
290 | } | ||
291 | |||
292 | static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
293 | size_t size, enum dma_data_direction dir) | ||
294 | { | ||
295 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), | ||
296 | handle & ~PAGE_MASK, size, dir); | ||
297 | } | ||
298 | #endif /* CONFIG_DMABOUNCE */ | ||
299 | |||
300 | /** | ||
301 | * dma_map_single - map a single buffer for streaming DMA | ||
302 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
303 | * @cpu_addr: CPU direct mapped address of buffer | ||
304 | * @size: size of buffer to map | ||
305 | * @dir: DMA transfer direction | ||
306 | * | ||
307 | * Ensure that any data held in the cache is appropriately discarded | ||
308 | * or written back. | ||
309 | * | ||
310 | * The device owns this memory once this call has completed. The CPU | ||
311 | * can regain ownership by calling dma_unmap_single() or | ||
312 | * dma_sync_single_for_cpu(). | ||
313 | */ | ||
314 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
315 | size_t size, enum dma_data_direction dir) | ||
316 | { | ||
317 | unsigned long offset; | ||
318 | struct page *page; | ||
319 | dma_addr_t addr; | ||
320 | |||
321 | BUG_ON(!virt_addr_valid(cpu_addr)); | ||
322 | BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); | ||
323 | BUG_ON(!valid_dma_direction(dir)); | ||
324 | |||
325 | page = virt_to_page(cpu_addr); | ||
326 | offset = (unsigned long)cpu_addr & ~PAGE_MASK; | ||
327 | addr = __dma_map_page(dev, page, offset, size, dir); | ||
328 | debug_dma_map_page(dev, page, offset, size, dir, addr, true); | ||
329 | |||
330 | return addr; | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * dma_map_page - map a portion of a page for streaming DMA | ||
335 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
336 | * @page: page that buffer resides in | ||
337 | * @offset: offset into page for start of buffer | ||
338 | * @size: size of buffer to map | ||
339 | * @dir: DMA transfer direction | ||
340 | * | ||
341 | * Ensure that any data held in the cache is appropriately discarded | ||
342 | * or written back. | ||
343 | * | ||
344 | * The device owns this memory once this call has completed. The CPU | ||
345 | * can regain ownership by calling dma_unmap_page(). | ||
346 | */ | ||
347 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
348 | unsigned long offset, size_t size, enum dma_data_direction dir) | ||
349 | { | ||
350 | dma_addr_t addr; | ||
351 | |||
352 | BUG_ON(!valid_dma_direction(dir)); | ||
353 | |||
354 | addr = __dma_map_page(dev, page, offset, size, dir); | ||
355 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); | ||
356 | |||
357 | return addr; | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * dma_unmap_single - unmap a single buffer previously mapped | ||
362 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
363 | * @handle: DMA address of buffer | ||
364 | * @size: size of buffer (same as passed to dma_map_single) | ||
365 | * @dir: DMA transfer direction (same as passed to dma_map_single) | ||
366 | * | ||
367 | * Unmap a single streaming mode DMA translation. The handle and size | ||
368 | * must match what was provided in the previous dma_map_single() call. | ||
369 | * All other usages are undefined. | ||
370 | * | ||
371 | * After this call, reads by the CPU to the buffer are guaranteed to see | ||
372 | * whatever the device wrote there. | ||
373 | */ | ||
374 | static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, | ||
375 | size_t size, enum dma_data_direction dir) | ||
376 | { | ||
377 | debug_dma_unmap_page(dev, handle, size, dir, true); | ||
378 | __dma_unmap_page(dev, handle, size, dir); | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() | ||
383 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
384 | * @handle: DMA address of buffer | ||
385 | * @size: size of buffer (same as passed to dma_map_page) | ||
386 | * @dir: DMA transfer direction (same as passed to dma_map_page) | ||
387 | * | ||
388 | * Unmap a page streaming mode DMA translation. The handle and size | ||
389 | * must match what was provided in the previous dma_map_page() call. | ||
390 | * All other usages are undefined. | ||
391 | * | ||
392 | * After this call, reads by the CPU to the buffer are guaranteed to see | ||
393 | * whatever the device wrote there. | ||
394 | */ | ||
395 | static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
396 | size_t size, enum dma_data_direction dir) | ||
397 | { | ||
398 | debug_dma_unmap_page(dev, handle, size, dir, false); | ||
399 | __dma_unmap_page(dev, handle, size, dir); | ||
400 | } | ||
401 | |||
402 | /** | ||
403 | * dma_sync_single_range_for_cpu | ||
404 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
405 | * @handle: DMA address of buffer | ||
406 | * @offset: offset of region to start sync | ||
407 | * @size: size of region to sync | ||
408 | * @dir: DMA transfer direction (same as passed to dma_map_single) | ||
409 | * | ||
410 | * Make physical memory consistent for a single streaming mode DMA | ||
411 | * translation after a transfer. | ||
412 | * | ||
413 | * If you perform a dma_map_single() but wish to interrogate the | ||
414 | * buffer using the cpu, yet do not wish to teardown the PCI dma | ||
415 | * mapping, you must call this function before doing so. At the | ||
416 | * next point you give the PCI dma address back to the card, you | ||
417 | * must first the perform a dma_sync_for_device, and then the | ||
418 | * device again owns the buffer. | ||
419 | */ | ||
420 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
421 | dma_addr_t handle, unsigned long offset, size_t size, | ||
422 | enum dma_data_direction dir) | ||
423 | { | ||
424 | BUG_ON(!valid_dma_direction(dir)); | ||
425 | |||
426 | debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir); | ||
427 | |||
428 | if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) | ||
429 | return; | ||
430 | |||
431 | __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); | ||
432 | } | ||
433 | |||
434 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
435 | dma_addr_t handle, unsigned long offset, size_t size, | ||
436 | enum dma_data_direction dir) | ||
437 | { | ||
438 | BUG_ON(!valid_dma_direction(dir)); | ||
439 | |||
440 | debug_dma_sync_single_for_device(dev, handle + offset, size, dir); | ||
441 | |||
442 | if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) | ||
443 | return; | ||
444 | |||
445 | __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); | ||
446 | } | ||
447 | |||
448 | static inline void dma_sync_single_for_cpu(struct device *dev, | ||
449 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
450 | { | ||
451 | dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); | ||
452 | } | ||
453 | |||
454 | static inline void dma_sync_single_for_device(struct device *dev, | ||
455 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
456 | { | ||
457 | dma_sync_single_range_for_device(dev, handle, 0, size, dir); | ||
458 | } | ||
459 | 271 | ||
460 | /* | 272 | /* |
461 | * The scatter list versions of the above methods. | 273 | * The scatter list versions of the above methods. |
462 | */ | 274 | */ |
463 | extern int dma_map_sg(struct device *, struct scatterlist *, int, | 275 | extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, |
464 | enum dma_data_direction); | 276 | enum dma_data_direction, struct dma_attrs *attrs); |
465 | extern void dma_unmap_sg(struct device *, struct scatterlist *, int, | 277 | extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, |
278 | enum dma_data_direction, struct dma_attrs *attrs); | ||
279 | extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, | ||
466 | enum dma_data_direction); | 280 | enum dma_data_direction); |
467 | extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, | 281 | extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, |
468 | enum dma_data_direction); | 282 | enum dma_data_direction); |
469 | extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | ||
470 | enum dma_data_direction); | ||
471 | |||
472 | 283 | ||
473 | #endif /* __KERNEL__ */ | 284 | #endif /* __KERNEL__ */ |
474 | #endif | 285 | #endif |
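The scatterlist helpers lose their dma_* names because they become the .map_sg/.unmap_sg/.sync_sg_* callbacks of arm_dma_ops; drivers still call dma_map_sg() and friends, which the common header routes to these implementations. A closing sketch of the unchanged driver-side usage (the scatterlist and nents are assumed to be initialised elsewhere):

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* Dispatches to arm_dma_map_sg() (or an IOMMU/dmabounce variant)
	 * via get_dma_ops(dev)->map_sg(). */
	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		/* sg_dma_address(sg) / sg_dma_len(sg) describe each
		 * device-visible segment. */
	}

	/* Unmap with the original nents, not the returned count. */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}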