Diffstat (limited to 'include/asm-arm/dma-mapping.h')
-rw-r--r--   include/asm-arm/dma-mapping.h   426
1 file changed, 426 insertions, 0 deletions
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
new file mode 100644
index 000000000000..925d016dd4b5
--- /dev/null
+++ b/include/asm-arm/dma-mapping.h
@@ -0,0 +1,426 @@
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mm.h>		/* need struct page */

#include <asm/scatterlist.h>

/*
 * DMA-consistent mapping functions. These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices. This is the "generic" version. The PCI specific version
 * is in pci.h.
 */
extern void consistent_sync(void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
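
/*
 * Illustrative usage sketch, not part of the original header: a driver
 * would typically negotiate its DMA mask once at probe time, before any
 * mappings are created. The probe function below is hypothetical.
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		// try a full 32-bit mask, fall back to 24 bits (ISA-like)
 *		if (dma_set_mask(dev, 0xffffffffULL) &&
 *		    dma_set_mask(dev, 0x00ffffffULL))
 *			return -EIO;	// no usable DMA addressing
 *		return 0;
 *	}
 */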

static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(dma_addr_t handle)
{
        return 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}
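
/*
 * Illustrative sketch, not part of the original header: the handle
 * returned by a streaming mapping should be checked before it is handed
 * to the hardware. "dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(addr))
 *		return -ENOMEM;		// do not start the transfer
 */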
57 | |||
58 | /** | ||
59 | * dma_alloc_coherent - allocate consistent memory for DMA | ||
60 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
61 | * @size: required memory size | ||
62 | * @handle: bus-specific DMA address | ||
63 | * | ||
64 | * Allocate some uncached, unbuffered memory for a device for | ||
65 | * performing DMA. This function allocates pages, and will | ||
66 | * return the CPU-viewed address, and sets @handle to be the | ||
67 | * device-viewed address. | ||
68 | */ | ||
69 | extern void * | ||
70 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp); | ||
71 | |||
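/*
 * Illustrative sketch, not part of the original header: allocating a small
 * descriptor ring that both CPU and device can access without explicit
 * cache maintenance. The descriptor type and the one-page size are
 * hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	struct example_desc *ring;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// program ring_dma into the device; use "ring" from the CPU
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
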
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t handle);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t handle, size_t size);


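/*
 * Illustrative sketch, not part of the original header: exporting a
 * coherent buffer to user space from a driver's mmap() file operation.
 * The "example_priv" structure and its fields are hypothetical; the
 * buffer must not be freed while the user mapping exists.
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct example_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */
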
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t handle, size_t size);


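/*
 * Illustrative sketch, not part of the original header: frame buffer style
 * drivers commonly pair dma_alloc_writecombine() with
 * dma_mmap_writecombine() so that user space sees the same bufferable
 * mapping. "fb", "fb_size" and "vma" are hypothetical.
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb_size,
 *						 &fb->screen_dma, GFP_KERNEL);
 *	...
 *	// later, from the driver's fb_mmap() hook:
 *	dma_mmap_writecombine(dev, vma, fb->screen_base,
 *			      fb->screen_dma, fb_size);
 */
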
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction dir)
{
        consistent_sync(cpu_addr, size, dir);
        return virt_to_dma(dev, (unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif

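/*
 * Illustrative sketch, not part of the original header: a transmit path
 * that maps a buffer, lets the device read from it, and unmaps it on
 * completion. "dev", "data" and "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	// hand "dma" to the device; the device now owns the buffer
 *	...
 *	// on completion, ownership returns to the CPU:
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
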
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
}

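/*
 * Illustrative sketch, not part of the original header: mapping only part
 * of a page, for example one fragment of a larger buffer. "frag" is a
 * hypothetical structure carrying a page, an offset and a length.
 *
 *	dma_addr_t dma = dma_map_page(dev, frag->page, frag->offset,
 *				      frag->len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, dma, frag->len, DMA_TO_DEVICE);
 */
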
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
                 enum dma_data_direction dir)
{
        /* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
               enum dma_data_direction dir)
{
        dma_unmap_single(dev, handle, size, (int)dir);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above dma_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++, sg++) {
                char *virt;

                sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
                virt = page_address(sg->page) + sg->offset;
                consistent_sync(virt, sg->length, dir);
        }

        return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

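/*
 * Illustrative sketch, not part of the original header: mapping a
 * scatterlist and programming each resulting address/length pair into a
 * hypothetical controller (example_hw_set_segment is made up). Note that
 * the value returned by dma_map_sg(), not the original nents, bounds the
 * loop.
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		example_hw_set_segment(hw, i, sg_dma_address(&sglist[i]),
 *				       sg_dma_len(&sglist[i]));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
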
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{

        /* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif


/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so. At the next point you
 * give the DMA address back to the device, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
                        enum dma_data_direction dir)
{
        consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
                           enum dma_data_direction dir)
{
        consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif


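/*
 * Illustrative sketch, not part of the original header: a long-lived
 * streaming mapping that the CPU inspects between transfers. Ownership
 * ping-pongs between CPU and device through the two sync calls; "dev",
 * "dma" and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// the CPU may now read the received data safely
 *	...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// the device may now be told to reuse the buffer
 */
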
/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                    enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++, sg++) {
                char *virt = page_address(sg->page) + sg->offset;
                consistent_sync(virt, sg->length, dir);
        }
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++, sg++) {
                char *virt = page_address(sg->page) + sg->offset;
                consistent_sync(virt, sg->length, dir);
        }
}
#else
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);

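/*
 * Illustrative sketch, not part of the original header: platform code for
 * a bus with a restricted inbound window might register each device it
 * creates. The 512-byte and 4096-byte pool sizes are hypothetical.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		printk(KERN_ERR "%s: could not register with dmabounce\n",
 *		       dev->bus_id);
 */
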
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
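
/*
 * Illustrative sketch, not part of the original header: a platform whose
 * inbound PCI window covers only the first 64MB of the bus address space
 * (the assumption here is that the window starts at bus address 0) could
 * implement the hook roughly as:
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 */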
#endif /* CONFIG_DMABOUNCE */

#endif /* __KERNEL__ */
#endif