Dynamic DMA mapping using the generic device
============================================

James E.J. Bottomley <James.Bottomley@HansenPartnership.com>

This document describes the DMA API. For a more gentle introduction
phrased in terms of the pci_ equivalents (and actual examples) see
DMA-mapping.txt.

This API is split into two pieces. Part I describes the API and the
corresponding pci_ API. Part II describes the extensions to the API
for supporting non-consistent memory machines. Unless you know that
your driver absolutely has to support non-consistent platforms (this
is usually only legacy platforms) you should only use the API
described in Part I.


Part I - pci_ and dma_ Equivalent API
-------------------------------------

To get the pci_ API, you must #include <linux/pci.h>
To get the dma_ API, you must #include <linux/dma-mapping.h>


Part Ia - Using large dma-coherent buffers
------------------------------------------

void *
dma_alloc_coherent(struct device *dev, size_t size,
		   dma_addr_t *dma_handle, int flag)
void *
pci_alloc_consistent(struct pci_dev *dev, size_t size,
		     dma_addr_t *dma_handle)

Consistent memory is memory for which a write by either the device or
the processor can immediately be read by the processor or device
without having to worry about caching effects.

This routine allocates a region of <size> bytes of consistent memory.
It also returns a <dma_handle> which may be cast to an unsigned
integer the same width as the bus and used as the physical address
base of the region.

Returns: a pointer to the allocated region (in the processor's virtual
address space) or NULL if the allocation failed.

Note: consistent memory can be expensive on some platforms, and the
minimum allocation length may be as big as a page, so you should
consolidate your requests for consistent memory as much as possible.
The simplest way to do that is to use the dma_pool calls (see below).

The flag parameter (dma_alloc_coherent only) allows the caller to
specify the GFP_ flags (see kmalloc) for the allocation (the
implementation may choose to ignore flags that affect the location of
the returned memory, like GFP_DMA). For pci_alloc_consistent, you
must assume GFP_ATOMIC behaviour.

void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
void
pci_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
		    dma_addr_t dma_handle)

Free the region of consistent memory you previously allocated. dev,
size and dma_handle must all be the same as those passed into the
consistent allocate. cpu_addr must be the virtual address returned by
the consistent allocate.

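As an illustration, here is a minimal sketch of the allocate/free
pairing (the RING_BYTES size, the error handling and the surrounding
driver context are hypothetical, not part of the API):

	/* hypothetical: allocate a 4KB descriptor ring for <dev> */
	#define RING_BYTES 4096

	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;	/* allocation failed */

	/* ... program ring_dma into the device, use ring from the CPU ... */

	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
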

Part Ib - Using small dma-coherent buffers
------------------------------------------

To get this part of the dma_ API, you must #include <linux/dmapool.h>

Many drivers need lots of small dma-coherent memory regions for DMA
descriptors or I/O buffers. Rather than allocating in units of a page
or more using dma_alloc_coherent(), you can use DMA pools. These work
much like a kmem_cache_t, except that they use the dma-coherent
allocator, not __get_free_pages(). Also, they understand common
hardware constraints for alignment, like queue heads needing to be
aligned on N byte boundaries.


struct dma_pool *
dma_pool_create(const char *name, struct device *dev,
		size_t size, size_t align, size_t alloc);

struct pci_pool *
pci_pool_create(const char *name, struct pci_dev *dev,
		size_t size, size_t align, size_t alloc);

The pool create() routines initialize a pool of dma-coherent buffers
for use with a given device. They must be called in a context which
can sleep.

The "name" is for diagnostics (like a kmem_cache_t name); dev and size
are like what you'd pass to dma_alloc_coherent(). The device's hardware
alignment requirement for this type of data is "align" (which is expressed
in bytes, and must be a power of two). If your device has no boundary
crossing restrictions, pass 0 for alloc; passing 4096 says memory allocated
from this pool must not cross 4KByte boundaries.


void *dma_pool_alloc(struct dma_pool *pool, int gfp_flags,
		     dma_addr_t *dma_handle);

void *pci_pool_alloc(struct pci_pool *pool, int gfp_flags,
		     dma_addr_t *dma_handle);

This allocates memory from the pool; the returned memory will meet the size
and alignment requirements specified at creation time. Pass GFP_ATOMIC to
prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks)
pass GFP_KERNEL to allow blocking. Like dma_alloc_coherent(), this returns
two values: an address usable by the cpu, and the dma address usable by the
pool's device.


void dma_pool_free(struct dma_pool *pool, void *vaddr,
		   dma_addr_t addr);

void pci_pool_free(struct pci_pool *pool, void *vaddr,
		   dma_addr_t addr);

This puts memory back into the pool. The pool is what was passed to
the pool allocation routine; the cpu and dma addresses are what were
returned when that routine allocated the memory being freed.


void dma_pool_destroy(struct dma_pool *pool);

void pci_pool_destroy(struct pci_pool *pool);

The pool destroy() routines free the resources of the pool. They must be
called in a context which can sleep. Make sure you've freed all allocated
memory back to the pool before you destroy it.

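A minimal sketch of the pool lifecycle (the pool name and the 64-byte
size / 16-byte alignment are hypothetical values for illustration):

	struct dma_pool *pool;
	dma_addr_t desc_dma;
	void *desc;

	/* 64-byte buffers, 16-byte aligned, no boundary restriction */
	pool = dma_pool_create("mydev_desc", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
	if (!desc)
		return -ENOMEM;

	/* ... use desc from the CPU and desc_dma from the device ... */

	dma_pool_free(pool, desc, desc_dma);
	dma_pool_destroy(pool);	/* everything must be freed first */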


Part Ic - DMA addressing limitations
------------------------------------

int
dma_supported(struct device *dev, u64 mask)
int
pci_dma_supported(struct pci_dev *hwdev, u64 mask)

Checks to see if the device can support DMA to the memory described by
mask.

Returns: 1 if it can and 0 if it can't.

Notes: This routine merely tests to see if the mask is possible. It
won't change the current mask settings. It is intended as an internal
API for use by the platform rather than an external API for driver
writers.

int
dma_set_mask(struct device *dev, u64 mask)
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)

Checks to see if the mask is possible and updates the device
parameters if it is.

Returns: 0 if successful and a negative error if not.

u64
dma_get_required_mask(struct device *dev)

After setting the mask with dma_set_mask(), this API returns the mask
(within that already set) that the platform actually requires to
operate efficiently. Usually this means the returned mask is the
minimum required to cover all of memory. Examining the required mask
gives drivers with variable descriptor sizes the opportunity to use
smaller descriptors as necessary.

Requesting the required mask does not alter the current mask. If you
wish to take advantage of it, you should issue another dma_set_mask()
call to lower the mask again.

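For example, a driver probing for 64-bit and then falling back to
32-bit addressing might look like this sketch (the using_dac flag and
the message text are hypothetical):

	if (!dma_set_mask(dev, 0xffffffffffffffffULL)) {
		using_dac = 1;	/* device can address 64 bits */
	} else if (!dma_set_mask(dev, 0xffffffffULL)) {
		using_dac = 0;	/* restrict to 32-bit addressing */
	} else {
		printk(KERN_WARNING "mydev: no suitable DMA mask available\n");
		return -EIO;
	}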


Part Id - Streaming DMA mappings
--------------------------------

dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *cpu_addr, size_t size,
	       int direction)

Maps a piece of processor virtual memory so it can be accessed by the
device and returns the physical handle of the memory.

The direction for both APIs may be converted freely by casting.
However the dma_ API uses a strongly typed enumerator for its
direction:

DMA_NONE		= PCI_DMA_NONE		no direction (used for
						debugging)
DMA_TO_DEVICE		= PCI_DMA_TODEVICE	data is going from the
						memory to the device
DMA_FROM_DEVICE		= PCI_DMA_FROMDEVICE	data is coming from
						the device to the
						memory
DMA_BIDIRECTIONAL	= PCI_DMA_BIDIRECTIONAL	direction isn't known

Notes: Not all memory regions in a machine can be mapped by this
API. Further, regions that appear to be physically contiguous in
kernel virtual space may not be contiguous as physical memory. Since
this API does not provide any scatter/gather capability, it will fail
if the user tries to map a non-physically-contiguous piece of memory.
For this reason, it is recommended that memory mapped by this API be
obtained only from sources which are guaranteed to be physically
contiguous (like kmalloc).

Further, the physical address of the memory must be within the
dma_mask of the device (the dma_mask represents a bit mask of the
addressable region for the device, i.e. if the physical address of
the memory ANDed with the dma_mask is still equal to the physical
address, then the device can perform DMA to the memory). In order to
ensure that the memory allocated by kmalloc is within the dma_mask,
the driver may specify various platform-dependent flags to restrict
the physical memory range of the allocation (e.g. on x86, GFP_DMA
guarantees to be within the first 16MB of available physical memory,
as required by ISA devices).

Note also that the above constraints on physical contiguity and
dma_mask may not apply if the platform has an IOMMU (a device which
supplies a physical to virtual mapping between the I/O memory bus and
the device). However, to be portable, device driver writers may *not*
assume that such an IOMMU exists.

Warnings: Memory coherency operates at a granularity called the cache
line width. In order for memory mapped by this API to operate
correctly, the mapped region must begin exactly on a cache line
boundary and end exactly on one (to prevent two separately mapped
regions from sharing a single cache line). Since the cache line size
may not be known at compile time, the API will not enforce this
requirement. Therefore, it is recommended that driver writers who
don't take special care to determine the cache line size at run time
only map virtual regions that begin and end on page boundaries (which
are guaranteed also to be cache line boundaries).

DMA_TO_DEVICE synchronisation must be done after the last modification
of the memory region by the software and before it is handed off to
the device. Once this primitive is used, memory covered by it should
be treated as read-only by the device. If the device may write to it
at any point, it should be DMA_BIDIRECTIONAL (see below).

DMA_FROM_DEVICE synchronisation must be done before the driver
accesses data that may be changed by the device. This memory should
be treated as read-only by the driver. If the driver needs to write
to it at any point, it should be DMA_BIDIRECTIONAL (see below).

DMA_BIDIRECTIONAL requires special handling: it means that the driver
isn't sure if the memory was modified before being handed off to the
device and also isn't sure if the device will modify it. Thus, you
must always sync bidirectional memory twice: once before the memory is
handed off to the device (to make sure all memory changes are flushed
from the processor) and once before the data may be accessed after
being used by the device (to make sure any processor cache lines are
updated with data that the device may have changed).

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		 size_t size, int direction)

Unmaps the region previously mapped. All the parameters must be
identical to those passed in (and returned) by the mapping API.

dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
	     unsigned long offset, size_t size, int direction)
void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
	       size_t size, int direction)

API for mapping and unmapping pages. All the notes and warnings
for the other mapping APIs apply here. Also, although the <offset>
and <size> parameters are provided to do partial page mapping, it is
recommended that you never use these unless you really know what the
cache width is.

int
dma_mapping_error(dma_addr_t dma_addr)

int
pci_dma_mapping_error(dma_addr_t dma_addr)

In some circumstances dma_map_single and dma_map_page will fail to create
a mapping. A driver can check for these errors by testing the returned
dma address with dma_mapping_error(). A non-zero return value means the
mapping could not be created and the driver should take appropriate
action (e.g. reduce current DMA mapping usage or delay and try again
later).

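Put together, a typical single mapping looks like this sketch (buffer,
size and the device interaction are assumed to come from the driver):

	dma_addr_t handle;

	handle = dma_map_single(dev, buffer, size, DMA_TO_DEVICE);
	if (dma_mapping_error(handle)) {
		/* mapping failed: back off and retry later */
		return -ENOMEM;
	}

	/* ... tell the device to read <size> bytes at <handle> ... */

	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);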

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	   int nents, int direction)

Maps a scatter/gather list from the block layer.

Returns: the number of physical segments mapped (this may be shorter
than <nents> passed in if the block layer determines that some
elements of the scatter/gather list are physically adjacent and thus
may be mapped with a single entry).

Please note that the sg cannot be mapped again if it has been mapped once.
The mapping process is allowed to destroy information in the sg.

As with the other mapping interfaces, dma_map_sg can fail. When it
does, 0 is returned and a driver must take appropriate action. It is
critical that the driver do something; in the case of a block driver,
aborting the request or even oopsing is better than doing nothing and
corrupting the filesystem.

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	     int nents, int direction)

Unmap the previously mapped scatter/gather list. All the parameters
must be the same as those passed into the scatter/gather mapping
API.

Note: <nents> must be the number you passed in, *not* the number of
physical entries returned.

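A sketch of the usual pattern, reading the mapped segments back with
the sg_dma_address()/sg_dma_len() accessors (hw_program_segment() is a
hypothetical device-specific helper):

	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -ENOMEM;	/* never ignore this failure */

	for (i = 0; i < count; i++)
		hw_program_segment(i, sg_dma_address(&sglist[i]),
				   sg_dma_len(&sglist[i]));

	/* ... after the device has finished ... */

	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE); /* nents, not count */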

void
dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
void
pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
		    size_t size, int direction)
void
dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
	    enum dma_data_direction direction)
void
pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		int nelems, int direction)

Synchronise a single contiguous or scatter/gather mapping. All the
parameters must be the same as those passed into the single mapping
API.

Notes: You must do this:

- Before reading values that have been written by DMA from the device
  (use the DMA_FROM_DEVICE direction)
- After writing values that will be written to the device using DMA
  (use the DMA_TO_DEVICE direction)
- Before *and* after handing memory to the device if the memory is
  DMA_BIDIRECTIONAL

See also dma_map_single().

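For instance, a driver that lets the device fill a buffer and then
rewrites it for the next request might sync like this
(examine_status() and rebuild_request() are hypothetical CPU-side
accesses):

	/* the device wrote into the buffer: sync before the CPU reads it */
	dma_sync_single(dev, handle, size, DMA_FROM_DEVICE);
	examine_status(buffer);

	/* the CPU rewrote the buffer: sync before the device reads it */
	rebuild_request(buffer);
	dma_sync_single(dev, handle, size, DMA_TO_DEVICE);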


Part II - Advanced dma_ usage
-----------------------------

Warning: These pieces of the DMA API have no PCI equivalent. They
should also not be used in the majority of cases, since they cater for
unlikely corner cases that don't belong in usual drivers.

If you don't understand how cache line coherency works between a
processor and an I/O device, you should not be using this part of the
API at all.

void *
dma_alloc_noncoherent(struct device *dev, size_t size,
		      dma_addr_t *dma_handle, int flag)

Identical to dma_alloc_coherent() except that the platform will
choose to return either consistent or non-consistent memory as it sees
fit. By using this API, you are guaranteeing to the platform that you
have all the correct and necessary sync points for this memory in the
driver should it choose to return non-consistent memory.

Note: where the platform can return consistent memory, it will
guarantee that the sync points become nops.

Warning: Handling non-consistent memory is a real pain. You should
only ever use this API if you positively know your driver will be
required to work on one of the rare (usually non-PCI) architectures
that simply cannot make consistent memory.

void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)

Free memory allocated by the nonconsistent API. All parameters must
be identical to those passed in (and returned) by
dma_alloc_noncoherent().

int
dma_is_consistent(dma_addr_t dma_handle)

Returns true if the memory pointed to by the dma_handle is actually
consistent.

int
dma_get_cache_alignment(void)

Returns the processor cache alignment. This is the absolute minimum
alignment *and* width that you must observe when either mapping
memory or doing partial flushes.

Notes: This API may return a number *larger* than the actual cache
line size, but it will guarantee that one or more cache lines fit
exactly into the width returned by this call. It will also always be a
power of two for easy alignment.

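As a sketch, a driver could use the returned width to round a
partial-sync length up to a safe multiple (user_len is a hypothetical
input):

	int align = dma_get_cache_alignment();
	size_t len;

	/* round up to the next multiple of the returned width;
	   align is guaranteed to be a power of two */
	len = (user_len + align - 1) & ~((size_t)align - 1);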

void
dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
		      unsigned long offset, size_t size,
		      enum dma_data_direction direction)

Does a partial sync, starting at offset and continuing for size. You
must be careful to observe the cache alignment and width when doing
anything like this. You must also be extra careful about accessing
memory you intend to sync partially.

void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)

Do a partial sync of memory that was allocated by
dma_alloc_noncoherent(), starting at virtual address vaddr and
continuing on for size. Again, you *must* observe the cache line
boundaries when doing this.

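A minimal sketch of the noncoherent alloc/sync/free cycle (BUF_BYTES
and fill_descriptors() are hypothetical):

	#define BUF_BYTES 4096

	dma_addr_t buf_dma;
	void *buf;

	buf = dma_alloc_noncoherent(dev, BUF_BYTES, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the CPU fills in descriptors, then pushes them out before
	   the device looks at them; on a consistent platform the
	   sync is a nop */
	fill_descriptors(buf);
	dma_cache_sync(buf, BUF_BYTES, DMA_TO_DEVICE);

	/* ... device runs ... */

	dma_free_noncoherent(dev, BUF_BYTES, buf, buf_dma);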

int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size,
			    int flags)

Declare a region of memory to be handed out by dma_alloc_coherent when
it's asked for coherent memory for this device.

bus_addr is the physical address to which the memory is currently
assigned in the bus responding region (this will be used by the
platform to perform the mapping)

device_addr is the physical address the device needs to be programmed
with to actually address this memory (this will be handed out as the
dma_addr_t in dma_alloc_coherent())

size is the size of the area (must be a multiple of PAGE_SIZE).

flags can be ORed together and are:

DMA_MEMORY_MAP - request that the memory returned from
dma_alloc_coherent() be directly writeable.

DMA_MEMORY_IO - request that the memory returned from
dma_alloc_coherent() be addressable using read/write/memcpy_toio etc.

One or both of these flags must be present.

DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory allocatable by
dma_alloc_coherent() of any child devices of this one (for memory residing
on a bridge).

DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
Do not allow dma_alloc_coherent() to fall back to system memory when
it's out of memory in the declared region.

The return value will be either DMA_MEMORY_MAP or DMA_MEMORY_IO and
must correspond to a passed in flag (i.e. no returning DMA_MEMORY_IO
if only DMA_MEMORY_MAP was passed in) for success, or zero for
failure.

Note, for DMA_MEMORY_IO returns, all subsequent memory returned by
dma_alloc_coherent() may no longer be accessed directly, but instead
must be accessed using the correct bus functions. If your driver
isn't prepared to handle this contingency, it should not specify
DMA_MEMORY_IO in the input flags.

As a simplification for the platforms, only *one* such region of
memory may be declared per device.

For reasons of efficiency, most platforms choose to track the declared
region only at the granularity of a page. For smaller allocations,
you should use the dma_pool() API.

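A sketch of how a driver with device-local memory might use this call
(phys_base, dev_base and MEM_BYTES are hypothetical board-specific
values):

	if (!dma_declare_coherent_memory(dev, phys_base, dev_base,
					 MEM_BYTES,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENOMEM;

	/* dma_alloc_coherent() for this device now allocates from the
	   declared region and returns directly writeable memory */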

void
dma_release_declared_memory(struct device *dev)

Remove the memory region previously declared from the system. This
API performs *no* in-use checking for this region and will return
unconditionally having removed all the required structures. It is the
driver's job to ensure that no parts of this memory region are
currently in use.

void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)

This is used to occupy specific regions of the declared space
(dma_alloc_coherent() will hand out the first free region it finds).

device_addr is the *device* address of the region requested.

size is the size (and should be a page-sized multiple).

The return value will be either a pointer to the processor virtual
address of the memory, or an error (via PTR_ERR()) if any part of the
region is occupied.

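For example (fixed_addr is a hypothetical device address at which the
hardware requires a structure to live):

	void *vaddr;

	vaddr = dma_mark_declared_memory_occupied(dev, fixed_addr,
						  PAGE_SIZE);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);	/* that part of the region is taken */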