author		Randy Dunlap <randy.dunlap@oracle.com>			2007-07-31 03:38:17 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-31 18:39:39 -0400
commit		a12e2c6cde6392287b9cd3b4bd8d843fd1458087 (patch)
tree		de28f0a232077e1dfaa56ffe35a7dcb238c0b72f /Documentation/DMA-API.txt
parent		9eb3ff40376e505eafb927b4a4cbccc928df68ec (diff)
Doc: DMA-API update
Fix typos and update function parameters.
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Acked-by: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'Documentation/DMA-API.txt')
-rw-r--r--	Documentation/DMA-API.txt | 79
1 file changed, 38 insertions(+), 41 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 805db4b2cba6..cc7a8c39fb6f 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -26,7 +26,7 @@ Part Ia - Using large dma-coherent buffers
 
 void *
 dma_alloc_coherent(struct device *dev, size_t size,
-			dma_addr_t *dma_handle, int flag)
+			dma_addr_t *dma_handle, gfp_t flag)
 void *
 pci_alloc_consistent(struct pci_dev *dev, size_t size,
 			dma_addr_t *dma_handle)
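
The gfp_t change above matters at every call site. A minimal sketch of
the corrected signature in use (the device pointer dev and the error
path are hypothetical driver context, not part of this patch):

	#include <linux/dma-mapping.h>

	dma_addr_t dma_handle;
	void *cpu_addr;

	/* flags are gfp_t, not int: GFP_KERNEL may sleep */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
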
@@ -38,7 +38,7 @@ to make sure to flush the processor's write buffers before telling
 devices to read that memory.)
 
 This routine allocates a region of <size> bytes of consistent memory.
-it also returns a <dma_handle> which may be cast to an unsigned
+It also returns a <dma_handle> which may be cast to an unsigned
 integer the same width as the bus and used as the physical address
 base of the region.
 
@@ -52,21 +52,21 @@ The simplest way to do that is to use the dma_pool calls (see below).
 
 The flag parameter (dma_alloc_coherent only) allows the caller to
 specify the GFP_ flags (see kmalloc) for the allocation (the
-implementation may chose to ignore flags that affect the location of
+implementation may choose to ignore flags that affect the location of
 the returned memory, like GFP_DMA). For pci_alloc_consistent, you
 must assume GFP_ATOMIC behaviour.
 
 void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 	dma_addr_t dma_handle)
 void
-pci_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr
+pci_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
 	dma_addr_t dma_handle)
 
 Free the region of consistent memory you previously allocated. dev,
 size and dma_handle must all be the same as those passed into the
 consistent allocate. cpu_addr must be the virtual address returned by
-the consistent allocate
+the consistent allocate.
 
 
 Part Ib - Using small dma-coherent buffers
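
The matching release, sketched with the same hypothetical names as the
allocation above; dev, size and dma_handle must be exactly what was
passed to and returned by the allocator:

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
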
@@ -77,9 +77,9 @@ To get this part of the dma_ API, you must #include <linux/dmapool.h>
 Many drivers need lots of small dma-coherent memory regions for DMA
 descriptors or I/O buffers. Rather than allocating in units of a page
 or more using dma_alloc_coherent(), you can use DMA pools. These work
-much like a struct kmem_cache, except that they use the dma-coherent allocator
+much like a struct kmem_cache, except that they use the dma-coherent allocator,
 not __get_free_pages(). Also, they understand common hardware constraints
-for alignment, like queue heads needing to be aligned on N byte boundaries.
+for alignment, like queue heads needing to be aligned on N-byte boundaries.
 
 
 struct dma_pool *
@@ -102,15 +102,15 @@ crossing restrictions, pass 0 for alloc; passing 4096 says memory allocated
 from this pool must not cross 4KByte boundaries.
 
 
-	void *dma_pool_alloc(struct dma_pool *pool, int gfp_flags,
+	void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
 			dma_addr_t *dma_handle);
 
-	void *pci_pool_alloc(struct pci_pool *pool, int gfp_flags,
+	void *pci_pool_alloc(struct pci_pool *pool, gfp_t gfp_flags,
 			dma_addr_t *dma_handle);
 
 This allocates memory from the pool; the returned memory will meet the size
 and alignment requirements specified at creation time. Pass GFP_ATOMIC to
-prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks)
+prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks),
 pass GFP_KERNEL to allow blocking. Like dma_alloc_coherent(), this returns
 two values: an address usable by the cpu, and the dma address usable by the
 pool's device.
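
A sketch of the pool calls with their corrected gfp_t parameter. The
pool name, element size and alignment are invented values for a
hypothetical descriptor ring:

	#include <linux/dmapool.h>

	struct dma_pool *pool;
	dma_addr_t desc_dma;
	void *desc;

	/* 64-byte descriptors on 64-byte boundaries, no crossing limit */
	pool = dma_pool_create("mydev_desc", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* GFP_KERNEL permitted here: not in interrupt, no locks held */
	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
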
@@ -123,7 +123,7 @@ pool's device.
 	dma_addr_t addr);
 
 This puts memory back into the pool. The pool is what was passed to
-the pool allocation routine; the cpu and dma addresses are what
+the pool allocation routine; the cpu (vaddr) and dma addresses are what
 were returned when that routine allocated the memory being freed.
 
 
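
Continuing the sketch: both addresses returned by dma_pool_alloc() go
back to dma_pool_free(), after which the empty pool can be destroyed:

	dma_pool_free(pool, desc, desc_dma);
	dma_pool_destroy(pool);
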
@@ -209,18 +209,18 @@ Notes: Not all memory regions in a machine can be mapped by this
 API. Further, regions that appear to be physically contiguous in
 kernel virtual space may not be contiguous as physical memory. Since
 this API does not provide any scatter/gather capability, it will fail
-if the user tries to map a non physically contiguous piece of memory.
+if the user tries to map a non-physically contiguous piece of memory.
 For this reason, it is recommended that memory mapped by this API be
-obtained only from sources which guarantee to be physically contiguous
+obtained only from sources which guarantee it to be physically contiguous
 (like kmalloc).
 
 Further, the physical address of the memory must be within the
 dma_mask of the device (the dma_mask represents a bit mask of the
-addressable region for the device. i.e. if the physical address of
+addressable region for the device. I.e., if the physical address of
 the memory anded with the dma_mask is still equal to the physical
 address, then the device can perform DMA to the memory). In order to
 ensure that the memory allocated by kmalloc is within the dma_mask,
-the driver may specify various platform dependent flags to restrict
+the driver may specify various platform-dependent flags to restrict
 the physical memory range of the allocation (e.g. on x86, GFP_DMA
 guarantees to be within the first 16Mb of available physical memory,
 as required by ISA devices).
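
The anded-with-dma_mask test works out concretely as follows, using an
invented 24-bit ISA-style mask:

	/* dma_mask = 0x00ffffff (24 addressable bits)                       */
	/* 0x00a00000 & 0x00ffffff == 0x00a00000 -> equal, device can reach it */
	/* 0x01200000 & 0x00ffffff == 0x00200000 -> not equal, out of range    */
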
@@ -244,14 +244,14 @@ are guaranteed also to be cache line boundaries).
 
 DMA_TO_DEVICE synchronisation must be done after the last modification
 of the memory region by the software and before it is handed off to
-the driver. Once this primitive is used. Memory covered by this
-primitive should be treated as read only by the device. If the device
+the driver. Once this primitive is used, memory covered by this
+primitive should be treated as read-only by the device. If the device
 may write to it at any point, it should be DMA_BIDIRECTIONAL (see
 below).
 
 DMA_FROM_DEVICE synchronisation must be done before the driver
 accesses data that may be changed by the device. This memory should
-be treated as read only by the driver. If the driver needs to write
+be treated as read-only by the driver. If the driver needs to write
 to it at any point, it should be DMA_BIDIRECTIONAL (see below).
 
 DMA_BIDIRECTIONAL requires special handling: it means that the driver
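
Choosing the direction at map time follows from these rules. A
hypothetical sketch (buf, rx_buf and both lengths are illustrative):

	dma_addr_t tx_map, rx_map;

	/* CPU filled buf; the device will only read it */
	tx_map = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* the device will write rx_buf; the CPU only reads it afterwards */
	rx_map = dma_map_single(dev, rx_buf, rx_len, DMA_FROM_DEVICE);
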
@@ -261,7 +261,7 @@ you must always sync bidirectional memory twice: once before the
 memory is handed off to the device (to make sure all memory changes
 are flushed from the processor) and once before the data may be
 accessed after being used by the device (to make sure any processor
-cache lines are updated with data that the device may have changed.
+cache lines are updated with data that the device may have changed).
 
 void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
@@ -302,8 +302,8 @@ pci_dma_mapping_error(dma_addr_t dma_addr)
 
 In some circumstances dma_map_single and dma_map_page will fail to create
 a mapping. A driver can check for these errors by testing the returned
-dma address with dma_mapping_error(). A non zero return value means the mapping
-could not be created and the driver should take appropriate action (eg
+dma address with dma_mapping_error(). A non-zero return value means the mapping
+could not be created and the driver should take appropriate action (e.g.
 reduce current DMA mapping usage or delay and try again later).
 
 int
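
That check, sketched with the single-argument dma_mapping_error() form
this document uses, together with the matching unmap (all names are
illustrative):

	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mapping)) {
		/* e.g. shrink the request or retry from a timer */
		return -ENOMEM;
	}
	/* ... hardware performs its DMA ... */
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
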
@@ -315,7 +315,7 @@ reduce current DMA mapping usage or delay and try again later).
 
 Maps a scatter gather list from the block layer.
 
-Returns: the number of physical segments mapped (this may be shorted
+Returns: the number of physical segments mapped (this may be shorter
 than <nents> passed in if the block layer determines that some
 elements of the scatter/gather list are physically adjacent and thus
 may be mapped with a single entry).
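
In practice the returned count bounds the device-programming loop,
while the original <nents> is still what gets unmapped. A sketch,
where hw_set_segment() is an invented stand-in for whatever the
hypothetical hardware needs:

	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	for (i = 0; i < count; i++)
		/* hw_set_segment() is an invented driver helper */
		hw_set_segment(i, sg_dma_address(&sglist[i]),
			       sg_dma_len(&sglist[i]));

	/* when done: note the original nents, not count */
	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
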
@@ -357,7 +357,7 @@ accessed sg->address and sg->length as shown above.
 	pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 		int nents, int direction)
 
-unmap the previously mapped scatter/gather list. All the parameters
+Unmap the previously mapped scatter/gather list. All the parameters
 must be the same as those and passed in to the scatter/gather mapping
 API.
 
@@ -377,7 +377,7 @@ void
 	pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 		int nelems, int direction)
 
-synchronise a single contiguous or scatter/gather mapping. All the
+Synchronise a single contiguous or scatter/gather mapping. All the
 parameters must be the same as those passed into the single mapping
 API.
 
@@ -406,7 +406,7 @@ API at all.
 
 void *
 dma_alloc_noncoherent(struct device *dev, size_t size,
-			dma_addr_t *dma_handle, int flag)
+			dma_addr_t *dma_handle, gfp_t flag)
 
 Identical to dma_alloc_coherent() except that the platform will
 choose to return either consistent or non-consistent memory as it sees
@@ -426,34 +426,34 @@ void
 dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 	dma_addr_t dma_handle)
 
-free memory allocated by the nonconsistent API. All parameters must
+Free memory allocated by the nonconsistent API. All parameters must
 be identical to those passed in (and returned by
 dma_alloc_noncoherent()).
 
 int
 dma_is_consistent(struct device *dev, dma_addr_t dma_handle)
 
-returns true if the device dev is performing consistent DMA on the memory
+Returns true if the device dev is performing consistent DMA on the memory
 area pointed to by the dma_handle.
 
 int
 dma_get_cache_alignment(void)
 
-returns the processor cache alignment. This is the absolute minimum
+Returns the processor cache alignment. This is the absolute minimum
 alignment *and* width that you must observe when either mapping
 memory or doing partial flushes.
 
 Notes: This API may return a number *larger* than the actual cache
 line, but it will guarantee that one or more cache lines fit exactly
 into the width returned by this call. It will also always be a power
-of two for easy alignment
+of two for easy alignment.
 
 void
 dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
 	unsigned long offset, size_t size,
 	enum dma_data_direction direction)
 
-does a partial sync. starting at offset and continuing for size. You
+Does a partial sync, starting at offset and continuing for size. You
 must be careful to observe the cache alignment and width when doing
 anything like this. You must also be extra careful about accessing
 memory you intend to sync partially.
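
One way to observe that alignment when syncing partially; a sketch in
which dev, mapping, offset and size are whatever device, handle and
sub-range the hypothetical driver cares about:

	int align = dma_get_cache_alignment();	/* always a power of two */
	unsigned long start = offset & ~(align - 1);	   /* round down  */
	size_t span = ALIGN(offset + size, align) - start; /* widen up    */

	dma_sync_single_range(dev, mapping, start, span, DMA_FROM_DEVICE);
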
@@ -472,21 +472,20 @@ dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 	dma_addr_t device_addr, size_t size, int
 	flags)
 
-
 Declare region of memory to be handed out by dma_alloc_coherent when
 it's asked for coherent memory for this device.
 
 bus_addr is the physical address to which the memory is currently
 assigned in the bus responding region (this will be used by the
-platform to perform the mapping)
+platform to perform the mapping).
 
 device_addr is the physical address the device needs to be programmed
 with actually to address this memory (this will be handed out as the
-dma_addr_t in dma_alloc_coherent())
+dma_addr_t in dma_alloc_coherent()).
 
 size is the size of the area (must be multiples of PAGE_SIZE).
 
-flags can be or'd together and are
+flags can be or'd together and are:
 
 DMA_MEMORY_MAP - request that the memory returned from
 dma_alloc_coherent() be directly writable.
@@ -494,7 +493,7 @@ dma_alloc_coherent() be directly writable.
 
 DMA_MEMORY_IO - request that the memory returned from
 dma_alloc_coherent() be addressable using read/write/memcpy_toio etc.
-One or both of these flags must be present
+One or both of these flags must be present.
 
 DMA_MEMORY_INCLUDES_CHILDREN - make the declared memory be allocated by
 dma_alloc_coherent of any child devices of this one (for memory residing
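
Pulling the declaration parameters together: a hypothetical sketch for
a device with 64KB of onboard memory (every address here is invented
for illustration, and the call returns 0 on failure):

	if (!dma_declare_coherent_memory(dev,
			0xf0000000,	/* bus_addr: where it responds on the bus  */
			0x00000000,	/* device_addr: what the device is programmed with */
			0x10000,	/* 64KB, a multiple of PAGE_SIZE           */
			DMA_MEMORY_MAP))
		return -ENXIO;
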
@@ -528,7 +527,7 @@ dma_release_declared_memory(struct device *dev)
 Remove the memory region previously declared from the system. This
 API performs *no* in-use checking for this region and will return
 unconditionally having removed all the required structures. It is the
-drivers job to ensure that no parts of this memory region are
+driver's job to ensure that no parts of this memory region are
 currently in use.
 
 void *
@@ -538,12 +537,10 @@ dma_mark_declared_memory_occupied(struct device *dev,
 This is used to occupy specific regions of the declared space
 (dma_alloc_coherent() will hand out the first free region it finds).
 
-device_addr is the *device* address of the region requested
+device_addr is the *device* address of the region requested.
 
-size is the size (and should be a page sized multiple).
+size is the size (and should be a page-sized multiple).
 
 The return value will be either a pointer to the processor virtual
 address of the memory, or an error (via PTR_ERR()) if any part of the
 region is occupied.
-
-