Diffstat (limited to 'Documentation/DMA-API.txt')
 -rw-r--r--  Documentation/DMA-API.txt | 122
 1 file changed, 36 insertions(+), 86 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 5aceb88b3f8b..05e2ae236865 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -4,20 +4,18 @@
 James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
 
 This document describes the DMA API. For a more gentle introduction
-phrased in terms of the pci_ equivalents (and actual examples) see
-Documentation/PCI/PCI-DMA-mapping.txt.
+of the API (and actual examples) see
+Documentation/DMA-API-HOWTO.txt.
 
-This API is split into two pieces. Part I describes the API and the
-corresponding pci_ API. Part II describes the extensions to the API
-for supporting non-consistent memory machines. Unless you know that
-your driver absolutely has to support non-consistent platforms (this
-is usually only legacy platforms) you should only use the API
-described in part I.
+This API is split into two pieces. Part I describes the API. Part II
+describes the extensions to the API for supporting non-consistent
+memory machines. Unless you know that your driver absolutely has to
+support non-consistent platforms (this is usually only legacy
+platforms) you should only use the API described in part I.
 
-Part I - pci_ and dma_ Equivalent API
+Part I - dma_ API
 -------------------------------------
 
-To get the pci_ API, you must #include <linux/pci.h>
 To get the dma_ API, you must #include <linux/dma-mapping.h>
 
 
@@ -27,9 +25,6 @@ Part Ia - Using large dma-coherent buffers
 void *
 dma_alloc_coherent(struct device *dev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flag)
-void *
-pci_alloc_consistent(struct pci_dev *dev, size_t size,
-			dma_addr_t *dma_handle)
 
 Consistent memory is memory for which a write by either the device or
 the processor can immediately be read by the processor or device
@@ -53,15 +48,11 @@ The simplest way to do that is to use the dma_pool calls (see below).
 The flag parameter (dma_alloc_coherent only) allows the caller to
 specify the GFP_ flags (see kmalloc) for the allocation (the
 implementation may choose to ignore flags that affect the location of
-the returned memory, like GFP_DMA). For pci_alloc_consistent, you
-must assume GFP_ATOMIC behaviour.
+the returned memory, like GFP_DMA).
 
 void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 			dma_addr_t dma_handle)
-void
-pci_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
-			dma_addr_t dma_handle)
 
 Free the region of consistent memory you previously allocated. dev,
 size and dma_handle must all be the same as those passed into the
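A minimal usage sketch of the coherent alloc/free pairing above, assuming
a hypothetical driver that already holds a struct device *dev and wants a
one-page buffer (names and sizes are illustrative, not from the patch):

	/* illustrative only: allocate one page of consistent memory */
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, access the buffer via cpu_addr ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);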
@@ -89,10 +80,6 @@ for alignment, like queue heads needing to be aligned on N-byte boundaries.
 dma_pool_create(const char *name, struct device *dev,
 		size_t size, size_t align, size_t alloc);
 
-struct pci_pool *
-pci_pool_create(const char *name, struct pci_device *dev,
-		size_t size, size_t align, size_t alloc);
-
 The pool create() routines initialize a pool of dma-coherent buffers
 for use with a given device. It must be called in a context which
 can sleep.
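A sketch of pool creation under stated assumptions: a hypothetical pool of
64-byte, 16-byte-aligned descriptors with no boundary restriction (alloc == 0):

	/* illustrative only: the name and all sizes are made-up values */
	struct dma_pool *pool;

	pool = dma_pool_create("hypothetical_descs", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;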
@@ -108,9 +95,6 @@ from this pool must not cross 4KByte boundaries.
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags,
 			dma_addr_t *dma_handle);
 
-void *pci_pool_alloc(struct pci_pool *pool, gfp_t gfp_flags,
-			dma_addr_t *dma_handle);
-
 This allocates memory from the pool; the returned memory will meet the size
 and alignment requirements specified at creation time. Pass GFP_ATOMIC to
 prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks),
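Continuing the hypothetical pool from the previous sketch, an allocation
from a sleepable context might look like:

	dma_addr_t desc_dma;
	void *desc;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
	if (!desc)
		return -ENOMEM;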
@@ -122,9 +106,6 @@ pool's device.
 void dma_pool_free(struct dma_pool *pool, void *vaddr,
 			dma_addr_t addr);
 
-void pci_pool_free(struct pci_pool *pool, void *vaddr,
-			dma_addr_t addr);
-
 This puts memory back into the pool. The pool is what was passed to
 the pool allocation routine; the cpu (vaddr) and dma addresses are what
 were returned when that routine allocated the memory being freed.
@@ -132,8 +113,6 @@ were returned when that routine allocated the memory being freed.
 
 void dma_pool_destroy(struct dma_pool *pool);
 
-void pci_pool_destroy(struct pci_pool *pool);
-
 The pool destroy() routines free the resources of the pool. They must be
 called in a context which can sleep. Make sure you've freed all allocated
 memory back to the pool before you destroy it.
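Teardown for the hypothetical pool sketched earlier: every allocation goes
back to the pool first, then the pool is destroyed from a sleepable context:

	dma_pool_free(pool, desc, desc_dma);
	dma_pool_destroy(pool);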
@@ -144,8 +123,6 @@ Part Ic - DMA addressing limitations
 
 int
 dma_supported(struct device *dev, u64 mask)
-int
-pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 
 Checks to see if the device can support DMA to the memory described by
 mask.
@@ -159,8 +136,14 @@ driver writers.
 
 int
 dma_set_mask(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
 int
-pci_set_dma_mask(struct pci_device *dev, u64 mask)
+dma_set_coherent_mask(struct device *dev, u64 mask)
 
 Checks to see if the mask is possible and updates the device
 parameters if it is.
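A sketch of a probe routine declaring 32-bit addressing limits, assuming a
hypothetical device that cannot address beyond 4GB (fallback handling is
omitted for brevity):

	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;
	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;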
@@ -187,9 +170,6 @@ Part Id - Streaming DMA mappings
 dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		enum dma_data_direction direction)
-dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *cpu_addr, size_t size,
-		int direction)
 
 Maps a piece of processor virtual memory so it can be accessed by the
 device and returns the physical handle of the memory.
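A minimal streaming-mapping sketch, assuming a hypothetical kmalloc'ed
buffer buf of len bytes that the device will write into (the returned
handle should be checked with dma_mapping_error(), shown further down):

	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... device DMA into the buffer completes ... */

	dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);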
@@ -198,14 +178,10 @@ The direction for both api's may be converted freely by casting.
 However the dma_ API uses a strongly typed enumerator for its
 direction:
 
-DMA_NONE		= PCI_DMA_NONE		no direction (used for
-						debugging)
-DMA_TO_DEVICE		= PCI_DMA_TODEVICE	data is going from the
-						memory to the device
-DMA_FROM_DEVICE		= PCI_DMA_FROMDEVICE	data is coming from
-						the device to the
-						memory
-DMA_BIDIRECTIONAL	= PCI_DMA_BIDIRECTIONAL	direction isn't known
+DMA_NONE		no direction (used for debugging)
+DMA_TO_DEVICE		data is going from the memory to the device
+DMA_FROM_DEVICE		data is coming from the device to the memory
+DMA_BIDIRECTIONAL	direction isn't known
 
 Notes: Not all memory regions in a machine can be mapped by this
 API. Further, regions that appear to be physically contiguous in
@@ -268,9 +244,6 @@ cache lines are updated with data that the device may have changed).
 void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction direction)
-void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-		size_t size, int direction)
 
 Unmaps the region previously mapped. All the parameters passed in
 must be identical to those passed in (and returned) by the mapping
@@ -280,15 +253,9 @@ dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction direction)
-dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
-		unsigned long offset, size_t size, int direction)
 void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 		enum dma_data_direction direction)
-void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
-		size_t size, int direction)
 
 API for mapping and unmapping for pages. All the notes and warnings
 for the other mapping APIs apply here. Also, although the <offset>
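A page-based variant of the same pattern, assuming a hypothetical
struct page *page holding 512 bytes to transmit from offset 0:

	dma_addr_t dma_addr;

	dma_addr = dma_map_page(dev, page, 0, 512, DMA_TO_DEVICE);

	/* ... device reads the data ... */

	dma_unmap_page(dev, dma_addr, 512, DMA_TO_DEVICE);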
@@ -299,9 +266,6 @@ cache width is.
 int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
-int
-pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
-
 In some circumstances dma_map_single and dma_map_page will fail to create
 a mapping. A driver can check for these errors by testing the returned
 dma address with dma_mapping_error(). A non-zero return value means the mapping
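The corresponding error check, reusing the hypothetical buffer from the
dma_map_single() sketch above:

	dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		/* reduce DMA mapping usage, or delay and retry later */
		return -ENOMEM;
	}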
@@ -311,9 +275,6 @@ reduce current DMA mapping usage or delay and try again later).
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int nents, enum dma_data_direction direction)
-int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-	int nents, int direction)
 
 Returns: the number of physical segments mapped (this may be shorter
 than <nents> passed in if some elements of the scatter/gather list are
@@ -353,9 +314,6 @@ accessed sg->address and sg->length as shown above.
 void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	int nhwentries, enum dma_data_direction direction)
-void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-	int nents, int direction)
 
 Unmap the previously mapped scatter/gather list. All the parameters
 must be the same as those and passed in to the scatter/gather mapping
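A scatter/gather sketch, assuming a hypothetical, already-initialized
scatterlist sglist with nents entries (note the unmap uses the original
nents, not the mapped count):

	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	for_each_sg(sglist, sg, count, i) {
		/* program the device with sg_dma_address(sg) and
		   sg_dma_len(sg) for each mapped segment */
	}

	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);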
@@ -365,21 +323,23 @@ Note: <nents> must be the number you passed in, *not* the number of
 physical entries returned.
 
 void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 void
-pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
-		size_t size, int direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 		enum dma_data_direction direction)
 void
-pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-		int nelems, int direction)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		enum dma_data_direction direction)
 
-Synchronise a single contiguous or scatter/gather mapping. All the
-parameters must be the same as those passed into the single mapping
-API.
+Synchronise a single contiguous or scatter/gather mapping for the cpu
+and device. With the sync_sg API, all the parameters must be the same
+as those passed into the single mapping API. With the sync_single API,
+you can use dma_handle and size parameters that aren't identical to
+those passed into the single mapping API to do a partial sync.
 
 Notes: You must do this:
 
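A sketch of handing a still-mapped DMA_FROM_DEVICE buffer back and forth,
under the same hypothetical names used in the earlier streaming examples:

	dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);

	/* ... the processor may now safely read the buffer ... */

	dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);

	/* ... the device may now DMA into the buffer again ... */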
@@ -461,9 +421,9 @@ void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
 Part II - Advanced dma_ usage
 -----------------------------
 
-Warning: These pieces of the DMA API have no PCI equivalent. They
-should also not be used in the majority of cases, since they cater for
-unlikely corner cases that don't belong in usual drivers.
+Warning: These pieces of the DMA API should not be used in the
+majority of cases, since they cater for unlikely corner cases that
+don't belong in usual drivers.
 
 If you don't understand how cache line coherency works between a
 processor and an I/O device, you should not be using this part of the
@@ -514,16 +474,6 @@ into the width returned by this call. It will also always be a power
 of two for easy alignment.
 
 void
-dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-
-Does a partial sync, starting at offset and continuing for size. You
-must be careful to observe the cache alignment and width when doing
-anything like this. You must also be extra careful about accessing
-memory you intend to sync partially.
-
-void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 