diff options
-rw-r--r-- | Documentation/DMA-API-HOWTO.txt | 192 | ||||
-rw-r--r-- | Documentation/DMA-API.txt | 139 | ||||
-rw-r--r-- | Documentation/DMA-ISA-LPC.txt | 4 | ||||
-rw-r--r-- | include/linux/dma-mapping.h | 6 | ||||
-rw-r--r-- | include/linux/types.h | 1 |
5 files changed, 204 insertions, 138 deletions
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index 5e983031cc11..fd3727b94ac2 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt | |||
@@ -9,16 +9,76 @@ This is a guide to device driver writers on how to use the DMA API | |||
9 | with example pseudo-code. For a concise description of the API, see | 9 | with example pseudo-code. For a concise description of the API, see |
10 | DMA-API.txt. | 10 | DMA-API.txt. |
11 | 11 | ||
12 | Most of the 64bit platforms have special hardware that translates bus | 12 | CPU and DMA addresses |
13 | addresses (DMA addresses) into physical addresses. This is similar to | 13 | |
14 | how page tables and/or a TLB translates virtual addresses to physical | 14 | There are several kinds of addresses involved in the DMA API, and it's |
15 | addresses on a CPU. This is needed so that e.g. PCI devices can | 15 | important to understand the differences. |
16 | access with a Single Address Cycle (32bit DMA address) any page in the | 16 | |
17 | 64bit physical address space. Previously in Linux those 64bit | 17 | The kernel normally uses virtual addresses. Any address returned by |
18 | platforms had to set artificial limits on the maximum RAM size in the | 18 | kmalloc(), vmalloc(), and similar interfaces is a virtual address and can |
19 | system, so that the virt_to_bus() static scheme works (the DMA address | 19 | be stored in a "void *". |
20 | translation tables were simply filled on bootup to map each bus | 20 | |
21 | address to the physical page __pa(bus_to_virt())). | 21 | The virtual memory system (TLB, page tables, etc.) translates virtual |
22 | addresses to CPU physical addresses, which are stored as "phys_addr_t" or | ||
23 | "resource_size_t". The kernel manages device resources like registers as | ||
24 | physical addresses. These are the addresses in /proc/iomem. The physical | ||
25 | address is not directly useful to a driver; it must use ioremap() to map | ||
26 | the space and produce a virtual address. | ||
27 | |||
28 | I/O devices use a third kind of address: a "bus address" or "DMA address". | ||
29 | If a device has registers at an MMIO address, or if it performs DMA to read | ||
30 | or write system memory, the addresses used by the device are bus addresses. | ||
31 | In some systems, bus addresses are identical to CPU physical addresses, but | ||
32 | in general they are not. IOMMUs and host bridges can produce arbitrary | ||
33 | mappings between physical and bus addresses. | ||
34 | |||
35 | Here's a picture and some examples: | ||
36 | |||
37 | CPU CPU Bus | ||
38 | Virtual Physical Address | ||
39 | Address Address Space | ||
40 | Space Space | ||
41 | |||
42 | +-------+ +------+ +------+ | ||
43 | | | |MMIO | Offset | | | ||
44 | | | Virtual |Space | applied | | | ||
45 | C +-------+ --------> B +------+ ----------> +------+ A | ||
46 | | | mapping | | by host | | | ||
47 | +-----+ | | | | bridge | | +--------+ | ||
48 | | | | | +------+ | | | | | ||
49 | | CPU | | | | RAM | | | | Device | | ||
50 | | | | | | | | | | | | ||
51 | +-----+ +-------+ +------+ +------+ +--------+ | ||
52 | | | Virtual |Buffer| Mapping | | | ||
53 | X +-------+ --------> Y +------+ <---------- +------+ Z | ||
54 | | | mapping | RAM | by IOMMU | ||
55 | | | | | | ||
56 | | | | | | ||
57 | +-------+ +------+ | ||
58 | |||
59 | During the enumeration process, the kernel learns about I/O devices and | ||
60 | their MMIO space and the host bridges that connect them to the system. For | ||
61 | example, if a PCI device has a BAR, the kernel reads the bus address (A) | ||
62 | from the BAR and converts it to a CPU physical address (B). The address B | ||
63 | is stored in a struct resource and usually exposed via /proc/iomem. When a | ||
64 | driver claims a device, it typically uses ioremap() to map physical address | ||
65 | B at a virtual address (C). It can then use, e.g., ioread32(C), to access | ||
66 | the device registers at bus address A. | ||
67 | |||
68 | If the device supports DMA, the driver sets up a buffer using kmalloc() or | ||
69 | a similar interface, which returns a virtual address (X). The virtual | ||
70 | memory system maps X to a physical address (Y) in system RAM. The driver | ||
71 | can use virtual address X to access the buffer, but the device itself | ||
72 | cannot because DMA doesn't go through the CPU virtual memory system. | ||
73 | |||
74 | In some simple systems, the device can do DMA directly to physical address | ||
75 | Y. But in many others, there is IOMMU hardware that translates bus | ||
76 | addresses to physical addresses, e.g., it translates Z to Y. This is part | ||
77 | of the reason for the DMA API: the driver can give a virtual address X to | ||
78 | an interface like dma_map_single(), which sets up any required IOMMU | ||
79 | mapping and returns the bus address Z. The driver then tells the device to | ||
80 | do DMA to Z, and the IOMMU maps it to the buffer at address Y in system | ||
81 | RAM. | ||
22 | 82 | ||
23 | So that Linux can use the dynamic DMA mapping, it needs some help from the | 83 | So that Linux can use the dynamic DMA mapping, it needs some help from the |
24 | drivers, namely it has to take into account that DMA addresses should be | 84 | drivers, namely it has to take into account that DMA addresses should be |
@@ -29,17 +89,17 @@ The following API will work of course even on platforms where no such | |||
29 | hardware exists. | 89 | hardware exists. |
30 | 90 | ||
31 | Note that the DMA API works with any bus independent of the underlying | 91 | Note that the DMA API works with any bus independent of the underlying |
32 | microprocessor architecture. You should use the DMA API rather than | 92 | microprocessor architecture. You should use the DMA API rather than the |
33 | the bus specific DMA API (e.g. pci_dma_*). | 93 | bus-specific DMA API, i.e., use the dma_map_*() interfaces rather than the |
94 | pci_map_*() interfaces. | ||
34 | 95 | ||
35 | First of all, you should make sure | 96 | First of all, you should make sure |
36 | 97 | ||
37 | #include <linux/dma-mapping.h> | 98 | #include <linux/dma-mapping.h> |
38 | 99 | ||
39 | is in your driver. This file will obtain for you the definition of the | 100 | is in your driver, which provides the definition of dma_addr_t. This type |
40 | dma_addr_t (which can hold any valid DMA address for the platform) | 101 | can hold any valid DMA or bus address for the platform and should be used |
41 | type which should be used everywhere you hold a DMA (bus) address | 102 | everywhere you hold a DMA address returned from the DMA mapping functions. |
42 | returned from the DMA mapping functions. | ||
43 | 103 | ||
44 | What memory is DMA'able? | 104 | What memory is DMA'able? |
45 | 105 | ||
@@ -123,9 +183,9 @@ Here, dev is a pointer to the device struct of your device, and mask | |||
123 | is a bit mask describing which bits of an address your device | 183 | is a bit mask describing which bits of an address your device |
124 | supports. It returns zero if your card can perform DMA properly on | 184 | supports. It returns zero if your card can perform DMA properly on |
125 | the machine given the address mask you provided. In general, the | 185 | the machine given the address mask you provided. In general, the |
126 | device struct of your device is embedded in the bus specific device | 186 | device struct of your device is embedded in the bus-specific device |
127 | struct of your device. For example, a pointer to the device struct of | 187 | struct of your device. For example, &pdev->dev is a pointer to the |
128 | your PCI device is pdev->dev (pdev is a pointer to the PCI device | 188 | device struct of a PCI device (pdev is a pointer to the PCI device |
129 | struct of your device). | 189 | struct of your device). |
130 | 190 | ||
131 | If it returns non-zero, your device cannot perform DMA properly on | 191 | If it returns non-zero, your device cannot perform DMA properly on |
@@ -147,8 +207,7 @@ exactly why. | |||
147 | The standard 32-bit addressing device would do something like this: | 207 | The standard 32-bit addressing device would do something like this: |
148 | 208 | ||
149 | if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { | 209 | if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { |
150 | printk(KERN_WARNING | 210 | dev_warn(dev, "mydev: No suitable DMA available\n"); |
151 | "mydev: No suitable DMA available.\n"); | ||
152 | goto ignore_this_device; | 211 | goto ignore_this_device; |
153 | } | 212 | } |
154 | 213 | ||
@@ -170,8 +229,7 @@ all 64-bits when accessing streaming DMA: | |||
170 | } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) { | 229 | } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) { |
171 | using_dac = 0; | 230 | using_dac = 0; |
172 | } else { | 231 | } else { |
173 | printk(KERN_WARNING | 232 | dev_warn(dev, "mydev: No suitable DMA available\n"); |
174 | "mydev: No suitable DMA available.\n"); | ||
175 | goto ignore_this_device; | 233 | goto ignore_this_device; |
176 | } | 234 | } |
177 | 235 | ||
@@ -187,8 +245,7 @@ the case would look like this: | |||
187 | using_dac = 0; | 245 | using_dac = 0; |
188 | consistent_using_dac = 0; | 246 | consistent_using_dac = 0; |
189 | } else { | 247 | } else { |
190 | printk(KERN_WARNING | 248 | dev_warn(dev, "mydev: No suitable DMA available\n"); |
191 | "mydev: No suitable DMA available.\n"); | ||
192 | goto ignore_this_device; | 249 | goto ignore_this_device; |
193 | } | 250 | } |
194 | 251 | ||
@@ -201,8 +258,7 @@ Finally, if your device can only drive the low 24-bits of | |||
201 | address you might do something like: | 258 | address you might do something like: |
202 | 259 | ||
203 | if (dma_set_mask(dev, DMA_BIT_MASK(24))) { | 260 | if (dma_set_mask(dev, DMA_BIT_MASK(24))) { |
204 | printk(KERN_WARNING | 261 | dev_warn(dev, "mydev: 24-bit DMA addressing not available\n"); |
205 | "mydev: 24-bit DMA addressing not available.\n"); | ||
206 | goto ignore_this_device; | 262 | goto ignore_this_device; |
207 | } | 263 | } |
208 | 264 | ||
@@ -232,14 +288,14 @@ Here is pseudo-code showing how this might be done: | |||
232 | card->playback_enabled = 1; | 288 | card->playback_enabled = 1; |
233 | } else { | 289 | } else { |
234 | card->playback_enabled = 0; | 290 | card->playback_enabled = 0; |
235 | printk(KERN_WARNING "%s: Playback disabled due to DMA limitations.\n", | 291 | dev_warn(dev, "%s: Playback disabled due to DMA limitations\n", |
236 | card->name); | 292 | card->name); |
237 | } | 293 | } |
238 | if (!dma_set_mask(dev, RECORD_ADDRESS_BITS)) { | 294 | if (!dma_set_mask(dev, RECORD_ADDRESS_BITS)) { |
239 | card->record_enabled = 1; | 295 | card->record_enabled = 1; |
240 | } else { | 296 | } else { |
241 | card->record_enabled = 0; | 297 | card->record_enabled = 0; |
242 | printk(KERN_WARNING "%s: Record disabled due to DMA limitations.\n", | 298 | dev_warn(dev, "%s: Record disabled due to DMA limitations\n", |
243 | card->name); | 299 | card->name); |
244 | } | 300 | } |
245 | 301 | ||
@@ -331,7 +387,7 @@ context with the GFP_ATOMIC flag. | |||
331 | Size is the length of the region you want to allocate, in bytes. | 387 | Size is the length of the region you want to allocate, in bytes. |
332 | 388 | ||
333 | This routine will allocate RAM for that region, so it acts similarly to | 389 | This routine will allocate RAM for that region, so it acts similarly to |
334 | __get_free_pages (but takes size instead of a page order). If your | 390 | __get_free_pages() (but takes size instead of a page order). If your |
335 | driver needs regions sized smaller than a page, you may prefer using | 391 | driver needs regions sized smaller than a page, you may prefer using |
336 | the dma_pool interface, described below. | 392 | the dma_pool interface, described below. |
337 | 393 | ||
@@ -343,11 +399,11 @@ the consistent DMA mask has been explicitly changed via | |||
343 | dma_set_coherent_mask(). This is true of the dma_pool interface as | 399 | dma_set_coherent_mask(). This is true of the dma_pool interface as |
344 | well. | 400 | well. |
345 | 401 | ||
346 | dma_alloc_coherent returns two values: the virtual address which you | 402 | dma_alloc_coherent() returns two values: the virtual address which you |
347 | can use to access it from the CPU and dma_handle which you pass to the | 403 | can use to access it from the CPU and dma_handle which you pass to the |
348 | card. | 404 | card. |
349 | 405 | ||
350 | The cpu return address and the DMA bus master address are both | 406 | The CPU virtual address and the DMA bus address are both |
351 | guaranteed to be aligned to the smallest PAGE_SIZE order which | 407 | guaranteed to be aligned to the smallest PAGE_SIZE order which |
352 | is greater than or equal to the requested size. This invariant | 408 | is greater than or equal to the requested size. This invariant |
353 | exists (for example) to guarantee that if you allocate a chunk | 409 | exists (for example) to guarantee that if you allocate a chunk |
@@ -359,13 +415,13 @@ To unmap and free such a DMA region, you call: | |||
359 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | 415 | dma_free_coherent(dev, size, cpu_addr, dma_handle); |
360 | 416 | ||
361 | where dev, size are the same as in the above call and cpu_addr and | 417 | where dev, size are the same as in the above call and cpu_addr and |
362 | dma_handle are the values dma_alloc_coherent returned to you. | 418 | dma_handle are the values dma_alloc_coherent() returned to you. |
363 | This function may not be called in interrupt context. | 419 | This function may not be called in interrupt context. |
364 | 420 | ||
365 | If your driver needs lots of smaller memory regions, you can write | 421 | If your driver needs lots of smaller memory regions, you can write |
366 | custom code to subdivide pages returned by dma_alloc_coherent, | 422 | custom code to subdivide pages returned by dma_alloc_coherent(), |
367 | or you can use the dma_pool API to do that. A dma_pool is like | 423 | or you can use the dma_pool API to do that. A dma_pool is like |
368 | a kmem_cache, but it uses dma_alloc_coherent not __get_free_pages. | 424 | a kmem_cache, but it uses dma_alloc_coherent(), not __get_free_pages(). |
369 | Also, it understands common hardware constraints for alignment, | 425 | Also, it understands common hardware constraints for alignment, |
370 | like queue heads needing to be aligned on N byte boundaries. | 426 | like queue heads needing to be aligned on N byte boundaries. |
371 | 427 | ||
@@ -381,29 +437,29 @@ type of data is "align" (which is expressed in bytes, and must be a | |||
381 | power of two). If your device has no boundary crossing restrictions, | 437 | power of two). If your device has no boundary crossing restrictions, |
382 | pass 0 for alloc; passing 4096 says memory allocated from this pool | 438 | pass 0 for boundary; passing 4096 says memory allocated from this pool |
383 | must not cross 4KByte boundaries (but at that time it may be better to | 439 | must not cross 4KByte boundaries (but at that time it may be better to |
384 | go for dma_alloc_coherent directly instead). | 440 | use dma_alloc_coherent() directly instead). |
385 | 441 | ||
386 | Allocate memory from a dma pool like this: | 442 | Allocate memory from a DMA pool like this: |
387 | 443 | ||
388 | cpu_addr = dma_pool_alloc(pool, flags, &dma_handle); | 444 | cpu_addr = dma_pool_alloc(pool, flags, &dma_handle); |
389 | 445 | ||
390 | flags are SLAB_KERNEL if blocking is permitted (not in_interrupt nor | 446 | flags are GFP_KERNEL if blocking is permitted (not in_interrupt nor |
391 | holding SMP locks), SLAB_ATOMIC otherwise. Like dma_alloc_coherent, | 447 | holding SMP locks), GFP_ATOMIC otherwise. Like dma_alloc_coherent(), |
392 | this returns two values, cpu_addr and dma_handle. | 448 | this returns two values, cpu_addr and dma_handle. |
393 | 449 | ||
394 | Free memory that was allocated from a dma_pool like this: | 450 | Free memory that was allocated from a dma_pool like this: |
395 | 451 | ||
396 | dma_pool_free(pool, cpu_addr, dma_handle); | 452 | dma_pool_free(pool, cpu_addr, dma_handle); |
397 | 453 | ||
398 | where pool is what you passed to dma_pool_alloc, and cpu_addr and | 454 | where pool is what you passed to dma_pool_alloc(), and cpu_addr and |
399 | dma_handle are the values dma_pool_alloc returned. This function | 455 | dma_handle are the values dma_pool_alloc() returned. This function |
400 | may be called in interrupt context. | 456 | may be called in interrupt context. |
401 | 457 | ||
402 | Destroy a dma_pool by calling: | 458 | Destroy a dma_pool by calling: |
403 | 459 | ||
404 | dma_pool_destroy(pool); | 460 | dma_pool_destroy(pool); |
405 | 461 | ||
406 | Make sure you've called dma_pool_free for all memory allocated | 462 | Make sure you've called dma_pool_free() for all memory allocated |
407 | from a pool before you destroy the pool. This function may not | 463 | from a pool before you destroy the pool. This function may not |
408 | be called in interrupt context. | 464 | be called in interrupt context. |
409 | 465 | ||
@@ -418,7 +474,7 @@ one of the following values: | |||
418 | DMA_FROM_DEVICE | 474 | DMA_FROM_DEVICE |
419 | DMA_NONE | 475 | DMA_NONE |
420 | 476 | ||
421 | One should provide the exact DMA direction if you know it. | 477 | You should provide the exact DMA direction if you know it. |
422 | 478 | ||
423 | DMA_TO_DEVICE means "from main memory to the device" | 479 | DMA_TO_DEVICE means "from main memory to the device" |
424 | DMA_FROM_DEVICE means "from the device to main memory" | 480 | DMA_FROM_DEVICE means "from the device to main memory" |
@@ -489,14 +545,14 @@ and to unmap it: | |||
489 | dma_unmap_single(dev, dma_handle, size, direction); | 545 | dma_unmap_single(dev, dma_handle, size, direction); |
490 | 546 | ||
491 | You should call dma_mapping_error() as dma_map_single() could fail and return | 547 | You should call dma_mapping_error() as dma_map_single() could fail and return |
492 | error. Not all dma implementations support dma_mapping_error() interface. | 548 | error. Not all DMA implementations support the dma_mapping_error() interface. |
493 | However, it is a good practice to call dma_mapping_error() interface, which | 549 | However, it is a good practice to call dma_mapping_error() interface, which |
494 | will invoke the generic mapping error check interface. Doing so will ensure | 550 | will invoke the generic mapping error check interface. Doing so will ensure |
495 | that the mapping code will work correctly on all dma implementations without | 551 | that the mapping code will work correctly on all DMA implementations without |
496 | any dependency on the specifics of the underlying implementation. Using the | 552 | any dependency on the specifics of the underlying implementation. Using the |
497 | returned address without checking for errors could result in failures ranging | 553 | returned address without checking for errors could result in failures ranging |
498 | from panics to silent data corruption. A couple of examples of incorrect ways | 554 | from panics to silent data corruption. A couple of examples of incorrect ways |
499 | to check for errors that make assumptions about the underlying dma | 555 | to check for errors that make assumptions about the underlying DMA |
500 | implementation are as follows and these are applicable to dma_map_page() as | 556 | implementation are as follows and these are applicable to dma_map_page() as |
501 | well. | 557 | well. |
502 | 558 | ||
@@ -516,12 +572,12 @@ Incorrect example 2: | |||
516 | goto map_error; | 572 | goto map_error; |
517 | } | 573 | } |
518 | 574 | ||
519 | You should call dma_unmap_single when the DMA activity is finished, e.g. | 575 | You should call dma_unmap_single() when the DMA activity is finished, e.g., |
520 | from the interrupt which told you that the DMA transfer is done. | 576 | from the interrupt which told you that the DMA transfer is done. |
521 | 577 | ||
522 | Using cpu pointers like this for single mappings has a disadvantage, | 578 | Using cpu pointers like this for single mappings has a disadvantage: |
523 | you cannot reference HIGHMEM memory in this way. Thus, there is a | 579 | you cannot reference HIGHMEM memory in this way. Thus, there is a |
524 | map/unmap interface pair akin to dma_{map,unmap}_single. These | 580 | map/unmap interface pair akin to dma_{map,unmap}_single(). These |
525 | interfaces deal with page/offset pairs instead of cpu pointers. | 581 | interfaces deal with page/offset pairs instead of cpu pointers. |
526 | Specifically: | 582 | Specifically: |
527 | 583 | ||
@@ -550,7 +606,7 @@ Here, "offset" means byte offset within the given page. | |||
550 | You should call dma_mapping_error() as dma_map_page() could fail and return | 606 | You should call dma_mapping_error() as dma_map_page() could fail and return |
551 | error as outlined under the dma_map_single() discussion. | 607 | error as outlined under the dma_map_single() discussion. |
552 | 608 | ||
553 | You should call dma_unmap_page when the DMA activity is finished, e.g. | 609 | You should call dma_unmap_page() when the DMA activity is finished, e.g., |
554 | from the interrupt which told you that the DMA transfer is done. | 610 | from the interrupt which told you that the DMA transfer is done. |
555 | 611 | ||
556 | With scatterlists, you map a region gathered from several regions by: | 612 | With scatterlists, you map a region gathered from several regions by: |
@@ -588,18 +644,16 @@ PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be | |||
588 | it should _NOT_ be the 'count' value _returned_ from the | 644 | it should _NOT_ be the 'count' value _returned_ from the |
589 | dma_map_sg call. | 645 | dma_map_sg call. |
590 | 646 | ||
591 | Every dma_map_{single,sg} call should have its dma_unmap_{single,sg} | 647 | Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}() |
592 | counterpart, because the bus address space is a shared resource (although | 648 | counterpart, because the bus address space is a shared resource and |
593 | in some ports the mapping is per each BUS so less devices contend for the | 649 | you could render the machine unusable by consuming all bus addresses. |
594 | same bus address space) and you could render the machine unusable by eating | ||
595 | all bus addresses. | ||
596 | 650 | ||
597 | If you need to use the same streaming DMA region multiple times and touch | 651 | If you need to use the same streaming DMA region multiple times and touch |
598 | the data in between the DMA transfers, the buffer needs to be synced | 652 | the data in between the DMA transfers, the buffer needs to be synced |
599 | properly in order for the cpu and device to see the most uptodate and | 653 | properly in order for the cpu and device to see the most up-to-date and |
600 | correct copy of the DMA buffer. | 654 | correct copy of the DMA buffer. |
601 | 655 | ||
602 | So, firstly, just map it with dma_map_{single,sg}, and after each DMA | 656 | So, firstly, just map it with dma_map_{single,sg}(), and after each DMA |
603 | transfer call either: | 657 | transfer call either: |
604 | 658 | ||
605 | dma_sync_single_for_cpu(dev, dma_handle, size, direction); | 659 | dma_sync_single_for_cpu(dev, dma_handle, size, direction); |
@@ -623,9 +677,9 @@ or: | |||
623 | as appropriate. | 677 | as appropriate. |
624 | 678 | ||
625 | After the last DMA transfer call one of the DMA unmap routines | 679 | After the last DMA transfer call one of the DMA unmap routines |
626 | dma_unmap_{single,sg}. If you don't touch the data from the first dma_map_* | 680 | dma_unmap_{single,sg}(). If you don't touch the data from the first |
627 | call till dma_unmap_*, then you don't have to call the dma_sync_* | 681 | dma_map_*() call till dma_unmap_*(), then you don't have to call the |
628 | routines at all. | 682 | dma_sync_*() routines at all. |
629 | 683 | ||
630 | Here is pseudo code which shows a situation in which you would need | 684 | Here is pseudo code which shows a situation in which you would need |
631 | to use the dma_sync_*() interfaces. | 685 | to use the dma_sync_*() interfaces. |
@@ -690,12 +744,12 @@ to use the dma_sync_*() interfaces. | |||
690 | } | 744 | } |
691 | } | 745 | } |
692 | 746 | ||
693 | Drivers converted fully to this interface should not use virt_to_bus any | 747 | Drivers converted fully to this interface should not use virt_to_bus() any |
694 | longer, nor should they use bus_to_virt. Some drivers have to be changed a | 748 | longer, nor should they use bus_to_virt(). Some drivers have to be changed a |
695 | little bit, because there is no longer an equivalent to bus_to_virt in the | 749 | little bit, because there is no longer an equivalent to bus_to_virt() in the |
696 | dynamic DMA mapping scheme - you have to always store the DMA addresses | 750 | dynamic DMA mapping scheme - you have to always store the DMA addresses |
697 | returned by the dma_alloc_coherent, dma_pool_alloc, and dma_map_single | 751 | returned by the dma_alloc_coherent(), dma_pool_alloc(), and dma_map_single() |
698 | calls (dma_map_sg stores them in the scatterlist itself if the platform | 752 | calls (dma_map_sg() stores them in the scatterlist itself if the platform |
699 | supports dynamic DMA mapping in hardware) in your driver structures and/or | 753 | supports dynamic DMA mapping in hardware) in your driver structures and/or |
700 | in the card registers. | 754 | in the card registers. |
701 | 755 | ||
@@ -709,9 +763,9 @@ as it is impossible to correctly support them. | |||
709 | DMA address space is limited on some architectures and an allocation | 763 | DMA address space is limited on some architectures and an allocation |
710 | failure can be determined by: | 764 | failure can be determined by: |
711 | 765 | ||
712 | - checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0 | 766 | - checking if dma_alloc_coherent() returns NULL or dma_map_sg() returns 0
713 | 767 | ||
714 | - checking the returned dma_addr_t of dma_map_single and dma_map_page | 768 | - checking the dma_addr_t returned from dma_map_single() and dma_map_page() |
715 | by using dma_mapping_error(): | 769 | by using dma_mapping_error(): |
716 | 770 | ||
717 | dma_addr_t dma_handle; | 771 | dma_addr_t dma_handle; |
@@ -794,7 +848,7 @@ Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when | |||
794 | dma_unmap_single(array[i].dma_addr); | 848 | dma_unmap_single(array[i].dma_addr); |
795 | } | 849 | } |
796 | 850 | ||
797 | Networking drivers must call dev_kfree_skb to free the socket buffer | 851 | Networking drivers must call dev_kfree_skb() to free the socket buffer |
798 | and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook | 852 | and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook |
799 | (ndo_start_xmit). This means that the socket buffer is just dropped in | 853 | (ndo_start_xmit). This means that the socket buffer is just dropped in |
800 | the failure case. | 854 | the failure case. |
@@ -831,7 +885,7 @@ transform some example code. | |||
831 | DEFINE_DMA_UNMAP_LEN(len); | 885 | DEFINE_DMA_UNMAP_LEN(len); |
832 | }; | 886 | }; |
833 | 887 | ||
834 | 2) Use dma_unmap_{addr,len}_set to set these values. | 888 | 2) Use dma_unmap_{addr,len}_set() to set these values. |
835 | Example, before: | 889 | Example, before: |
836 | 890 | ||
837 | ringp->mapping = FOO; | 891 | ringp->mapping = FOO; |
@@ -842,7 +896,7 @@ transform some example code. | |||
842 | dma_unmap_addr_set(ringp, mapping, FOO); | 896 | dma_unmap_addr_set(ringp, mapping, FOO); |
843 | dma_unmap_len_set(ringp, len, BAR); | 897 | dma_unmap_len_set(ringp, len, BAR); |
844 | 898 | ||
845 | 3) Use dma_unmap_{addr,len} to access these values. | 899 | 3) Use dma_unmap_{addr,len}() to access these values. |
846 | Example, before: | 900 | Example, before: |
847 | 901 | ||
848 | dma_unmap_single(dev, ringp->mapping, ringp->len, | 902 | dma_unmap_single(dev, ringp->mapping, ringp->len, |
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index e865279cec58..1147eba43128 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt | |||
@@ -4,22 +4,26 @@ | |||
4 | James E.J. Bottomley <James.Bottomley@HansenPartnership.com> | 4 | James E.J. Bottomley <James.Bottomley@HansenPartnership.com> |
5 | 5 | ||
6 | This document describes the DMA API. For a more gentle introduction | 6 | This document describes the DMA API. For a more gentle introduction |
7 | of the API (and actual examples) see | 7 | of the API (and actual examples), see Documentation/DMA-API-HOWTO.txt. |
8 | Documentation/DMA-API-HOWTO.txt. | ||
9 | 8 | ||
10 | This API is split into two pieces. Part I describes the API. Part II | 9 | This API is split into two pieces. Part I describes the basic API. |
11 | describes the extensions to the API for supporting non-consistent | 10 | Part II describes extensions for supporting non-consistent memory |
12 | memory machines. Unless you know that your driver absolutely has to | 11 | machines. Unless you know that your driver absolutely has to support |
13 | support non-consistent platforms (this is usually only legacy | 12 | non-consistent platforms (this is usually only legacy platforms) you |
14 | platforms) you should only use the API described in part I. | 13 | should only use the API described in part I. |
15 | 14 | ||
16 | Part I - dma_ API | 15 | Part I - dma_ API |
17 | ------------------------------------- | 16 | ------------------------------------- |
18 | 17 | ||
19 | To get the dma_ API, you must #include <linux/dma-mapping.h> | 18 | To get the dma_ API, you must #include <linux/dma-mapping.h>. This |
19 | provides dma_addr_t and the interfaces described below. | ||
20 | 20 | ||
21 | A dma_addr_t can hold any valid DMA or bus address for the platform. It | ||
22 | can be given to a device to use as a DMA source or target. A CPU cannot | ||
23 | reference a dma_addr_t directly because there may be translation between | ||
24 | its physical address space and the bus address space. | ||
21 | 25 | ||
22 | Part Ia - Using large dma-coherent buffers | 26 | Part Ia - Using large DMA-coherent buffers |
23 | ------------------------------------------ | 27 | ------------------------------------------ |
24 | 28 | ||
25 | void * | 29 | void * |
@@ -33,20 +37,21 @@ to make sure to flush the processor's write buffers before telling | |||
33 | devices to read that memory.) | 37 | devices to read that memory.) |
34 | 38 | ||
35 | This routine allocates a region of <size> bytes of consistent memory. | 39 | This routine allocates a region of <size> bytes of consistent memory. |
36 | It also returns a <dma_handle> which may be cast to an unsigned | ||
37 | integer the same width as the bus and used as the physical address | ||
38 | base of the region. | ||
39 | 40 | ||
40 | Returns: a pointer to the allocated region (in the processor's virtual | 41 | It returns a pointer to the allocated region (in the processor's virtual |
41 | address space) or NULL if the allocation failed. | 42 | address space) or NULL if the allocation failed. |
42 | 43 | ||
44 | It also returns a <dma_handle> which may be cast to an unsigned integer the | ||
45 | same width as the bus and given to the device as the bus address base of | ||
46 | the region. | ||
47 | |||
43 | Note: consistent memory can be expensive on some platforms, and the | 48 | Note: consistent memory can be expensive on some platforms, and the |
44 | minimum allocation length may be as big as a page, so you should | 49 | minimum allocation length may be as big as a page, so you should |
45 | consolidate your requests for consistent memory as much as possible. | 50 | consolidate your requests for consistent memory as much as possible. |
46 | The simplest way to do that is to use the dma_pool calls (see below). | 51 | The simplest way to do that is to use the dma_pool calls (see below). |
47 | 52 | ||
48 | The flag parameter (dma_alloc_coherent only) allows the caller to | 53 | The flag parameter (dma_alloc_coherent() only) allows the caller to |
49 | specify the GFP_ flags (see kmalloc) for the allocation (the | 54 | specify the GFP_ flags (see kmalloc()) for the allocation (the |
50 | implementation may choose to ignore flags that affect the location of | 55 | implementation may choose to ignore flags that affect the location of |
51 | the returned memory, like GFP_DMA). | 56 | the returned memory, like GFP_DMA). |
52 | 57 | ||
@@ -61,24 +66,24 @@ void | |||
61 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 66 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
62 | dma_addr_t dma_handle) | 67 | dma_addr_t dma_handle) |
63 | 68 | ||
64 | Free the region of consistent memory you previously allocated. dev, | 69 | Free a region of consistent memory you previously allocated. dev, |
65 | size and dma_handle must all be the same as those passed into the | 70 | size and dma_handle must all be the same as those passed into |
66 | consistent allocate. cpu_addr must be the virtual address returned by | 71 | dma_alloc_coherent(). cpu_addr must be the virtual address returned by |
67 | the consistent allocate. | 72 | dma_alloc_coherent(). |
68 | 73 | ||
69 | Note that unlike their sibling allocation calls, these routines | 74 | Note that unlike their sibling allocation calls, these routines |
70 | may only be called with IRQs enabled. | 75 | may only be called with IRQs enabled. |
71 | 76 | ||
72 | 77 | ||
73 | Part Ib - Using small dma-coherent buffers | 78 | Part Ib - Using small DMA-coherent buffers |
74 | ------------------------------------------ | 79 | ------------------------------------------ |
75 | 80 | ||
76 | To get this part of the dma_ API, you must #include <linux/dmapool.h> | 81 | To get this part of the dma_ API, you must #include <linux/dmapool.h> |
77 | 82 | ||
78 | Many drivers need lots of small dma-coherent memory regions for DMA | 83 | Many drivers need lots of small DMA-coherent memory regions for DMA |
79 | descriptors or I/O buffers. Rather than allocating in units of a page | 84 | descriptors or I/O buffers. Rather than allocating in units of a page |
80 | or more using dma_alloc_coherent(), you can use DMA pools. These work | 85 | or more using dma_alloc_coherent(), you can use DMA pools. These work |
81 | much like a struct kmem_cache, except that they use the dma-coherent allocator, | 86 | much like a struct kmem_cache, except that they use the DMA-coherent allocator, |
82 | not __get_free_pages(). Also, they understand common hardware constraints | 87 | not __get_free_pages(). Also, they understand common hardware constraints |
83 | for alignment, like queue heads needing to be aligned on N-byte boundaries. | 88 | for alignment, like queue heads needing to be aligned on N-byte boundaries. |
84 | 89 | ||
@@ -87,7 +92,7 @@ for alignment, like queue heads needing to be aligned on N-byte boundaries. | |||
87 | dma_pool_create(const char *name, struct device *dev, | 92 | dma_pool_create(const char *name, struct device *dev, |
88 | size_t size, size_t align, size_t alloc); | 93 | size_t size, size_t align, size_t alloc); |
89 | 94 | ||
90 | The pool create() routines initialize a pool of dma-coherent buffers | 95 | dma_pool_create() initializes a pool of DMA-coherent buffers |
91 | for use with a given device. It must be called in a context which | 96 | for use with a given device. It must be called in a context which |
92 | can sleep. | 97 | can sleep. |
93 | 98 | ||
@@ -102,25 +107,26 @@ from this pool must not cross 4KByte boundaries. | |||
102 | void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, | 107 | void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, |
103 | dma_addr_t *dma_handle); | 108 | dma_addr_t *dma_handle); |
104 | 109 | ||
105 | This allocates memory from the pool; the returned memory will meet the size | 110 | This allocates memory from the pool; the returned memory will meet the |
106 | and alignment requirements specified at creation time. Pass GFP_ATOMIC to | 111 | size and alignment requirements specified at creation time. Pass |
107 | prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks), | 112 | GFP_ATOMIC to prevent blocking, or if it's permitted (not |
108 | pass GFP_KERNEL to allow blocking. Like dma_alloc_coherent(), this returns | 113 | in_interrupt, not holding SMP locks), pass GFP_KERNEL to allow |
109 | two values: an address usable by the cpu, and the dma address usable by the | 114 | blocking. Like dma_alloc_coherent(), this returns two values: an |
110 | pool's device. | 115 | address usable by the cpu, and the DMA address usable by the pool's |
116 | device. | ||
111 | 117 | ||
112 | 118 | ||
113 | void dma_pool_free(struct dma_pool *pool, void *vaddr, | 119 | void dma_pool_free(struct dma_pool *pool, void *vaddr, |
114 | dma_addr_t addr); | 120 | dma_addr_t addr); |
115 | 121 | ||
116 | This puts memory back into the pool. The pool is what was passed to | 122 | This puts memory back into the pool. The pool is what was passed to |
117 | the pool allocation routine; the cpu (vaddr) and dma addresses are what | 123 | dma_pool_alloc(); the cpu (vaddr) and DMA addresses are what |
118 | were returned when that routine allocated the memory being freed. | 124 | were returned when that routine allocated the memory being freed. |
119 | 125 | ||
120 | 126 | ||
121 | void dma_pool_destroy(struct dma_pool *pool); | 127 | void dma_pool_destroy(struct dma_pool *pool); |
122 | 128 | ||
123 | The pool destroy() routines free the resources of the pool. They must be | 129 | dma_pool_destroy() frees the resources of the pool. It must be |
124 | called in a context which can sleep. Make sure you've freed all allocated | 130 | called in a context which can sleep. Make sure you've freed all allocated |
125 | memory back to the pool before you destroy it. | 131 | memory back to the pool before you destroy it. |
126 | 132 | ||
@@ -187,9 +193,9 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, | |||
187 | enum dma_data_direction direction) | 193 | enum dma_data_direction direction) |
188 | 194 | ||
189 | Maps a piece of processor virtual memory so it can be accessed by the | 195 | Maps a piece of processor virtual memory so it can be accessed by the |
190 | device and returns the physical handle of the memory. | 196 | device and returns the bus address of the memory. |
191 | 197 | ||
192 | The direction for both api's may be converted freely by casting. | 198 | The direction for both APIs may be converted freely by casting. |
193 | However the dma_ API uses a strongly typed enumerator for its | 199 | However the dma_ API uses a strongly typed enumerator for its |
194 | direction: | 200 | direction: |
195 | 201 | ||
@@ -198,31 +204,30 @@ DMA_TO_DEVICE data is going from the memory to the device | |||
198 | DMA_FROM_DEVICE data is coming from the device to the memory | 204 | DMA_FROM_DEVICE data is coming from the device to the memory |
199 | DMA_BIDIRECTIONAL direction isn't known | 205 | DMA_BIDIRECTIONAL direction isn't known |
200 | 206 | ||
201 | Notes: Not all memory regions in a machine can be mapped by this | 207 | Notes: Not all memory regions in a machine can be mapped by this API. |
202 | API. Further, regions that appear to be physically contiguous in | 208 | Further, contiguous kernel virtual space may not be contiguous as |
203 | kernel virtual space may not be contiguous as physical memory. Since | 209 | physical memory. Since this API does not provide any scatter/gather |
204 | this API does not provide any scatter/gather capability, it will fail | 210 | capability, it will fail if the user tries to map a non-physically |
205 | if the user tries to map a non-physically contiguous piece of memory. | 211 | contiguous piece of memory. For this reason, memory to be mapped by |
206 | For this reason, it is recommended that memory mapped by this API be | 212 | this API should be obtained from sources which guarantee it to be |
207 | obtained only from sources which guarantee it to be physically contiguous | 213 | physically contiguous (like kmalloc). |
208 | (like kmalloc). | 214 | |
209 | 215 | Further, the bus address of the memory must be within the | |
210 | Further, the physical address of the memory must be within the | 216 | dma_mask of the device (the dma_mask is a bit mask of the |
211 | dma_mask of the device (the dma_mask represents a bit mask of the | 217 | addressable region for the device, i.e., if the bus address of |
212 | addressable region for the device. I.e., if the physical address of | 218 | the memory ANDed with the dma_mask is still equal to the bus |
213 | the memory anded with the dma_mask is still equal to the physical | 219 | address, then the device can perform DMA to the memory). To |
214 | address, then the device can perform DMA to the memory). In order to | ||
215 | ensure that the memory allocated by kmalloc is within the dma_mask, | 220 | ensure that the memory allocated by kmalloc is within the dma_mask, |
216 | the driver may specify various platform-dependent flags to restrict | 221 | the driver may specify various platform-dependent flags to restrict |
217 | the physical memory range of the allocation (e.g. on x86, GFP_DMA | 222 | the bus address range of the allocation (e.g., on x86, GFP_DMA |
218 | guarantees to be within the first 16Mb of available physical memory, | 223 | guarantees to be within the first 16MB of available bus addresses, |
219 | as required by ISA devices). | 224 | as required by ISA devices). |
220 | 225 | ||
221 | Note also that the above constraints on physical contiguity and | 226 | Note also that the above constraints on physical contiguity and |
222 | dma_mask may not apply if the platform has an IOMMU (a device which | 227 | dma_mask may not apply if the platform has an IOMMU (a device which |
223 | supplies a physical to virtual mapping between the I/O memory bus and | 228 | maps an I/O bus address to a physical memory address). However, to be |
224 | the device). However, to be portable, device driver writers may *not* | 229 | portable, device driver writers may *not* assume that such an IOMMU |
225 | assume that such an IOMMU exists. | 230 | exists. |
226 | 231 | ||
227 | Warnings: Memory coherency operates at a granularity called the cache | 232 | Warnings: Memory coherency operates at a granularity called the cache |
228 | line width. In order for memory mapped by this API to operate | 233 | line width. In order for memory mapped by this API to operate |
@@ -281,9 +286,9 @@ cache width is. | |||
281 | int | 286 | int |
282 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 287 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
283 | 288 | ||
284 | In some circumstances dma_map_single and dma_map_page will fail to create | 289 | In some circumstances dma_map_single() and dma_map_page() will fail to create |
285 | a mapping. A driver can check for these errors by testing the returned | 290 | a mapping. A driver can check for these errors by testing the returned |
286 | dma address with dma_mapping_error(). A non-zero return value means the mapping | 291 | DMA address with dma_mapping_error(). A non-zero return value means the mapping |
287 | could not be created and the driver should take appropriate action (e.g. | 292 | could not be created and the driver should take appropriate action (e.g. |
288 | reduce current DMA mapping usage or delay and try again later). | 293 | reduce current DMA mapping usage or delay and try again later). |
289 | 294 | ||
@@ -291,7 +296,7 @@ reduce current DMA mapping usage or delay and try again later). | |||
291 | dma_map_sg(struct device *dev, struct scatterlist *sg, | 296 | dma_map_sg(struct device *dev, struct scatterlist *sg, |
292 | int nents, enum dma_data_direction direction) | 297 | int nents, enum dma_data_direction direction) |
293 | 298 | ||
294 | Returns: the number of physical segments mapped (this may be shorter | 299 | Returns: the number of bus address segments mapped (this may be shorter |
295 | than <nents> passed in if some elements of the scatter/gather list are | 300 | than <nents> passed in if some elements of the scatter/gather list are |
296 | physically or virtually adjacent and an IOMMU maps them with a single | 301 | physically or virtually adjacent and an IOMMU maps them with a single |
297 | entry). | 302 | entry). |
@@ -299,7 +304,7 @@ entry). | |||
299 | Please note that the sg cannot be mapped again if it has been mapped once. | 304 | Please note that the sg cannot be mapped again if it has been mapped once. |
300 | The mapping process is allowed to destroy information in the sg. | 305 | The mapping process is allowed to destroy information in the sg. |
301 | 306 | ||
302 | As with the other mapping interfaces, dma_map_sg can fail. When it | 307 | As with the other mapping interfaces, dma_map_sg() can fail. When it |
303 | does, 0 is returned and a driver must take appropriate action. It is | 308 | does, 0 is returned and a driver must take appropriate action. It is |
304 | critical that the driver do something, in the case of a block driver | 309 | critical that the driver do something, in the case of a block driver |
305 | aborting the request or even oopsing is better than doing nothing and | 310 | aborting the request or even oopsing is better than doing nothing and |
@@ -335,7 +340,7 @@ must be the same as those and passed in to the scatter/gather mapping | |||
335 | API. | 340 | API. |
336 | 341 | ||
337 | Note: <nents> must be the number you passed in, *not* the number of | 342 | Note: <nents> must be the number you passed in, *not* the number of |
338 | physical entries returned. | 343 | bus address entries returned. |
339 | 344 | ||
340 | void | 345 | void |
341 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | 346 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, |
@@ -391,10 +396,10 @@ The four functions above are just like the counterpart functions | |||
391 | without the _attrs suffixes, except that they pass an optional | 396 | without the _attrs suffixes, except that they pass an optional |
392 | struct dma_attrs*. | 397 | struct dma_attrs*. |
393 | 398 | ||
394 | struct dma_attrs encapsulates a set of "dma attributes". For the | 399 | struct dma_attrs encapsulates a set of "DMA attributes". For the |
395 | definition of struct dma_attrs see linux/dma-attrs.h. | 400 | definition of struct dma_attrs see linux/dma-attrs.h. |
396 | 401 | ||
397 | The interpretation of dma attributes is architecture-specific, and | 402 | The interpretation of DMA attributes is architecture-specific, and |
398 | each attribute should be documented in Documentation/DMA-attributes.txt. | 403 | each attribute should be documented in Documentation/DMA-attributes.txt. |
399 | 404 | ||
400 | If struct dma_attrs* is NULL, the semantics of each of these | 405 | If struct dma_attrs* is NULL, the semantics of each of these |
@@ -458,7 +463,7 @@ Note: where the platform can return consistent memory, it will | |||
458 | guarantee that the sync points become nops. | 463 | guarantee that the sync points become nops. |
459 | 464 | ||
460 | Warning: Handling non-consistent memory is a real pain. You should | 465 | Warning: Handling non-consistent memory is a real pain. You should |
461 | only ever use this API if you positively know your driver will be | 466 | only use this API if you positively know your driver will be |
462 | required to work on one of the rare (usually non-PCI) architectures | 467 | required to work on one of the rare (usually non-PCI) architectures |
463 | that simply cannot make consistent memory. | 468 | that simply cannot make consistent memory. |
464 | 469 | ||
@@ -496,26 +501,26 @@ dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | |||
496 | dma_addr_t device_addr, size_t size, int | 501 | dma_addr_t device_addr, size_t size, int |
497 | flags) | 502 | flags) |
498 | 503 | ||
499 | Declare region of memory to be handed out by dma_alloc_coherent when | 504 | Declare region of memory to be handed out by dma_alloc_coherent() when |
500 | it's asked for coherent memory for this device. | 505 | it's asked for coherent memory for this device. |
501 | 506 | ||
502 | bus_addr is the physical address to which the memory is currently | 507 | bus_addr is the physical address to which the memory is currently |
503 | assigned in the bus responding region (this will be used by the | 508 | assigned in the bus responding region (this will be used by the |
504 | platform to perform the mapping). | 509 | platform to perform the mapping). |
505 | 510 | ||
506 | device_addr is the physical address the device needs to be programmed | 511 | device_addr is the bus address the device needs to be programmed |
507 | with in order to address this memory (this will be handed out as the | 512 | with in order to address this memory (this will be handed out as the |
508 | dma_addr_t in dma_alloc_coherent()). | 513 | dma_addr_t in dma_alloc_coherent()). |
509 | 514 | ||
510 | size is the size of the area (must be multiples of PAGE_SIZE). | 515 | size is the size of the area (must be multiples of PAGE_SIZE). |
511 | 516 | ||
512 | flags can be or'd together and are: | 517 | flags can be ORed together and are: |
513 | 518 | ||
514 | DMA_MEMORY_MAP - request that the memory returned from | 519 | DMA_MEMORY_MAP - request that the memory returned from |
515 | dma_alloc_coherent() be directly writable. | 520 | dma_alloc_coherent() be directly writable. |
516 | 521 | ||
517 | DMA_MEMORY_IO - request that the memory returned from | 522 | DMA_MEMORY_IO - request that the memory returned from |
518 | dma_alloc_coherent() be addressable using read/write/memcpy_toio etc. | 523 | dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc. |
519 | 524 | ||
520 | One or both of these flags must be present. | 525 | One or both of these flags must be present. |
521 | 526 | ||
@@ -572,7 +577,7 @@ region is occupied. | |||
572 | Part III - Debug drivers use of the DMA-API | 577 | Part III - Debug drivers use of the DMA-API |
573 | ------------------------------------------- | 578 | ------------------------------------------- |
574 | 579 | ||
575 | The DMA-API as described above as some constraints. DMA addresses must be | 580 | The DMA-API as described above has some constraints. DMA addresses must be |
576 | released with the corresponding function with the same size for example. With | 581 | released with the corresponding function with the same size for example. With |
577 | the advent of hardware IOMMUs it becomes more and more important that drivers | 582 | the advent of hardware IOMMUs it becomes more and more important that drivers |
578 | do not violate those constraints. In the worst case such a violation can | 583 | do not violate those constraints. In the worst case such a violation can |
@@ -690,11 +695,11 @@ architectural default. | |||
690 | void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | 695 | void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
691 | 696 | ||
692 | dma-debug interface debug_dma_mapping_error() to debug drivers that fail | 697 | dma-debug interface debug_dma_mapping_error() to debug drivers that fail |
693 | to check dma mapping errors on addresses returned by dma_map_single() and | 698 | to check DMA mapping errors on addresses returned by dma_map_single() and |
694 | dma_map_page() interfaces. This interface clears a flag set by | 699 | dma_map_page() interfaces. This interface clears a flag set by |
695 | debug_dma_map_page() to indicate that dma_mapping_error() has been called by | 700 | debug_dma_map_page() to indicate that dma_mapping_error() has been called by |
696 | the driver. When driver does unmap, debug_dma_unmap() checks the flag and if | 701 | the driver. When driver does unmap, debug_dma_unmap() checks the flag and if |
697 | this flag is still set, prints warning message that includes call trace that | 702 | this flag is still set, prints warning message that includes call trace that |
698 | leads up to the unmap. This interface can be called from dma_mapping_error() | 703 | leads up to the unmap. This interface can be called from dma_mapping_error() |
699 | routines to enable dma mapping error check debugging. | 704 | routines to enable DMA mapping error check debugging. |
700 | 705 | ||
diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt index e767805b4182..b1a19835e907 100644 --- a/Documentation/DMA-ISA-LPC.txt +++ b/Documentation/DMA-ISA-LPC.txt | |||
@@ -16,7 +16,7 @@ To do ISA style DMA you need to include two headers: | |||
16 | #include <asm/dma.h> | 16 | #include <asm/dma.h> |
17 | 17 | ||
18 | The first is the generic DMA API used to convert virtual addresses to | 18 | The first is the generic DMA API used to convert virtual addresses to |
19 | physical addresses (see Documentation/DMA-API.txt for details). | 19 | bus addresses (see Documentation/DMA-API.txt for details). |
20 | 20 | ||
21 | The second contains the routines specific to ISA DMA transfers. Since | 21 | The second contains the routines specific to ISA DMA transfers. Since |
22 | this is not present on all platforms make sure you construct your | 22 | this is not present on all platforms make sure you construct your |
@@ -50,7 +50,7 @@ early as possible and not release it until the driver is unloaded.) | |||
50 | Part III - Address translation | 50 | Part III - Address translation |
51 | ------------------------------ | 51 | ------------------------------ |
52 | 52 | ||
53 | To translate the virtual address to a physical use the normal DMA | 53 | To translate the virtual address to a bus address, use the normal DMA |
54 | API. Do _not_ use isa_virt_to_phys() even though it does the same | 54 | API. Do _not_ use isa_virt_to_phys() even though it does the same |
55 | thing. The reason for this is that the function isa_virt_to_phys() | 55 | thing. The reason for this is that the function isa_virt_to_phys() |
56 | will require a Kconfig dependency to ISA, not just ISA_DMA_API which | 56 | will require a Kconfig dependency to ISA, not just ISA_DMA_API which |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index fd4aee29ad10..b9aa2b97aab5 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -8,6 +8,12 @@ | |||
8 | #include <linux/dma-direction.h> | 8 | #include <linux/dma-direction.h> |
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
10 | 10 | ||
11 | /* | ||
12 | * A dma_addr_t can hold any valid DMA or bus address for the platform. | ||
13 | * It can be given to a device to use as a DMA source or target. A CPU cannot | ||
14 | * reference a dma_addr_t directly because there may be translation between | ||
15 | * its physical address space and the bus address space. | ||
16 | */ | ||
11 | struct dma_map_ops { | 17 | struct dma_map_ops { |
12 | void* (*alloc)(struct device *dev, size_t size, | 18 | void* (*alloc)(struct device *dev, size_t size, |
13 | dma_addr_t *dma_handle, gfp_t gfp, | 19 | dma_addr_t *dma_handle, gfp_t gfp, |
diff --git a/include/linux/types.h b/include/linux/types.h index 4d118ba11349..a0bb7048687f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -142,6 +142,7 @@ typedef unsigned long blkcnt_t; | |||
142 | #define pgoff_t unsigned long | 142 | #define pgoff_t unsigned long |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | /* A dma_addr_t can hold any valid DMA or bus address for the platform */ | ||
145 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 146 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
146 | typedef u64 dma_addr_t; | 147 | typedef u64 dma_addr_t; |
147 | #else | 148 | #else |