Diffstat (limited to 'Documentation/DMA-API-HOWTO.txt')
 -rw-r--r--  Documentation/DMA-API-HOWTO.txt | 85
 1 file changed, 49 insertions(+), 36 deletions(-)
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 2e435adfbd6b..98ce51796f71 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -639,6 +639,36 @@ is planned to completely remove virt_to_bus() and bus_to_virt() as
 they are entirely deprecated. Some ports already do not provide these
 as it is impossible to correctly support them.
 
+			Handling Errors
+
+DMA address space is limited on some architectures and an allocation
+failure can be determined by:
+
+- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
+
+- checking the returned dma_addr_t of dma_map_single and dma_map_page
+  by using dma_mapping_error():
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_single(dev, addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/*
+		 * reduce current DMA mapping usage,
+		 * delay and try again later or
+		 * reset driver.
+		 */
+	}
+
+Networking drivers must call dev_kfree_skb to free the socket buffer
+and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook
+(ndo_start_xmit). This means that the socket buffer is just dropped in
+the failure case.
+
+SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping
+fails in the queuecommand hook. This means that the SCSI subsystem
+passes the command to the driver again later.
+
 			Optimizing Unmap State Space Consumption
 
 On many platforms, dma_unmap_{single,page}() is simply a nop.
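The first bullet in the hunk above names two return-value conventions for
allocation failure. A minimal sketch of both checks follows; struct my_dev,
its fields, MY_BUF_SIZE and my_setup_dma() are hypothetical stand-ins for a
driver's own state, not anything from the patch itself:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	#define MY_BUF_SIZE	4096	/* hypothetical buffer size */

	struct my_dev {			/* hypothetical driver state */
		void *buf;
		dma_addr_t buf_dma;
		struct scatterlist *sgl;
		int nents;
		int mapped_nents;
	};

	static int my_setup_dma(struct device *dev, struct my_dev *mydev)
	{
		int count;

		/* dma_alloc_coherent() reports failure by returning NULL */
		mydev->buf = dma_alloc_coherent(dev, MY_BUF_SIZE,
						&mydev->buf_dma, GFP_KERNEL);
		if (!mydev->buf)
			return -ENOMEM;

		/* dma_map_sg() reports failure by returning 0 mapped entries */
		count = dma_map_sg(dev, mydev->sgl, mydev->nents, DMA_TO_DEVICE);
		if (count == 0) {
			dma_free_coherent(dev, MY_BUF_SIZE, mydev->buf,
					  mydev->buf_dma);
			return -ENOMEM;
		}
		mydev->mapped_nents = count;

		return 0;
	}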
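For the networking rule in the hunk above, a sketch of a transmit hook that
drops the packet on a mapping failure; struct my_priv, its dmadev field and
my_hw_queue_frame() are hypothetical:

	#include <linux/netdevice.h>
	#include <linux/dma-mapping.h>

	struct my_priv {		/* hypothetical per-device state */
		struct device *dmadev;
	};

	/* Hypothetical: hand the mapped frame to the hardware ring */
	static void my_hw_queue_frame(struct my_priv *priv, dma_addr_t mapping,
				      struct sk_buff *skb)
	{
		/* device-specific descriptor setup would go here */
	}

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		dma_addr_t mapping;

		mapping = dma_map_single(priv->dmadev, skb->data, skb->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dmadev, mapping)) {
			/* Drop the packet: free the skb, report success */
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		my_hw_queue_frame(priv, mapping, skb);
		return NETDEV_TX_OK;
	}

Returning NETDEV_TX_OK rather than an error keeps the stack from trying to
requeue a packet the driver has already freed.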
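Likewise for the SCSI rule. In the sketch below, my_map_command() and
my_hw_issue() are hypothetical helpers, and the two-argument queuecommand
signature matches the kernels of this patch's era:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>
	#include <linux/errno.h>

	static int my_map_command(struct scsi_cmnd *cmd)
	{
		/* hypothetical: typically wraps scsi_dma_map() */
		return scsi_dma_map(cmd) < 0 ? -ENOMEM : 0;
	}

	static void my_hw_issue(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
	{
		/* device-specific command submission would go here */
	}

	static int my_queuecommand(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
	{
		/* On a DMA mapping failure, ask the midlayer to retry later */
		if (my_map_command(cmd) < 0)
			return SCSI_MLQUEUE_HOST_BUSY;

		my_hw_issue(cmd, done);
		return 0;
	}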
@@ -703,42 +733,25 @@ to "Closing".
 
 1) Struct scatterlist requirements.
 
-Struct scatterlist must contain, at a minimum, the following
-members:
-
-	struct page *page;
-	unsigned int offset;
-	unsigned int length;
-
-The base address is specified by a "page+offset" pair.
-
-Previous versions of struct scatterlist contained a "void *address"
-field that was sometimes used instead of page+offset. As of Linux
-2.5., page+offset is always used, and the "address" field has been
-deleted.
-
-2) More to come...
-
-			Handling Errors
-
-DMA address space is limited on some architectures and an allocation
-failure can be determined by:
-
-- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
-
-- checking the returned dma_addr_t of dma_map_single and dma_map_page
-  by using dma_mapping_error():
-
-	dma_addr_t dma_handle;
-
-	dma_handle = dma_map_single(dev, addr, size, direction);
-	if (dma_mapping_error(dev, dma_handle)) {
-		/*
-		 * reduce current DMA mapping usage,
-		 * delay and try again later or
-		 * reset driver.
-		 */
-	}
+Don't invent the architecture specific struct scatterlist; just use
+<asm-generic/scatterlist.h>. You need to enable
+CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs
+(including software IOMMU).
+
+2) ARCH_KMALLOC_MINALIGN
+
+Architectures must ensure that kmalloc'ed buffer is
+DMA-safe. Drivers and subsystems depend on it. If an architecture
+isn't fully DMA-coherent (i.e. hardware doesn't ensure that data in
+the CPU cache is identical to data in main memory),
+ARCH_KMALLOC_MINALIGN must be set so that the memory allocator
+makes sure that kmalloc'ed buffer doesn't share a cache line with
+the others. See arch/arm/include/asm/cache.h as an example.
+
+Note that ARCH_KMALLOC_MINALIGN is about DMA memory alignment
+constraints. You don't need to worry about the architecture data
+alignment constraints (e.g. the alignment constraints about 64-bit
+objects).
 
 			Closing
 
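Since the second hunk tells architectures not to invent their own struct
scatterlist layout, portable driver code should read mapped entries only
through the accessor macros rather than raw struct fields. A sketch, with
my_hw_program_segment() and my_program_sg() as hypothetical helpers:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/errno.h>

	static void my_hw_program_segment(dma_addr_t addr, unsigned int len)
	{
		/* device-specific descriptor programming would go here */
	}

	static int my_program_sg(struct device *dev, struct scatterlist *sgl,
				 int nents)
	{
		struct scatterlist *sg;
		int i, count;

		count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
		if (count == 0)
			return -ENOMEM;

		/* use sg_dma_address()/sg_dma_len(), never raw fields */
		for_each_sg(sgl, sg, count, i)
			my_hw_program_segment(sg_dma_address(sg),
					      sg_dma_len(sg));

		return 0;
	}

On IOMMU-capable architectures sg_dma_len() can differ from the CPU-side
length of an entry, which is why the hunk requires CONFIG_NEED_SG_DMA_LENGTH
there.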
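The ARCH_KMALLOC_MINALIGN paragraph cites arch/arm/include/asm/cache.h as an
example. The shape of that approach, paraphrased rather than copied verbatim,
is simply to pin the minimum kmalloc alignment to the L1 cache line size on a
non-coherent architecture:

	/*
	 * Paraphrase of the arch/arm/include/asm/cache.h approach: a
	 * kmalloc'ed buffer aligned and padded to a whole cache line
	 * cannot share that line with unrelated data, which is what
	 * makes it safe to DMA into on a non-coherent machine. The
	 * shift value here is illustrative; ARM derives it from a
	 * config option.
	 */
	#define L1_CACHE_SHIFT		5
	#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES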
