Diffstat (limited to 'lib/swiotlb.c')
 lib/swiotlb.c (-rw-r--r--) | 119
 1 file changed, 62 insertions(+), 57 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7d2ced..bffe6d7ef9d9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in unmap_single and
+ * sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
 	return paddr;
 }
 
-phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
 {
 	return baddr;
 }
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
-static void *swiotlb_bus_to_virt(dma_addr_t address)
+void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
 {
-	return phys_to_virt(swiotlb_bus_to_phys(address));
+	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
+}
+
+int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
+					      dma_addr_t addr, size_t size)
+{
+	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
 }
 
 int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -309,10 +315,10 @@ cleanup1:
 	return -ENOMEM;
 }
 
-static int
+static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
 }
 
 static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
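Both swiotlb_bus_to_virt() and the new swiotlb_arch_address_needs_mapping() are declared __weak, so the bodies above are only generic defaults: an architecture that needs a different translation or reachability rule can supply a strong definition of the same symbol and the linker will use it, with address_needs_mapping() picking the override up automatically. A minimal sketch of such an override (the ARCH_DIRECT_DMA_LIMIT cutoff is hypothetical and not part of this patch):

int swiotlb_arch_address_needs_mapping(struct device *hwdev,
				       dma_addr_t addr, size_t size)
{
	/* hypothetical platform rule: bounce anything above a fixed cutoff */
	if (addr + size > ARCH_DIRECT_DMA_LIMIT)
		return 1;

	/* otherwise fall back to the generic DMA-mask test */
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}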
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 	unsigned long flags;
 
 	while (size) {
-		sz = min(PAGE_SIZE - offset, size);
+		sz = min_t(size_t, PAGE_SIZE - offset, size);
 
 		local_irq_save(flags);
 		buffer = kmap_atomic(pfn_to_page(pfn),
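The min() to min_t() switch is about types rather than values: PAGE_SIZE - offset has type unsigned long while size is a size_t, and on configurations where those types differ the kernel's min() macro refuses the mixed-type comparison at build time, whereas min_t() casts both operands to the named type first. Roughly, the two macros in <linux/kernel.h> of this era look like the following (reproduced from memory, so treat it as a sketch):

#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		/* warns when the types differ */ \
	_min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y) ({			\
	type __min1 = (x);			\
	type __min2 = (y);			\
	__min1 < __min2 ? __min1 : __min2; })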
@@ -476,7 +482,7 @@ found:
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
-		 * Fall back on swiotlb_map_single().
 		 */
 		free_pages((unsigned long) ret, order);
 		ret = NULL;
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (!ret) {
 		/*
 		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on
-		 * swiotlb_map_single(), which will grab memory from
-		 * the lowest available address range.
+		 * to GFP_DMA memory; fall back on map_single(), which
+		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
 		if (!ret)
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    unsigned long offset, size_t size,
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    struct dma_attrs *attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
-	void *ptr = page_address(page) + offset;
 	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
-	 * If the pointer passed in happens to be in the device's DMA window,
+	 * If the address happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
 	if (!address_needs_mapping(dev, dev_addr, size) &&
-	    !range_needs_mapping(virt_to_phys(ptr), size))
+	    !range_needs_mapping(phys, size))
 		return dev_addr;
 
 	/*
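Dropping the intermediate kernel-virtual pointer matters for highmem: a highmem page has no permanent kernel mapping, so page_address() may return NULL and a virt_to_phys() on it is meaningless, while the physical address computed from the page itself is always valid for the range check. Drivers reach this path through the generic DMA API; a hedged driver-side sketch (the device pointer, length and error handling are illustrative, not from this file):

	dma_addr_t bus_addr;

	/* map one page for device reads; swiotlb may bounce it */
	bus_addr = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bus_addr))
		return -ENOMEM;

	/* ... program the hardware with bus_addr ... */

	dma_unmap_page(dev, bus_addr, len, DMA_TO_DEVICE);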
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call.  All
+ * match what was provided for in a previous swiotlb_map_page call.  All
  * other usages are undefined.
  *
  * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
  */
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+			 size_t size, int dir)
+{
+	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+
+	BUG_ON(dir == DMA_NONE);
+
+	if (is_swiotlb_buffer(dma_addr)) {
+		do_unmap_single(hwdev, dma_addr, size, dir);
+		return;
+	}
+
+	if (dir != DMA_FROM_DEVICE)
+		return;
+
+	dma_mark_clean(dma_addr, size);
+}
+
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			size_t size, enum dma_data_direction dir,
 			struct dma_attrs *attrs)
 {
-	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
-
-	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(dma_addr))
-		unmap_single(hwdev, dma_addr, size, dir);
-	else if (dir == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
+	unmap_single(hwdev, dev_addr, size, dir);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
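With the bus-to-virtual translation folded into the new unmap_single() helper, every caller — swiotlb_unmap_page() here and the scatter-gather path further down — now simply hands over the dma_addr_t it received from the map side. The is_swiotlb_buffer() check the helper relies on is just a range test against the bounce pool bounded by io_tlb_start/io_tlb_end declared near the top of this file; a reference sketch of that helper as it existed around this version (it is not part of this patch):

static int is_swiotlb_buffer(char *addr)
{
	/* does the virtual address fall inside the swiotlb bounce pool? */
	return addr >= io_tlb_start && addr < io_tlb_end;
}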
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
  *
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
  * using the cpu, yet do not wish to teardown the dma mapping, you must
  * call this function before doing so.  At the next point you give the dma
  * address back to the card, you must first perform a
@@ -713,13 +728,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(dma_addr))
+
+	if (is_swiotlb_buffer(dma_addr)) {
 		sync_single(hwdev, dma_addr, size, dir, target);
-	else if (dir == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
+		return;
+	}
+
+	if (dir != DMA_FROM_DEVICE)
+		return;
+
+	dma_mark_clean(dma_addr, size);
 }
 
 void
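swiotlb_sync_single() is what backs dma_sync_single_for_cpu() and dma_sync_single_for_device() on a swiotlb-using device: when the buffer was bounced it copies between the original buffer and the bounce slot in the direction named by target, and otherwise it only has to mark the page clean for DMA_FROM_DEVICE. The streaming-DMA pattern that exercises it, sketched as an illustrative driver fragment (names are not from this file):

	/* the device filled the buffer; let the CPU look at it */
	dma_sync_single_for_cpu(dev, bus_addr, len, DMA_FROM_DEVICE);

	/* ... inspect the received data through the CPU ... */

	/* hand the same buffer back to the device for another transfer */
	dma_sync_single_for_device(dev, bus_addr, len, DMA_FROM_DEVICE);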
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
-
-	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(dma_addr))
-		sync_single(hwdev, dma_addr, size, dir, target);
-	else if (dir == DMA_FROM_DEVICE)
-		dma_mark_clean(dma_addr, size);
+	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
 }
 
 void
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
+ * This is the scatter-gather version of the above swiotlb_map_page
  * interface.  Here the scatter gather list elements are each tagged with the
  * appropriate dma address and length.  They are obtained via
  * sg_dma_{address,length}(SG).
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
  * The routine returns the number of addr/length pairs actually
  * used, at most nents.
  *
- * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
  * same here.
  */
 int
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
 	BUG_ON(dir == DMA_NONE);
 
-	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
-			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
-				     sg->dma_length, dir);
-		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
-	}
+	for_each_sg(sgl, sg, nelems, i)
+		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int i;
 
-	BUG_ON(dir == DMA_NONE);
-
-	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
-			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
+	for_each_sg(sgl, sg, nelems, i)
+		swiotlb_sync_single(hwdev, sg->dma_address,
 				    sg->dma_length, dir, target);
-		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
-	}
 }
 
 void
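Both scatter-gather helpers now funnel each element through the same single-buffer helpers (unmap_single() and swiotlb_sync_single()) instead of open-coding the bounce-buffer check per element. On the driver side these entry points sit behind dma_map_sg(), dma_sync_sg_*() and dma_unmap_sg(); a hedged usage sketch (variable names and error handling are illustrative only):

	struct scatterlist *sg;
	int i, nents;

	nents = dma_map_sg(dev, sglist, num_entries, DMA_FROM_DEVICE);
	if (!nents)
		return -ENOMEM;

	for_each_sg(sglist, sg, nents, i) {
		/* hand sg_dma_address(sg) and sg_dma_len(sg) to the hardware */
	}

	/* after the transfer, make the data visible to the CPU */
	dma_sync_sg_for_cpu(dev, sglist, num_entries, DMA_FROM_DEVICE);

	/* tear down: pass the original entry count, not the mapped count */
	dma_unmap_sg(dev, sglist, num_entries, DMA_FROM_DEVICE);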
