Diffstat (limited to 'lib')
 lib/swiotlb.c | 88 +++++++++++++++++++++++++++++++++++++++-------------------------------------------------
 1 file changed, 39 insertions(+), 49 deletions(-)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1f991acc2a05..32e2bd3b1142 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -145,7 +145,7 @@ static void *swiotlb_bus_to_virt(dma_addr_t address)
 	return phys_to_virt(swiotlb_bus_to_phys(address));
 }
 
-int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
 {
 	return 0;
 }
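This `__weak` stub is the generic no-op; the signature change means an architecture override now receives a physical address rather than a kernel virtual one, so the decision can be made for memory that has no kernel mapping. A minimal sketch of such an override, where `arch_addr_needs_bounce()` is an invented predicate standing in for whatever test a real port would apply:

/* Hypothetical arch-side override of the __weak hook above.
 * arch_addr_needs_bounce() is made up for illustration; a real
 * port would test paddr against its DMA-reachable ranges.
 */
int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return arch_addr_needs_bounce(paddr) ||
	       arch_addr_needs_bounce(paddr + size - 1);
}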
@@ -315,9 +315,9 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
 }
 
-static inline int range_needs_mapping(void *ptr, size_t size)
+static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
 {
-	return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
+	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
 }
 
 static int is_swiotlb_buffer(char *addr)
@@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * Once the device is given the dma address, the device owns this memory until
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
-dma_addr_t
-swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
-			 int dir, struct dma_attrs *attrs)
-{
-	dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
+dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+			    unsigned long offset, size_t size,
+			    enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
+{
+	phys_addr_t phys = page_to_phys(page) + offset;
+	void *ptr = page_address(page) + offset;
+	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr, size) &&
-	    !range_needs_mapping(ptr, size))
+	if (!address_needs_mapping(dev, dev_addr, size) &&
+	    !range_needs_mapping(virt_to_phys(ptr), size))
 		return dev_addr;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	map = map_single(hwdev, virt_to_phys(ptr), size, dir);
+	map = map_single(dev, phys, size, dir);
 	if (!map) {
-		swiotlb_full(hwdev, size, dir, 1);
+		swiotlb_full(dev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = swiotlb_virt_to_bus(hwdev, map);
+	dev_addr = swiotlb_virt_to_bus(dev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(hwdev, dev_addr, size))
+	if (address_needs_mapping(dev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
 }
-EXPORT_SYMBOL(swiotlb_map_single_attrs);
-
-dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-{
-	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
-}
-EXPORT_SYMBOL(swiotlb_map_single);
+EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
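The rename from swiotlb_map_single_attrs() to swiotlb_map_page() matters because the (page, offset) signature matches the dma_map_ops callback prototype, so an architecture can point its ops table straight at these functions instead of keeping shim wrappers. A sketch of that wiring, and of how a virt-address mapping collapses onto the page-based entry point (illustrative, not the exact tree code):

/* Sketch: an architecture's ops table can now reference the swiotlb
 * entry points directly; only the two converted callbacks are shown.
 */
static struct dma_map_ops swiotlb_dma_ops = {
	.map_page	= swiotlb_map_page,
	.unmap_page	= swiotlb_unmap_page,
	/* ... sync_* and sg callbacks ... */
};

/* And a virt-address map_single is just the page-based call
 * (map_single_example is an invented name):
 */
static inline dma_addr_t map_single_example(struct device *dev, void *ptr,
					    size_t size,
					    enum dma_data_direction dir)
{
	return swiotlb_map_page(dev, virt_to_page(ptr),
				offset_in_page(ptr), size, dir, NULL);
}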
@@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single);
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-void
-swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
-			   size_t size, int dir, struct dma_attrs *attrs)
+void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+			size_t size, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
@@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
 }
-EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
-
-void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-		     int dir)
-{
-	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
-}
-EXPORT_SYMBOL(swiotlb_unmap_single);
+EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
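The swiotlb_unmap_single()/swiotlb_unmap_single_attrs() pair disappears rather than being converted: callers are expected to reach swiotlb through dma_unmap_single(), which the generic layer routes to the unmap_page callback. If an equivalent shim were still wanted it would be one line; a hypothetical stand-in, for illustration only:

/* Hypothetical compatibility shim for the deleted wrapper;
 * not part of this patch.
 */
static inline void swiotlb_unmap_single_compat(struct device *hwdev,
					       dma_addr_t dev_addr,
					       size_t size,
					       enum dma_data_direction dir)
{
	swiotlb_unmap_page(hwdev, dev_addr, size, dir, NULL);
}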
@@ -736,7 +724,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-			    size_t size, int dir)
+			    size_t size, enum dma_data_direction dir)
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
@@ -744,7 +732,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-			       size_t size, int dir)
+			       size_t size, enum dma_data_direction dir)
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
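Tightening `int dir` to `enum dma_data_direction` changes nothing at the ABI level (the enum is int-sized) but documents intent at every call site and gives the compiler a chance to flag a stray integer. Typical driver-side usage after the change, with placeholder names:

/* hwdev, dev_addr and len are placeholders for a mapping set up
 * earlier with swiotlb_map_page(..., DMA_FROM_DEVICE, ...).
 */
swiotlb_sync_single_for_cpu(hwdev, dev_addr, len, DMA_FROM_DEVICE);
/* ... CPU reads the freshly DMA'd buffer here ... */
swiotlb_sync_single_for_device(hwdev, dev_addr, len, DMA_FROM_DEVICE);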
@@ -769,7 +757,8 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 
 void
 swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				  unsigned long offset, size_t size, int dir)
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir)
 {
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_CPU);
@@ -778,7 +767,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
 
 void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				     unsigned long offset, size_t size, int dir)
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction dir)
 {
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_DEVICE);
@@ -803,7 +793,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
  */
 int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     int dir, struct dma_attrs *attrs)
+		     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -811,10 +801,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		void *addr = sg_virt(sg);
-		dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr);
+		phys_addr_t paddr = sg_phys(sg);
+		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
 
-		if (range_needs_mapping(addr, sg->length) ||
+		if (range_needs_mapping(paddr, sg->length) ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, sg_phys(sg),
 					       sg->length, dir);
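The scatterlist walk now starts from sg_phys(sg) rather than sg_virt(sg), i.e. it never forms a kernel virtual address for the segment. Since sg_phys() is defined in linux/scatterlist.h as page_to_phys(sg_page(sg)) + sg->offset, the two new lines are equivalent to:

/* Equivalent expansion of the new code above; no virtual address
 * is required, which is what makes highmem pages workable here.
 */
phys_addr_t paddr = page_to_phys(sg_page(sg)) + sg->offset;
dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);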
@@ -850,7 +840,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-		       int nelems, int dir, struct dma_attrs *attrs)
+		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -858,11 +848,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
+		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
 			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(sg_virt(sg), sg->dma_length);
+			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
 	}
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -892,17 +882,17 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
+		if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
 			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(sg_virt(sg), sg->dma_length);
+			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
 	}
 }
 
 void
 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			int nelems, int dir)
+			int nelems, enum dma_data_direction dir)
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
@@ -910,7 +900,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-			   int nelems, int dir)
+			   int nelems, enum dma_data_direction dir)
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
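Taken together, the converted scatter/gather entry points are used as below; a sketch with error handling elided and buffer names invented:

/* Hypothetical caller of the converted API; hwdev, buf0/buf1 and
 * len0/len1 are placeholders. swiotlb_map_sg_attrs() returns the
 * number of mapped entries, or 0 on failure.
 */
struct scatterlist sgl[2];
int n;

sg_init_table(sgl, 2);
sg_set_buf(&sgl[0], buf0, len0);
sg_set_buf(&sgl[1], buf1, len1);

n = swiotlb_map_sg_attrs(hwdev, sgl, 2, DMA_TO_DEVICE, NULL);
if (!n)
	goto fail;
/* ... device performs DMA from the buffers ... */
swiotlb_sync_sg_for_cpu(hwdev, sgl, n, DMA_TO_DEVICE);
swiotlb_unmap_sg_attrs(hwdev, sgl, n, DMA_TO_DEVICE, NULL);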