-rw-r--r--  arch/arm64/mm/dma-mapping.c |   6
-rw-r--r--  include/linux/swiotlb.h     |   5
-rw-r--r--  kernel/dma/swiotlb.c        | 105
3 files changed, 5 insertions(+), 111 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 8d91b927e09e..eee6cfcfde9e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -112,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return addr;
 	}
 
-	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
+	ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 
@@ -133,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	return coherent_ptr;
 
 no_map:
-	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
+	dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
 no_mem:
 	return NULL;
 }
@@ -151,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
 			return;
 		vunmap(vaddr);
 	}
-	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
+	dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
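
Note: the three arm64 hunks above replace the last direct users of swiotlb_alloc()/swiotlb_free() with calls into the dma-direct allocator. What disappears on this path is a try-then-fall-back allocation scheme (visible in the kernel/dma/swiotlb.c removal further down). A minimal sketch of that pattern in plain C, outside the kernel -- every fake_* name here is an invented stand-in, not a kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for dma_direct_alloc(): the primary, zeroing allocator. */
static void *fake_direct_alloc(size_t size)
{
	return calloc(1, size);
}

/* Stand-in for swiotlb_alloc_buffer(): carve from a bounce pool. */
static void *fake_bounce_alloc(size_t size)
{
	return calloc(1, size);
}

/*
 * The pattern the removed swiotlb_alloc() implemented: try the direct
 * allocator first, and only fall back to the bounce buffer on failure.
 */
static void *alloc_with_fallback(size_t size)
{
	void *ptr = fake_direct_alloc(size);

	if (!ptr)
		ptr = fake_bounce_alloc(size);
	return ptr;
}

int main(void)
{
	void *buf = alloc_with_fallback(4096);

	printf("allocated: %s\n", buf ? "yes" : "no");
	free(buf);
	return 0;
}

After the patch the arm64 caller corresponds to calling fake_direct_alloc() alone: an allocation failure is reported to the caller rather than being absorbed by a bounce-buffer retry.
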
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index f847c1b265c4..a387b59640a4 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 
 /* Accessory functions. */
 
-void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flags, unsigned long attrs);
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_addr, unsigned long attrs);
-
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 				   unsigned long offset, size_t size,
 				   enum dma_data_direction dir,
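
Dropping the public prototypes is safe because allocation requests reach swiotlb through the dma_map_ops function-pointer table, which the kernel/dma/swiotlb.c hunks below repoint at dma_direct_alloc()/dma_direct_free(). A self-contained model of that dispatch style in plain C (all identifiers invented for illustration; this is not the kernel's struct dma_map_ops):

#include <stdio.h>
#include <stdlib.h>

/* Toy analogue of struct dma_map_ops: calls go through pointers. */
struct fake_dma_map_ops {
	void *(*alloc)(size_t size);
	void (*free)(void *vaddr, size_t size);
};

static void *fake_direct_alloc(size_t size)
{
	return calloc(1, size);	/* zeroing allocator, as dma_direct_alloc() is */
}

static void fake_direct_free(void *vaddr, size_t size)
{
	(void)size;
	free(vaddr);
}

/* Analogue of the patched swiotlb_dma_ops: .alloc/.free now go direct. */
static const struct fake_dma_map_ops fake_swiotlb_ops = {
	.alloc	= fake_direct_alloc,
	.free	= fake_direct_free,
};

int main(void)
{
	void *buf = fake_swiotlb_ops.alloc(4096);

	if (!buf)
		return 1;
	fake_swiotlb_ops.free(buf, 4096);
	puts("dispatched through the ops table");
	return 0;
}

Because callers only ever see the table, retargeting the two pointers is the whole interface change; the header declarations had no remaining users.
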
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 57507b18caa4..1a01b0ac0a5e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -622,78 +622,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	}
 }
 
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
-		size_t size)
-{
-	u64 mask = DMA_BIT_MASK(32);
-
-	if (dev && dev->coherent_dma_mask)
-		mask = dev->coherent_dma_mask;
-	return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		unsigned long attrs)
-{
-	phys_addr_t phys_addr;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
-		goto out_warn;
-
-	phys_addr = swiotlb_tbl_map_single(dev,
-			__phys_to_dma(dev, io_tlb_start),
-			0, size, DMA_FROM_DEVICE, attrs);
-	if (phys_addr == SWIOTLB_MAP_ERROR)
-		goto out_warn;
-
-	*dma_handle = __phys_to_dma(dev, phys_addr);
-	if (!dma_coherent_ok(dev, *dma_handle, size))
-		goto out_unmap;
-
-	memset(phys_to_virt(phys_addr), 0, size);
-	return phys_to_virt(phys_addr);
-
-out_unmap:
-	dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		(unsigned long long)dev->coherent_dma_mask,
-		(unsigned long long)*dma_handle);
-
-	/*
-	 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
-	 */
-	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-			DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
-	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
-		dev_warn(dev,
-			"swiotlb: coherent allocation failed, size=%zu\n",
-			size);
-		dump_stack();
-	}
-	return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
-		dma_addr_t dma_addr)
-{
-	phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
-	WARN_ON_ONCE(irqs_disabled());
-
-	if (!is_swiotlb_buffer(phys_addr))
-		return false;
-
-	/*
-	 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
-	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
-	 */
-	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-			DMA_ATTR_SKIP_CPU_SYNC);
-	return true;
-}
-
 static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
@@ -926,39 +854,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
-{
-	void *vaddr;
-
-	/* temporary workaround: */
-	if (gfp & __GFP_NOWARN)
-		attrs |= DMA_ATTR_NO_WARN;
-
-	/*
-	 * Don't print a warning when the first allocation attempt fails.
-	 * swiotlb_alloc_coherent() will print a warning when the DMA memory
-	 * allocation ultimately failed.
-	 */
-	gfp |= __GFP_NOWARN;
-
-	vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!vaddr)
-		vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
-	return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	if (!swiotlb_free_buffer(dev, size, dma_addr))
-		dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
 const struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error		= dma_direct_mapping_error,
-	.alloc			= swiotlb_alloc,
-	.free			= swiotlb_free,
+	.alloc			= dma_direct_alloc,
+	.free			= dma_direct_free,
 	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
 	.sync_single_for_device	= swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
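
One detail of the removed code is worth a worked example: dma_coherent_ok() accepted an allocation only if its last byte was addressable under the device's coherent DMA mask, defaulting to 32 bits when no mask was set. A standalone sketch of that check (plain C; DMA_BIT_MASK() and the helper are re-declared locally for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the kernel's DMA_BIT_MASK() for this sketch. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * Mirrors the removed dma_coherent_ok(): use the device's coherent mask
 * if one is set, otherwise assume 32-bit addressing, and require the
 * whole buffer [addr, addr + size - 1] to fit under the mask.
 */
static int dma_coherent_ok(uint64_t coherent_dma_mask, uint64_t addr,
			   size_t size)
{
	uint64_t mask = coherent_dma_mask ? coherent_dma_mask
					  : DMA_BIT_MASK(32);

	return addr + size - 1 <= mask;
}

int main(void)
{
	/*
	 * A 4 KiB buffer placed at 4 GiB: fails a 32-bit mask (prints 0),
	 * passes a 64-bit mask (prints 1).
	 */
	printf("%d\n", dma_coherent_ok(DMA_BIT_MASK(32), 1ULL << 32, 4096));
	printf("%d\n", dma_coherent_ok(DMA_BIT_MASK(64), 1ULL << 32, 4096));
	return 0;
}

With the bounce-buffer allocation path gone, this check lives on only in the dma-direct allocator, which performs the equivalent mask test when choosing where to allocate.
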