author     Christoph Hellwig <hch@lst.de>    2018-08-20 10:21:10 -0400
committer  Christoph Hellwig <hch@lst.de>    2018-10-19 02:46:58 -0400
commit     c4dae366925f929749b2a26efa53b561904a9a4f
tree       d963b2c400b43fe7462848d3bf019cc172c59a75
parent     4803b44e68fc08e76f00dec90074d199a11ad6f5
swiotlb: refactor swiotlb_map_page
Remove the somewhat useless map_single function, and replace it with a
swiotlb_bounce_page handler that handles everything related to actually
bouncing a page.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
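The shape of the change: swiotlb_map_page() keeps only the fast-path check and tail-calls the new helper, while everything about bouncing, including the post-bounce addressability check and its unwind, moves into swiotlb_bounce_page(). The user-space model below is a rough sketch of that structure only; the types, stubs, address constants, and the identity phys-to-DMA mapping are stand-ins, not the kernel's implementation:

	/* Toy model of the refactored control flow; build with: cc -o demo demo.c */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t phys_addr_t;
	typedef uint64_t dma_addr_t;

	#define DIRECT_MAPPING_ERROR	((dma_addr_t)0)
	#define SWIOTLB_MAP_ERROR	((phys_addr_t)~0ULL)

	/* Stand-in: pretend the device can only address the low 4 GiB. */
	static bool dma_capable(dma_addr_t addr, size_t size)
	{
		return addr + size <= (1ULL << 32);
	}

	/* Stand-in: pretend the bounce pool sits at 16 MiB and always has room. */
	static phys_addr_t swiotlb_tbl_map_single(phys_addr_t orig_phys, size_t size)
	{
		(void)orig_phys;
		(void)size;
		return 0x1000000;
	}

	static void swiotlb_tbl_unmap_single(phys_addr_t tlb_addr, size_t size)
	{
		(void)tlb_addr;
		(void)size;
	}

	/*
	 * Mirrors the new swiotlb_bounce_page(): allocate a bounce slot, then
	 * verify the device can actually reach it, tearing down on failure.
	 */
	static dma_addr_t swiotlb_bounce_page(phys_addr_t *phys, size_t size)
	{
		dma_addr_t dma_addr;

		*phys = swiotlb_tbl_map_single(*phys, size);
		if (*phys == SWIOTLB_MAP_ERROR)
			return DIRECT_MAPPING_ERROR;

		dma_addr = *phys;	/* identity phys->dma mapping in this model */
		if (!dma_capable(dma_addr, size)) {
			swiotlb_tbl_unmap_single(*phys, size);
			return DIRECT_MAPPING_ERROR;
		}
		return dma_addr;
	}

	/* Mirrors the slimmed-down swiotlb_map_page(): fast path, else bounce. */
	static dma_addr_t swiotlb_map_page(phys_addr_t phys, size_t size)
	{
		dma_addr_t dev_addr = phys;	/* identity phys->dma mapping */

		if (dma_capable(dev_addr, size))
			return dev_addr;	/* device reaches the buffer directly */
		return swiotlb_bounce_page(&phys, size);
	}

	int main(void)
	{
		/* A low buffer maps directly; a buffer above 4 GiB gets bounced. */
		printf("low:  %#llx\n",
		       (unsigned long long)swiotlb_map_page(0x2000, 0x1000));
		printf("high: %#llx\n",
		       (unsigned long long)swiotlb_map_page(1ULL << 40, 0x1000));
		return 0;
	}

Even in the toy version, the point of the split is visible: the error-unwind path now lives entirely next to the allocation it unwinds.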
-rw-r--r--	kernel/dma/swiotlb.c	67
1 file changed, 30 insertions(+), 37 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 15755d7a5242..57507b18caa4 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -544,26 +544,6 @@ found:
 }
 
 /*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_addr_t start_dma_addr;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE) {
-		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
-				     &phys);
-		return SWIOTLB_MAP_ERROR;
-	}
-
-	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
-				      dir, attrs);
-}
-
-/*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
@@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 	return true;
 }
 
+static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_addr_t dma_addr;
+
+	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
+		dev_warn_ratelimited(dev,
+			"Cannot do DMA to address %pa\n", phys);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	/* Oh well, have to allocate and map a bounce buffer. */
+	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+			*phys, size, dir, attrs);
+	if (*phys == SWIOTLB_MAP_ERROR)
+		return DIRECT_MAPPING_ERROR;
+
+	/* Ensure that the address returned is DMA'ble */
+	dma_addr = __phys_to_dma(dev, *phys);
+	if (unlikely(!dma_capable(dev, dma_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+			attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
  * physical address to use is returned.
@@ -726,7 +734,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
+	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
@@ -739,22 +747,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
-	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir, attrs);
-	if (map == SWIOTLB_MAP_ERROR)
-		return DIRECT_MAPPING_ERROR;
-
-	dev_addr = __phys_to_dma(dev, map);
-
-	/* Ensure that the address returned is DMA'ble */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-	return DIRECT_MAPPING_ERROR;
+	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
 }
 
 /*
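For context, drivers do not call swiotlb_map_page() directly; on configurations where swiotlb backs the DMA API, the usual streaming mapping ends up on this path. An illustrative caller (dev and page are assumed to come from the driver; returning -ENOMEM on failure is the caller's choice, not mandated by this patch):

	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* e.g. bounce pool exhausted or unaddressable */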