about summary refs log tree commit diff stats
path: root/lib/swiotlb.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-03-19 06:38:24 -0400
committerIngo Molnar <mingo@kernel.org>2018-03-20 05:01:59 -0400
commitb6e05477c10c12e36141558fc14f04b00ea634d4 (patch)
tree10fa56168c0fdeb896a6c845fadac8bbd112f554 /lib/swiotlb.c
parente7de6c7cc207be78369d45fb833d7d53aeda47f8 (diff)
dma/direct: Handle the memory encryption bit in common code
Give the basic phys_to_dma() and dma_to_phys() helpers a __-prefix and add
the memory encryption mask to the non-prefixed versions. Use the
__-prefixed versions directly instead of clearing the mask again in
various places.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-13-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c  25
1 files changed, 9 insertions, 16 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 005d1d87bb2e..8b06b4485e65 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -157,13 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
-				      phys_addr_t address)
-{
-	return __sme_clr(phys_to_dma(hwdev, address));
-}
-
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -622,7 +615,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 		return SWIOTLB_MAP_ERROR;
 	}
 
-	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
+	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
 				      dir, attrs);
 }
@@ -726,12 +719,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		goto out_warn;
 
 	phys_addr = swiotlb_tbl_map_single(dev,
-			swiotlb_phys_to_dma(dev, io_tlb_start),
+			__phys_to_dma(dev, io_tlb_start),
 			0, size, DMA_FROM_DEVICE, 0);
 	if (phys_addr == SWIOTLB_MAP_ERROR)
 		goto out_warn;
 
-	*dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+	*dma_handle = __phys_to_dma(dev, phys_addr);
 	if (dma_coherent_ok(dev, *dma_handle, size))
 		goto out_unmap;
 
@@ -867,10 +860,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
-		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+		return __phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_phys_to_dma(dev, map);
+	dev_addr = __phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
 	if (dma_capable(dev, dev_addr, size))
@@ -879,7 +872,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
879 attrs |= DMA_ATTR_SKIP_CPU_SYNC; 872 attrs |= DMA_ATTR_SKIP_CPU_SYNC;
880 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); 873 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
881 874
882 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 875 return __phys_to_dma(dev, io_tlb_overflow_buffer);
883} 876}
884 877
885/* 878/*
@@ -1009,7 +1002,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			sg_dma_len(sgl) = 0;
 			return 0;
 		}
-		sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
+		sg->dma_address = __phys_to_dma(hwdev, map);
 	} else
 		sg->dma_address = dev_addr;
 	sg_dma_len(sg) = sg->length;
@@ -1073,7 +1066,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 
 /*
@@ -1085,7 +1078,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+	return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 
 #ifdef CONFIG_DMA_DIRECT_OPS