path: root/lib/dma-direct.c
author     Christoph Hellwig <hch@lst.de>    2018-03-19 06:38:25 -0400
committer  Ingo Molnar <mingo@kernel.org>    2018-03-20 05:01:59 -0400
commit     c10f07aa27dadf5ab5b3d58c48c91a467f80db49 (patch)
tree       d5cae52525387ecf0de4a2ad043d2b433307b2b4 /lib/dma-direct.c
parent     b6e05477c10c12e36141558fc14f04b00ea634d4 (diff)
dma/direct: Handle force decryption for DMA coherent buffers in common code
With that in place the generic DMA-direct routines can be used to
allocate non-encrypted bounce buffers, and the x86 SEV case can use the
generic swiotlb ops including nice features such as using CMA
allocations.

Note that I'm not too happy about using sev_active() in DMA-direct, but
I couldn't come up with a good enough name for a wrapper to make it
worth adding.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-14-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
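As a consumer-side illustration (not part of this patch; the function names
and buffer below are hypothetical): a driver in an SEV guest whose device
ends up on the direct-mapping path now gets a decrypted, DMA-able coherent
buffer from an ordinary dma_alloc_coherent() call, with no SEV-specific code
of its own. A minimal sketch, assuming the device uses the generic
DMA-direct ops:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical driver helpers: under SEV, dma_direct_alloc() decrypts the
 * pages via set_memory_decrypted() and hands back a bus address without
 * the encryption bit; dma_direct_free() re-encrypts them on release.
 */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
				dma_addr_t *ring_dma)
{
	return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t ring_size,
			      void *ring, dma_addr_t ring_dma)
{
	dma_free_coherent(dev, ring_size, ring, ring_dma);
}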
Diffstat (limited to 'lib/dma-direct.c')
-rw-r--r--  lib/dma-direct.c  32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c9e8e21cb334..1277d293d4da 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -9,6 +9,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
 #include <linux/pfn.h>
+#include <linux/set_memory.h>
 
 #define DIRECT_MAPPING_ERROR		0
 
@@ -20,6 +21,14 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+	return sev_active();
+}
+
 static bool
 check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		const char *caller)
@@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+	dma_addr_t addr = force_dma_unencrypted() ?
+		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+	return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -46,6 +57,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
 	struct page *page = NULL;
+	void *ret;
 
 	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
 	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
@@ -78,10 +90,15 @@ again:
 
 	if (!page)
 		return NULL;
-
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	memset(page_address(page), 0, size);
-	return page_address(page);
+	ret = page_address(page);
+	if (force_dma_unencrypted()) {
+		set_memory_decrypted((unsigned long)ret, 1 << page_order);
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	} else {
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
+	}
+	memset(ret, 0, size);
+	return ret;
 }
 
 /*
@@ -92,9 +109,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int page_order = get_order(size);
 
+	if (force_dma_unencrypted())
+		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-		free_pages((unsigned long)cpu_addr, get_order(size));
+		free_pages((unsigned long)cpu_addr, page_order);
 }
 
 static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
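For context on why the force-unencrypted paths above switch from
phys_to_dma() to __phys_to_dma(): since the parent commit, phys_to_dma() is
roughly a wrapper that tags the raw arch translation with the SME/SEV
encryption bit, which is exactly the bit a decrypted buffer's bus address
must not carry. A simplified sketch, not the literal definition in
include/linux/dma-direct.h:

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* __sme_set() ORs in the memory-encryption mask (sme_me_mask). */
	return __sme_set(__phys_to_dma(dev, paddr));
}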