-rw-r--r--	arch/x86/mm/mem_encrypt.c	73
-rw-r--r--	lib/dma-direct.c	32
2 files changed, 29 insertions, 76 deletions
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1b396422d26f..b2de398d1fd3 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -195,58 +195,6 @@ void __init sme_early_init(void)
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
-static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		       gfp_t gfp, unsigned long attrs)
-{
-	unsigned int order;
-	struct page *page;
-	void *vaddr = NULL;
-
-	order = get_order(size);
-	page = alloc_pages_node(dev_to_node(dev), gfp, order);
-	if (page) {
-		dma_addr_t addr;
-
-		/*
-		 * Since we will be clearing the encryption bit, check the
-		 * mask with it already cleared.
-		 */
-		addr = __phys_to_dma(dev, page_to_phys(page));
-		if ((addr + size) > dev->coherent_dma_mask) {
-			__free_pages(page, get_order(size));
-		} else {
-			vaddr = page_address(page);
-			*dma_handle = addr;
-		}
-	}
-
-	if (!vaddr)
-		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-
-	if (!vaddr)
-		return NULL;
-
-	/* Clear the SME encryption bit for DMA use if not swiotlb area */
-	if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
-		set_memory_decrypted((unsigned long)vaddr, 1 << order);
-		memset(vaddr, 0, PAGE_SIZE << order);
-		*dma_handle = __sme_clr(*dma_handle);
-	}
-
-	return vaddr;
-}
-
-static void sev_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t dma_handle, unsigned long attrs)
-{
-	/* Set the SME encryption bit for re-use if not swiotlb area */
-	if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
-		set_memory_encrypted((unsigned long)vaddr,
-				     1 << get_order(size));
-
-	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
 	pgprot_t old_prot, new_prot;
@@ -399,20 +347,6 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
-static const struct dma_map_ops sev_dma_ops = {
-	.alloc                  = sev_alloc,
-	.free                   = sev_free,
-	.map_page               = swiotlb_map_page,
-	.unmap_page             = swiotlb_unmap_page,
-	.map_sg                 = swiotlb_map_sg_attrs,
-	.unmap_sg               = swiotlb_unmap_sg_attrs,
-	.sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device     = swiotlb_sync_sg_for_device,
-	.mapping_error          = swiotlb_dma_mapping_error,
-};
-
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -423,12 +357,11 @@ void __init mem_encrypt_init(void)
 	swiotlb_update_mem_attributes();
 
 	/*
-	 * With SEV, DMA operations cannot use encryption. New DMA ops
-	 * are required in order to mark the DMA areas as decrypted or
-	 * to use bounce buffers.
+	 * With SEV, DMA operations cannot use encryption, we need to use
+	 * SWIOTLB to bounce buffer DMA operation.
 	 */
 	if (sev_active())
-		dma_ops = &sev_dma_ops;
+		dma_ops = &swiotlb_dma_ops;
 
 	/*
 	 * With SEV, we need to unroll the rep string I/O instructions.
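The lib/dma-direct.c changes below rely on the split between phys_to_dma() and __phys_to_dma(): with SME/SEV enabled, phys_to_dma() is assumed to OR the encryption mask (the C-bit) into the bus address via __sme_set(), while __phys_to_dma() leaves it clear. A minimal sketch of the mask check dma_coherent_ok() performs once force_dma_unencrypted() is true, using a hypothetical function name and assuming the helpers from include/linux/dma-direct.h of this era; it is an illustration, not part of the patch:

#include <linux/device.h>
#include <linux/dma-direct.h>

/* Sketch of the SEV branch of dma_coherent_ok(); not taken from the patch. */
static bool sev_coherent_ok_sketch(struct device *dev, phys_addr_t phys,
				   size_t size)
{
	/* No __sme_set() here, so the C-bit cannot push the address past the mask. */
	dma_addr_t addr = __phys_to_dma(dev, phys);

	return addr + size - 1 <= dev->coherent_dma_mask;
}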
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c9e8e21cb334..1277d293d4da 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -9,6 +9,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
 #include <linux/pfn.h>
+#include <linux/set_memory.h>
 
 #define DIRECT_MAPPING_ERROR	0
 
@@ -20,6 +21,14 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+	return sev_active();
+}
+
 static bool
 check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		const char *caller)
@@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+	dma_addr_t addr = force_dma_unencrypted() ?
+		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+	return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -46,6 +57,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
 	struct page *page = NULL;
+	void *ret;
 
 	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
 	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
@@ -78,10 +90,15 @@ again:
 
 	if (!page)
 		return NULL;
-
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	memset(page_address(page), 0, size);
-	return page_address(page);
+	ret = page_address(page);
+	if (force_dma_unencrypted()) {
+		set_memory_decrypted((unsigned long)ret, 1 << page_order);
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	} else {
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
+	}
+	memset(ret, 0, size);
+	return ret;
 }
 
 /*
@@ -92,9 +109,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int page_order = get_order(size);
 
+	if (force_dma_unencrypted())
+		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-		free_pages((unsigned long)cpu_addr, get_order(size));
+		free_pages((unsigned long)cpu_addr, page_order);
 }
 
 static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
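With sev_dma_ops removed, nothing changes for drivers: an SEV guest driver keeps calling dma_alloc_coherent()/dma_free_coherent(), and for direct-mapped devices the dma_direct_alloc()/dma_direct_free() paths above now clear and restore the page encryption attribute transparently. A hedged usage sketch with a hypothetical helper name, not taken from the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical driver helper; the DMA API it uses is unchanged by this patch. */
static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/*
	 * On SEV, direct-mapped devices reach dma_direct_alloc() above, which
	 * calls set_memory_decrypted() before returning the buffer.
	 */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access ring from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}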