author	Christoph Hellwig <hch@lst.de>	2018-03-19 06:38:25 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-03-20 05:01:59 -0400
commit	c10f07aa27dadf5ab5b3d58c48c91a467f80db49 (patch)
tree	d5cae52525387ecf0de4a2ad043d2b433307b2b4 /arch/x86/mm/mem_encrypt.c
parent	b6e05477c10c12e36141558fc14f04b00ea634d4 (diff)
dma/direct: Handle force decryption for DMA coherent buffers in common code
With that in place the generic DMA-direct routines can be used to allocate
non-encrypted bounce buffers, and the x86 SEV case can use the generic
swiotlb ops including nice features such as using CMA allocations.

Note that I'm not too happy about using sev_active() in DMA-direct, but I
couldn't come up with a good enough name for a wrapper to make it worth
adding.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-14-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
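For reference, a minimal sketch of what a common DMA-direct allocation path can do with the SEV handling that this patch removes from arch code: allocate the pages, then mark them decrypted and strip the encryption bit from the bus address when SEV is active. The function name dma_direct_alloc_sketch, the header choices, and the exact call sequence are illustrative assumptions for this page, not the code added by the companion patches in this series.

#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/mem_encrypt.h>
#include <linux/string.h>
#include <asm/set_memory.h>

/*
 * Illustrative sketch only: allocate pages for a coherent buffer, then
 * clear the encryption bit when SEV forces unencrypted DMA, mirroring
 * what the removed sev_alloc() below used to do in arch code.
 */
static void *dma_direct_alloc_sketch(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page;
	void *ret;

	page = alloc_pages_node(dev_to_node(dev), gfp, order);
	if (!page)
		return NULL;

	ret = page_address(page);
	*dma_handle = phys_to_dma(dev, page_to_phys(page));

	if (sev_active()) {
		/* Mark the pages decrypted and drop the C-bit from the bus address. */
		set_memory_decrypted((unsigned long)ret, 1 << order);
		*dma_handle = __sme_clr(*dma_handle);
	}

	memset(ret, 0, size);
	return ret;
}

Keeping that check in one generic place is what lets arch/x86 drop its private sev_dma_ops and fall back to the stock swiotlb_dma_ops in the last hunk below.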
Diffstat (limited to 'arch/x86/mm/mem_encrypt.c')
-rw-r--r--	arch/x86/mm/mem_encrypt.c	73
1 file changed, 3 insertions(+), 70 deletions(-)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1b396422d26f..b2de398d1fd3 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -195,58 +195,6 @@ void __init sme_early_init(void)
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
-static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		       gfp_t gfp, unsigned long attrs)
-{
-	unsigned int order;
-	struct page *page;
-	void *vaddr = NULL;
-
-	order = get_order(size);
-	page = alloc_pages_node(dev_to_node(dev), gfp, order);
-	if (page) {
-		dma_addr_t addr;
-
-		/*
-		 * Since we will be clearing the encryption bit, check the
-		 * mask with it already cleared.
-		 */
-		addr = __phys_to_dma(dev, page_to_phys(page));
-		if ((addr + size) > dev->coherent_dma_mask) {
-			__free_pages(page, get_order(size));
-		} else {
-			vaddr = page_address(page);
-			*dma_handle = addr;
-		}
-	}
-
-	if (!vaddr)
-		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-
-	if (!vaddr)
-		return NULL;
-
-	/* Clear the SME encryption bit for DMA use if not swiotlb area */
-	if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
-		set_memory_decrypted((unsigned long)vaddr, 1 << order);
-		memset(vaddr, 0, PAGE_SIZE << order);
-		*dma_handle = __sme_clr(*dma_handle);
-	}
-
-	return vaddr;
-}
-
-static void sev_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t dma_handle, unsigned long attrs)
-{
-	/* Set the SME encryption bit for re-use if not swiotlb area */
-	if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
-		set_memory_encrypted((unsigned long)vaddr,
-				     1 << get_order(size));
-
-	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
 	pgprot_t old_prot, new_prot;
@@ -399,20 +347,6 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
-static const struct dma_map_ops sev_dma_ops = {
-	.alloc			= sev_alloc,
-	.free			= sev_free,
-	.map_page		= swiotlb_map_page,
-	.unmap_page		= swiotlb_unmap_page,
-	.map_sg			= swiotlb_map_sg_attrs,
-	.unmap_sg		= swiotlb_unmap_sg_attrs,
-	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
-	.sync_single_for_device	= swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
-	.mapping_error		= swiotlb_dma_mapping_error,
-};
-
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -423,12 +357,11 @@ void __init mem_encrypt_init(void)
 	swiotlb_update_mem_attributes();
 
 	/*
-	 * With SEV, DMA operations cannot use encryption. New DMA ops
-	 * are required in order to mark the DMA areas as decrypted or
-	 * to use bounce buffers.
+	 * With SEV, DMA operations cannot use encryption, we need to use
+	 * SWIOTLB to bounce buffer DMA operation.
 	 */
 	if (sev_active())
-		dma_ops = &sev_dma_ops;
+		dma_ops = &swiotlb_dma_ops;
 
 	/*
 	 * With SEV, we need to unroll the rep string I/O instructions.