author	Christoph Hellwig <hch@lst.de>	2018-03-19 06:38:23 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-03-20 05:01:58 -0400
commit	e7de6c7cc207be78369d45fb833d7d53aeda47f8 (patch)
tree	2d21bc67f87253ea1b0f94e5f4ef1703674151c2
parent	b7fa07460b0f0e9fbe6d9319a0864c145bd59bcb (diff)
dma/swiotlb: Remove swiotlb_set_mem_attributes()
Now that set_memory_decrypted() is always available we can just call it directly.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-12-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
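The conversion is mechanical: the removed wrapper took a virtual address and a page-aligned byte length, while set_memory_decrypted() takes the address plus a page count, so callers now do the page conversion themselves. A minimal illustrative sketch of the change in calling convention (vaddr and bytes stand in for the SWIOTLB buffer address and its page-aligned size; not part of the patch itself):

	/* Before: the x86-only __weak wrapper hid the page conversion. */
	swiotlb_set_mem_attributes(vaddr, bytes);

	/*
	 * After: call the generic helper directly; it takes a page count,
	 * so the page-aligned byte length is shifted by PAGE_SHIFT.
	 */
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);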
-rw-r--r--	arch/x86/include/asm/mem_encrypt.h	2
-rw-r--r--	arch/x86/mm/mem_encrypt.c	8
-rw-r--r--	lib/swiotlb.c	12
3 files changed, 6 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8fe61ad21047..c0643831706e 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -49,8 +49,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void);
 
-void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
-
 bool sme_active(void);
 bool sev_active(void);
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1217a4fab915..d243e8d80d89 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -441,11 +441,3 @@ void __init mem_encrypt_init(void)
 		   : "Secure Memory Encryption (SME)");
 }
 
-void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
-{
-	WARN(PAGE_ALIGN(size) != size,
-	     "size is not page-aligned (%#lx)\n", size);
-
-	/* Make the SWIOTLB buffer area decrypted */
-	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
-}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c43ec2271469..005d1d87bb2e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
 #include <linux/mem_encrypt.h>
+#include <linux/set_memory.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
 /* For swiotlb, clear memory encryption mask from dma addresses */
 static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
 				      phys_addr_t address)
@@ -209,12 +208,12 @@ void __init swiotlb_update_mem_attributes(void)
 
 	vaddr = phys_to_virt(io_tlb_start);
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
 
 	vaddr = phys_to_virt(io_tlb_overflow_buffer);
 	bytes = PAGE_ALIGN(io_tlb_overflow);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
 }
 
@@ -355,7 +354,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_start = virt_to_phys(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
-	swiotlb_set_mem_attributes(tlb, bytes);
+	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
 	memset(tlb, 0, bytes);
 
 	/*
@@ -366,7 +365,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!v_overflow_buffer)
 		goto cleanup2;
 
-	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+	set_memory_decrypted((unsigned long)v_overflow_buffer,
+			     io_tlb_overflow >> PAGE_SHIFT);
 	memset(v_overflow_buffer, 0, io_tlb_overflow);
 	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
 