author		Christoph Hellwig <hch@lst.de>	2018-08-16 08:30:39 -0400
committer	Christoph Hellwig <hch@lst.de>	2018-10-19 02:43:46 -0400
commit		dff8d6c1ed584de65aac40494d3e7468c50980c3 (patch)
tree		998f71d7b0951479f511df2ac236eaf850120a06
parent		8088546832aa2c0d8f99dd56edf6384f8a9b63b3 (diff)
swiotlb: remove the overflow buffer
Like all other dma mapping drivers just return an error code instead
of an actual memory buffer.  The reason for the overflow buffer was
that at the time swiotlb was invented there was no way to check for
dma mapping errors, but this has long been fixed.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
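The replacement relies on the standard dma_mapping_error() check that
every DMA API consumer is already expected to perform after a mapping
call.  A minimal driver-side sketch of that pattern (the function,
device, and buffer names here are illustrative, not part of this patch):

#include <linux/dma-mapping.h>

/* Hypothetical helper: maps "buf" for device DMA and reports failure. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))	/* dispatches to ops->mapping_error */
		return -ENOMEM;			/* fail hard: no overflow fallback */

	*handle = addr;
	return 0;
}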
-rw-r--r--	arch/arm64/mm/dma-mapping.c	2
-rw-r--r--	arch/powerpc/kernel/dma-swiotlb.c	4
-rw-r--r--	include/linux/dma-direct.h	2
-rw-r--r--	include/linux/swiotlb.h	3
-rw-r--r--	kernel/dma/direct.c	2
-rw-r--r--	kernel/dma/swiotlb.c	59
6 files changed, 8 insertions(+), 64 deletions(-)
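For reference, the dma_direct_mapping_error() helper that the arm64,
powerpc, and swiotlb dma_map_ops now share reduces to a comparison
against the DIRECT_MAPPING_ERROR sentinel added above; roughly (a
sketch of the kernel/dma/direct.c helper of this period, not part of
this diff):

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* DIRECT_MAPPING_ERROR (0) is never a valid streaming mapping. */
	return dma_addr == DIRECT_MAPPING_ERROR;
}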
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 072c51fb07d7..8d91b927e09e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 {
 	if (swiotlb)
-		return swiotlb_dma_mapping_error(hwdev, addr);
+		return dma_direct_mapping_error(hwdev, addr);
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 88f3963ca30f..5fc335f4d9cd 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -11,7 +11,7 @@
  *
  */
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/memblock.h>
 #include <linux/pfn.h>
 #include <linux/of_platform.h>
@@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.mapping_error = swiotlb_dma_mapping_error,
+	.mapping_error = dma_direct_mapping_error,
 	.get_required_mask = swiotlb_powerpc_get_required,
 };
 
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index fbca184ff5a0..bd73e7a91410 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,6 +5,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
+#define DIRECT_MAPPING_ERROR		0
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 7ef541ce8f34..f847c1b265c4 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -107,9 +107,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir);
 
 extern int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
-extern int
 swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 #ifdef CONFIG_SWIOTLB
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 87a6bc2a96c0..f14c376937e5 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -14,8 +14,6 @@
 #include <linux/pfn.h>
 #include <linux/set_memory.h>
 
-#define DIRECT_MAPPING_ERROR		0
-
 /*
  * Most architectures use ZONE_DMA for the first 16 Megabytes, but
  * some use it for entirely different regions:
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 69bf305ee5f8..11dbcd80b4a6 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -73,13 +73,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
 static unsigned long io_tlb_nslabs;
 
 /*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
-/*
  * This is a free list describing the number of free entries available from
  * each index
  */
@@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str)
 	return 0;
 }
 early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
 {
@@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void)
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
 	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
-
-	vaddr = phys_to_virt(io_tlb_overflow_buffer);
-	bytes = PAGE_ALIGN(io_tlb_overflow);
-	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-	memset(vaddr, 0, bytes);
 }
 
 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
-	void *v_overflow_buffer;
 	unsigned long i, bytes;
 
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -213,17 +199,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-						PAGE_ALIGN(io_tlb_overflow),
-						PAGE_SIZE);
-	if (!v_overflow_buffer)
-		return -ENOMEM;
-
-	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
-	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
@@ -330,7 +305,6 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
 	unsigned long i, bytes;
-	unsigned char *v_overflow_buffer;
 
 	bytes = nslabs << IO_TLB_SHIFT;
 
@@ -342,19 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	memset(tlb, 0, bytes);
 
 	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-						     get_order(io_tlb_overflow));
-	if (!v_overflow_buffer)
-		goto cleanup2;
-
-	set_memory_decrypted((unsigned long)v_overflow_buffer,
-			io_tlb_overflow >> PAGE_SHIFT);
-	memset(v_overflow_buffer, 0, io_tlb_overflow);
-	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
-	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
@@ -390,10 +351,6 @@ cleanup4:
 	                                                 sizeof(int)));
 	io_tlb_list = NULL;
 cleanup3:
-	free_pages((unsigned long)v_overflow_buffer,
-		   get_order(io_tlb_overflow));
-	io_tlb_overflow_buffer = 0;
-cleanup2:
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
@@ -407,8 +364,6 @@ void __init swiotlb_exit(void)
 		return;
 
 	if (late_alloc) {
-		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-			   get_order(io_tlb_overflow));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -416,8 +371,6 @@ void __init swiotlb_exit(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		memblock_free_late(io_tlb_overflow_buffer,
-				   PAGE_ALIGN(io_tlb_overflow));
 		memblock_free_late(__pa(io_tlb_orig_addr),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		memblock_free_late(__pa(io_tlb_list),
@@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return __phys_to_dma(dev, io_tlb_overflow_buffer);
+		return DIRECT_MAPPING_ERROR;
 
 	dev_addr = __phys_to_dma(dev, map);
 
@@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return __phys_to_dma(dev, io_tlb_overflow_buffer);
+	return DIRECT_MAPPING_ERROR;
 }
 
 /*
@@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
@@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr,
 }
 
 const struct dma_map_ops swiotlb_dma_ops = {
-	.mapping_error = swiotlb_dma_mapping_error,
+	.mapping_error = dma_direct_mapping_error,
 	.alloc = swiotlb_alloc,
 	.free = swiotlb_free,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,