 arch/arc/include/asm/dma-mapping.h | 13
 arch/arc/mm/cache.c                | 23
 arch/arc/mm/dma.c                  | 54
 3 files changed, 50 insertions(+), 40 deletions(-)
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..c946c0a83e76
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+// (C) 2018 Synopsys, Inc. (www.synopsys.com)
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-mapping.h>
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+			const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops arch_setup_dma_ops
+
+#endif
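
The "#define arch_setup_dma_ops arch_setup_dma_ops" line is how the arch tells the generic headers that it supplies its own hook, so they skip their empty inline fallback. A minimal standalone sketch of the idiom (illustrative only, not kernel code), assuming an #ifndef-guarded fallback as in the generic DMA headers of this era:

/*
 * Standalone illustration of the "#define name name" detection idiom:
 * the generic header only provides its no-op when the arch did not
 * announce an implementation of its own.
 */
#include <stdio.h>

/* arch header: declare the real hook, then mark it as present */
void arch_setup_dma_ops(int coherent);
#define arch_setup_dma_ops arch_setup_dma_ops

/* generic header: only supply a no-op when the arch did not */
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(int coherent) { }
#endif

void arch_setup_dma_ops(int coherent)
{
	printf("arch-specific hook, coherent=%d\n", coherent);
}

int main(void)
{
	arch_setup_dma_ops(1);	/* resolves to the arch version */
	return 0;
}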
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 25c631942500..2d389cab46ba 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
 	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
 		       perip_base,
-		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
 
 	return buf;
 }
@@ -897,15 +897,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 }
 
 /*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
-/*
  * Exported DMA API
  */
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
@@ -1264,11 +1255,7 @@ void __init arc_cache_init_master(void)
 	if (is_isa_arcv2() && ioc_enable)
 		arc_ioc_setup();
 
-	if (is_isa_arcv2() && ioc_enable) {
-		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
-		__dma_cache_inv = __dma_cache_inv_ioc;
-		__dma_cache_wback = __dma_cache_wback_ioc;
-	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
 		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
 		__dma_cache_inv = __dma_cache_inv_slc;
 		__dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1264,12 @@ void __init arc_cache_init_master(void)
 		__dma_cache_inv = __dma_cache_inv_l1;
 		__dma_cache_wback = __dma_cache_wback_l1;
 	}
+	/*
+	 * In case of IOC (say IOC+SLC case), pointers above could still be set
+	 * but end up not being relevant as the first function in chain is not
+	 * called at all for @dma_direct_ops
+	 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+	 */
 }
 
 void __ref arc_cache_init(void)
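
For context on the comment above: the exported dma_cache_*() helpers reach the selected backend through the __dma_cache_*() function pointers, so for a device on dma_direct_ops that chain is simply never entered. A standalone sketch of that boot-time dispatch pattern (types simplified so it compiles in userspace; names mirror the patch for readability only):

/*
 * One backend is bound once at "boot", and the exported entry point
 * makes a single indirect call per operation.
 */
#include <stdio.h>

typedef unsigned long phys_addr_t;

static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	printf("L1 wback+inv %#lx +%lu\n", start, sz);
}

static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	printf("SLC wback+inv %#lx +%lu\n", start, sz);
}

/* set once, analogous to arc_cache_init_master() */
static void (*__dma_cache_wback_inv)(phys_addr_t, unsigned long);

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}

int main(void)
{
	int have_slc = 1;

	__dma_cache_wback_inv = have_slc ? __dma_cache_wback_inv_slc
					 : __dma_cache_wback_inv_l1;
	dma_cache_wback_inv(0x80000000UL, 4096);
	return 0;
}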
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index ec47e6079f5d..c0b49399225d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -6,20 +6,17 @@
  * published by the Free Software Foundation.
  */
 
-/*
- * DMA Coherent API Notes
- *
- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessing it using a kernel virtual address, with
- * Cache bit off in the TLB entry.
- *
- * The default DMA address == Phy address which is 0x8000_0000 based.
- */
-
 #include <linux/dma-noncoherent.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ *  - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs)
 {
@@ -33,19 +30,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (!page)
 		return NULL;
 
-	/*
-	 * IOC relies on all data (even coherent DMA data) being in cache
-	 * Thus allocate normal cached memory
-	 *
-	 * The gains with IOC are two pronged:
-	 *   -For streaming data, elides need for cache maintenance, saving
-	 *    cycles in flush code, and bus bandwidth as all the lines of a
-	 *    buffer need to be flushed out to memory
-	 *   -For coherent data, Read/Write to buffers terminate early in cache
-	 *    (vs. always going to memory - thus are faster)
-	 */
-	if ((is_isa_arcv2() && ioc_enable) ||
-	    (attrs & DMA_ATTR_NON_CONSISTENT))
+	if (attrs & DMA_ATTR_NON_CONSISTENT)
 		need_coh = 0;
 
 	/*
@@ -95,8 +80,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	struct page *page = virt_to_page(paddr);
 	int is_non_coh = 1;
 
-	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-			(is_isa_arcv2() && ioc_enable);
+	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT);
 
 	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
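
The two hunks above drop the IOC special case from allocation and free, leaving an explicit DMA_ATTR_NON_CONSISTENT request as the only reason to skip the uncached kernel mapping; IOC-coherent devices no longer reach this code at all. A standalone sketch of the resulting decision (the attribute value matches include/linux/dma-mapping.h of this era but is re-declared here purely for illustration):

#include <stdio.h>

#define DMA_ATTR_NON_CONSISTENT	(1UL << 3)	/* illustrative copy */

/* mirrors the simplified need_coh test in arch_dma_alloc() */
static int need_uncached_mapping(unsigned long attrs)
{
	return !(attrs & DMA_ATTR_NON_CONSISTENT);
}

int main(void)
{
	printf("default:        uncached=%d\n", need_uncached_mapping(0));
	printf("non-consistent: uncached=%d\n",
	       need_uncached_mapping(DMA_ATTR_NON_CONSISTENT));
	return 0;
}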
@@ -185,3 +169,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 		break;
 	}
 }
+
+/*
+ * Plug in coherent or noncoherent dma ops
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+			const struct iommu_ops *iommu, bool coherent)
+{
+	/*
+	 * IOC hardware snoops all DMA traffic keeping the caches consistent
+	 * with memory - eliding need for any explicit cache maintenance of
+	 * DMA buffers - so we can use dma_direct cache ops.
+	 */
+	if (is_isa_arcv2() && ioc_enable && coherent) {
+		set_dma_ops(dev, &dma_direct_ops);
+		dev_info(dev, "use dma_direct_ops cache ops\n");
+	} else {
+		set_dma_ops(dev, &dma_noncoherent_ops);
+		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
+	}
+}
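
With this hook in place, one system can mix IOC-snooped and non-coherent DMA masters, selected per device. A standalone sketch of the selection, assuming the coherent flag arrives from the DT core (of_dma_configure() derives it from the device's "dma-coherent" property); all types below are local stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct dma_map_ops {
	const char *name;	/* stands in for the real callback table */
};

static const struct dma_map_ops dma_direct_ops = { "dma_direct_ops" };
static const struct dma_map_ops dma_noncoherent_ops = { "dma_noncoherent_ops" };

struct device {
	const struct dma_map_ops *dma_ops;
};

static void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
	dev->dma_ops = ops;
}

/* mirrors the logic of the new ARC arch_setup_dma_ops() */
static void setup_dma_ops(struct device *dev, bool ioc_enable, bool coherent)
{
	if (ioc_enable && coherent)
		set_dma_ops(dev, &dma_direct_ops);
	else
		set_dma_ops(dev, &dma_noncoherent_ops);
}

int main(void)
{
	struct device eth = { 0 }, spi = { 0 };

	setup_dma_ops(&eth, true, true);	/* "dma-coherent" in DT */
	setup_dma_ops(&spi, true, false);	/* not marked coherent */
	printf("eth uses %s, spi uses %s\n",
	       eth.dma_ops->name, spi.dma_ops->name);
	return 0;
}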